/* xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * (revision 4d66c56f7efe122d09d06cd3ebfa52a43d51a9cb)
 */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

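/* Per-ASIC router state, allocated once at router init. All routing state
 * below (RIFs, virtual routers, neighbour and nexthop tables, LPM trees,
 * tunnel list) hangs off this structure.
 */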
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	struct notifier_block inetaddr_nb;
	struct notifier_block inet6addr_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
	u32 adj_discard_index;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
	u16 ul_rif_id; /* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

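/* Virtual operations implemented by each RIF type. rif_size tells the
 * allocator how large the type-specific container embedding struct
 * mlxsw_sp_rif is; setup(), when provided, fills in type-specific fields,
 * configure() programs the RIF to the device and deconfigure() removes it.
 */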
struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

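/* Attach or detach a flow counter to/from a RIF by read-modify-writing the
 * RITR register: query the current RIF entry first so unrelated fields are
 * preserved, then update only the counter binding.
 */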
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

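/* Counter setup is a three-step sequence: take a counter from the RIF
 * sub-pool, zero it via RICNT, and only then bind it to the RIF through
 * RITR. The valid flag is set last, so readers never see a half-bound
 * counter. A hypothetical caller sketch:
 *
 *	err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
 *					 MLXSW_SP_RIF_COUNTER_EGRESS);
 *	...
 *	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					     MLXSW_SP_RIF_COUNTER_EGRESS,
 *					     &cnt);
 */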
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

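/* One bit per possible prefix length. An IPv6 address is 128 bits, so
 * together with the zero-length (default route) prefix this gives
 * 128 + 1 = 129 usable prefix lengths, which also covers IPv4.
 */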
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

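/* Describe the tree structure to the device via RALST. The used prefix
 * lengths are chained together, each bin pointing at the previous (shorter)
 * one as its left child, with the longest used prefix length becoming the
 * root bin. E.g., for used lengths {24, 32}, bin 32 is presumably the root
 * and bin 24 its left child; prefix length 0, when used, is not packed as a
 * bin of its own.
 */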
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

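/* E.g., RT_TABLE_LOCAL (255) and RT_TABLE_DEFAULT (253) both collapse into
 * RT_TABLE_MAIN (254), so routes from all three kernel tables end up in a
 * single virtual router in the device.
 */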
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return -ESRCH;
	*vr_id = vr->id;

	return 0;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

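/* Rebind every virtual router that is currently using the protocol's
 * default LPM tree to new_tree. On failure, already-rebound VRs are walked
 * back to the old tree before the error is returned.
 */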
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

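/* tun->parms.link holds the ifindex of the underlay (bound) device; zero
 * means the tunnel is not bound to one, in which case this returns NULL.
 */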
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

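/* The underlay table is that of the bound underlay device if there is one,
 * otherwise that of the tunnel device itself; either way, fall back to the
 * main table when no L3 master device is involved.
 */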
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

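/* Reserve one KVD adjacency entry to serve as the tunnel's decap record,
 * then cross-link the FIB entry and the IPIP entry so that either side can
 * find the other when the route or the tunnel goes away.
 */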
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

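/* Look up a host (/32) local-delivery entry of the given type in the given
 * table. Returns NULL when the VR, the node, or an entry of a matching type
 * does not exist. Only IPv4 is currently supported here.
 */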
static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != type)
		return NULL;

	return fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

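/* Program (or unprogram, per @enable) the loopback RIF that implements the
 * tunnel in hardware, using the RITR register's IP-in-IP loopback
 * properties. Only IPv4 underlays are supported; IPv6 yields -EAFNOSUPPORT.
 */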
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update the offload related to an IPIP
 *                                       entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: The IPIP entry to update.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * This always updates decap, and in addition performs the updates selected
 * by the flag arguments above.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	if (!ipip_entry)
		return 0;

	/* For flat configuration cases, moving overlay to a different VRF might
	 * cause local address conflict, and the conflicting tunnels need to be
	 * demoted.
	 */
	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     struct netlink_ext_ack *extack)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in via the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}

static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}

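/* Dispatch netdevice events on a tunnel (overlay) device: REGISTER and
 * UNREGISTER create and destroy the IPIP entry, UP and DOWN promote and
 * demote its decap route, CHANGEUPPER handles VRF moves, and CHANGE and
 * CHANGEMTU re-validate or refresh the offloaded configuration.
 */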
1743 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1744 				     struct net_device *ol_dev,
1745 				     unsigned long event,
1746 				     struct netdev_notifier_info *info)
1747 {
1748 	struct netdev_notifier_changeupper_info *chup;
1749 	struct netlink_ext_ack *extack;
1750 
1751 	switch (event) {
1752 	case NETDEV_REGISTER:
1753 		return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1754 	case NETDEV_UNREGISTER:
1755 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1756 		return 0;
1757 	case NETDEV_UP:
1758 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1759 		return 0;
1760 	case NETDEV_DOWN:
1761 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1762 		return 0;
1763 	case NETDEV_CHANGEUPPER:
1764 		chup = container_of(info, typeof(*chup), info);
1765 		extack = info->extack;
1766 		if (netif_is_l3_master(chup->upper_dev))
1767 			return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1768 								    ol_dev,
1769 								    extack);
1770 		return 0;
1771 	case NETDEV_CHANGE:
1772 		extack = info->extack;
1773 		return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1774 							       ol_dev, extack);
1775 	case NETDEV_CHANGEMTU:
1776 		return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1777 	}
1778 	return 0;
1779 }
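
/* An illustrative event flow for the dispatcher above (assuming a GRE
 * overlay device named gre1): `ip link add gre1 type gre ...' raises
 * NETDEV_REGISTER, which may create an IPIP entry if the tunnel is
 * offloadable; `ip link set gre1 up' raises NETDEV_UP, which may promote
 * the matching local route to a decap entry; moving gre1 into a VRF
 * raises NETDEV_CHANGEUPPER with an l3mdev upper, which rewires the
 * tunnel's RIF to the new virtual router.
 */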
1780 
1781 static int
1782 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1783 				   struct mlxsw_sp_ipip_entry *ipip_entry,
1784 				   struct net_device *ul_dev,
1785 				   unsigned long event,
1786 				   struct netdev_notifier_info *info)
1787 {
1788 	struct netdev_notifier_changeupper_info *chup;
1789 	struct netlink_ext_ack *extack;
1790 
1791 	switch (event) {
1792 	case NETDEV_CHANGEUPPER:
1793 		chup = container_of(info, typeof(*chup), info);
1794 		extack = info->extack;
1795 		if (netif_is_l3_master(chup->upper_dev))
1796 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1797 								    ipip_entry,
1798 								    ul_dev,
1799 								    extack);
1800 		break;
1801 
1802 	case NETDEV_UP:
1803 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1804 							   ul_dev);
1805 	case NETDEV_DOWN:
1806 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1807 							     ipip_entry,
1808 							     ul_dev);
1809 	}
1810 	return 0;
1811 }
1812 
1813 int
1814 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1815 				 struct net_device *ul_dev,
1816 				 unsigned long event,
1817 				 struct netdev_notifier_info *info)
1818 {
1819 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1820 	int err;
1821 
1822 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1823 								ul_dev,
1824 								ipip_entry))) {
1825 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1826 							 ul_dev, event, info);
1827 		if (err) {
1828 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1829 								 ul_dev);
1830 			return err;
1831 		}
1832 	}
1833 
1834 	return 0;
1835 }
1836 
1837 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1838 				      enum mlxsw_sp_l3proto ul_proto,
1839 				      const union mlxsw_sp_l3addr *ul_sip,
1840 				      u32 tunnel_index)
1841 {
1842 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1843 	struct mlxsw_sp_fib_entry *fib_entry;
1844 	int err;
1845 
1846 	/* It is valid to create a tunnel with a local IP and only later
1847 	 * assign this IP address to a local interface.
1848 	 */
1849 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1850 							 ul_proto, ul_sip,
1851 							 type);
1852 	if (!fib_entry)
1853 		return 0;
1854 
1855 	fib_entry->decap.tunnel_index = tunnel_index;
1856 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1857 
1858 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1859 	if (err)
1860 		goto err_fib_entry_update;
1861 
1862 	return 0;
1863 
1864 err_fib_entry_update:
1865 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1866 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1867 	return err;
1868 }
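
/* In other words: if a local (IP2ME) route for the NVE source address is
 * already programmed as a trap, it is converted in place to an NVE decap
 * entry pointing at `tunnel_index'; on failure the trap type is restored
 * and re-written so packets keep flowing through the kernel. As a worked
 * example (hypothetical address): if a VxLAN device with local address
 * 10.0.0.1 is configured before 10.0.0.1 exists on any interface, no
 * matching FIB entry is found yet and the function simply returns 0.
 */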
1869 
1870 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1871 				      enum mlxsw_sp_l3proto ul_proto,
1872 				      const union mlxsw_sp_l3addr *ul_sip)
1873 {
1874 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1875 	struct mlxsw_sp_fib_entry *fib_entry;
1876 
1877 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1878 							 ul_proto, ul_sip,
1879 							 type);
1880 	if (!fib_entry)
1881 		return;
1882 
1883 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1884 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1885 }
1886 
1887 struct mlxsw_sp_neigh_key {
1888 	struct neighbour *n;
1889 };
1890 
1891 struct mlxsw_sp_neigh_entry {
1892 	struct list_head rif_list_node;
1893 	struct rhash_head ht_node;
1894 	struct mlxsw_sp_neigh_key key;
1895 	u16 rif;
1896 	bool connected;
1897 	unsigned char ha[ETH_ALEN];
1898 	struct list_head nexthop_list; /* list of nexthops using
1899 					* this neigh entry
1900 					*/
1901 	struct list_head nexthop_neighs_list_node;
1902 	unsigned int counter_index;
1903 	bool counter_valid;
1904 };
1905 
1906 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1907 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1908 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1909 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
1910 };
1911 
1912 struct mlxsw_sp_neigh_entry *
1913 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1914 			struct mlxsw_sp_neigh_entry *neigh_entry)
1915 {
1916 	if (!neigh_entry) {
1917 		if (list_empty(&rif->neigh_list))
1918 			return NULL;
1919 		else
1920 			return list_first_entry(&rif->neigh_list,
1921 						typeof(*neigh_entry),
1922 						rif_list_node);
1923 	}
1924 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
1925 		return NULL;
1926 	return list_next_entry(neigh_entry, rif_list_node);
1927 }
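
/* A minimal usage sketch for the iterator above (the caller loop and the
 * `neigh' variable name are hypothetical); the dpipe host table dump,
 * for example, walks a RIF's neighbour list in this fashion:
 *
 *	struct mlxsw_sp_neigh_entry *neigh = NULL;
 *
 *	while ((neigh = mlxsw_sp_rif_neigh_next(rif, neigh)))
 *		process(neigh);
 */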
1928 
1929 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1930 {
1931 	return neigh_entry->key.n->tbl->family;
1932 }
1933 
1934 unsigned char *
1935 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1936 {
1937 	return neigh_entry->ha;
1938 }
1939 
1940 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1941 {
1942 	struct neighbour *n;
1943 
1944 	n = neigh_entry->key.n;
1945 	return ntohl(*((__be32 *) n->primary_key));
1946 }
1947 
1948 struct in6_addr *
1949 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1950 {
1951 	struct neighbour *n;
1952 
1953 	n = neigh_entry->key.n;
1954 	return (struct in6_addr *) &n->primary_key;
1955 }
1956 
1957 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1958 			       struct mlxsw_sp_neigh_entry *neigh_entry,
1959 			       u64 *p_counter)
1960 {
1961 	if (!neigh_entry->counter_valid)
1962 		return -EINVAL;
1963 
1964 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1965 					 p_counter, NULL);
1966 }
1967 
1968 static struct mlxsw_sp_neigh_entry *
1969 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1970 			   u16 rif)
1971 {
1972 	struct mlxsw_sp_neigh_entry *neigh_entry;
1973 
1974 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1975 	if (!neigh_entry)
1976 		return NULL;
1977 
1978 	neigh_entry->key.n = n;
1979 	neigh_entry->rif = rif;
1980 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1981 
1982 	return neigh_entry;
1983 }
1984 
1985 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1986 {
1987 	kfree(neigh_entry);
1988 }
1989 
1990 static int
1991 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1992 			    struct mlxsw_sp_neigh_entry *neigh_entry)
1993 {
1994 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
1995 				      &neigh_entry->ht_node,
1996 				      mlxsw_sp_neigh_ht_params);
1997 }
1998 
1999 static void
2000 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2001 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2002 {
2003 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2004 			       &neigh_entry->ht_node,
2005 			       mlxsw_sp_neigh_ht_params);
2006 }
2007 
2008 static bool
2009 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2010 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2011 {
2012 	struct devlink *devlink;
2013 	const char *table_name;
2014 
2015 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2016 	case AF_INET:
2017 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2018 		break;
2019 	case AF_INET6:
2020 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2021 		break;
2022 	default:
2023 		WARN_ON(1);
2024 		return false;
2025 	}
2026 
2027 	devlink = priv_to_devlink(mlxsw_sp->core);
2028 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2029 }
2030 
2031 static void
2032 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2033 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2034 {
2035 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2036 		return;
2037 
2038 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2039 		return;
2040 
2041 	neigh_entry->counter_valid = true;
2042 }
2043 
2044 static void
2045 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2046 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2047 {
2048 	if (!neigh_entry->counter_valid)
2049 		return;
2050 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2051 				   neigh_entry->counter_index);
2052 	neigh_entry->counter_valid = false;
2053 }
2054 
2055 static struct mlxsw_sp_neigh_entry *
2056 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2057 {
2058 	struct mlxsw_sp_neigh_entry *neigh_entry;
2059 	struct mlxsw_sp_rif *rif;
2060 	int err;
2061 
2062 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2063 	if (!rif)
2064 		return ERR_PTR(-EINVAL);
2065 
2066 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2067 	if (!neigh_entry)
2068 		return ERR_PTR(-ENOMEM);
2069 
2070 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2071 	if (err)
2072 		goto err_neigh_entry_insert;
2073 
2074 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2075 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2076 
2077 	return neigh_entry;
2078 
2079 err_neigh_entry_insert:
2080 	mlxsw_sp_neigh_entry_free(neigh_entry);
2081 	return ERR_PTR(err);
2082 }
2083 
2084 static void
2085 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2086 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2087 {
2088 	list_del(&neigh_entry->rif_list_node);
2089 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2090 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2091 	mlxsw_sp_neigh_entry_free(neigh_entry);
2092 }
2093 
2094 static struct mlxsw_sp_neigh_entry *
2095 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2096 {
2097 	struct mlxsw_sp_neigh_key key;
2098 
2099 	key.n = n;
2100 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2101 				      &key, mlxsw_sp_neigh_ht_params);
2102 }
2103 
2104 static void
2105 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2106 {
2107 	unsigned long interval;
2108 
2109 #if IS_ENABLED(CONFIG_IPV6)
2110 	interval = min_t(unsigned long,
2111 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2112 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2113 #else
2114 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2115 #endif
2116 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2117 }
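
/* For example, with the kernel's default DELAY_PROBE_TIME of 5 seconds
 * for both ARP and ND, the update interval starts out as 5000 ms; if the
 * IPv6 value were lowered to 1 second, the min_t() above would pick
 * 1000 ms so that neither table's probing falls behind.
 */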
2118 
2119 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2120 						   char *rauhtd_pl,
2121 						   int ent_index)
2122 {
2123 	struct net_device *dev;
2124 	struct neighbour *n;
2125 	__be32 dipn;
2126 	u32 dip;
2127 	u16 rif;
2128 
2129 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2130 
2131 	if (!mlxsw_sp->router->rifs[rif]) {
2132 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2133 		return;
2134 	}
2135 
2136 	dipn = htonl(dip);
2137 	dev = mlxsw_sp->router->rifs[rif]->dev;
2138 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2139 	if (!n)
2140 		return;
2141 
2142 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2143 	neigh_event_send(n, NULL);
2144 	neigh_release(n);
2145 }
2146 
2147 #if IS_ENABLED(CONFIG_IPV6)
2148 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2149 						   char *rauhtd_pl,
2150 						   int rec_index)
2151 {
2152 	struct net_device *dev;
2153 	struct neighbour *n;
2154 	struct in6_addr dip;
2155 	u16 rif;
2156 
2157 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2158 					 (char *) &dip);
2159 
2160 	if (!mlxsw_sp->router->rifs[rif]) {
2161 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2162 		return;
2163 	}
2164 
2165 	dev = mlxsw_sp->router->rifs[rif]->dev;
2166 	n = neigh_lookup(&nd_tbl, &dip, dev);
2167 	if (!n)
2168 		return;
2169 
2170 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2171 	neigh_event_send(n, NULL);
2172 	neigh_release(n);
2173 }
2174 #else
2175 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2176 						   char *rauhtd_pl,
2177 						   int rec_index)
2178 {
2179 }
2180 #endif
2181 
2182 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2183 						   char *rauhtd_pl,
2184 						   int rec_index)
2185 {
2186 	u8 num_entries;
2187 	int i;
2188 
2189 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2190 								rec_index);
2191 	/* Hardware starts counting at 0, so add 1. */
2192 	num_entries++;
2193 
2194 	/* Each record consists of several neighbour entries. */
2195 	for (i = 0; i < num_entries; i++) {
2196 		int ent_index;
2197 
2198 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2199 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2200 						       ent_index);
2201 	}
2203 }
2204 
2205 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2206 						   char *rauhtd_pl,
2207 						   int rec_index)
2208 {
2209 	/* One record contains one entry. */
2210 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2211 					       rec_index);
2212 }
2213 
2214 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2215 					      char *rauhtd_pl, int rec_index)
2216 {
2217 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2218 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2219 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2220 						       rec_index);
2221 		break;
2222 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2223 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2224 						       rec_index);
2225 		break;
2226 	}
2227 }
2228 
2229 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2230 {
2231 	u8 num_rec, last_rec_index, num_entries;
2232 
2233 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2234 	last_rec_index = num_rec - 1;
2235 
2236 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2237 		return false;
2238 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2239 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2240 		return true;
2241 
2242 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2243 								last_rec_index);
2244 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2245 		return true;
2246 	return false;
2247 }
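
/* The reasoning above, spelled out: the dump can only be truncated when
 * the reply holds the maximum number of records. An IPv6 record carries
 * exactly one entry, so a maximal reply ending in an IPv6 record must be
 * treated as full. An IPv4 record holds several entries, so the reply is
 * full only if its last record used every entry slot (note the increment:
 * the register encodes num_entries zero-based, as above).
 */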
2248 
2249 static int
2250 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2251 				       char *rauhtd_pl,
2252 				       enum mlxsw_reg_rauhtd_type type)
2253 {
2254 	int i, num_rec;
2255 	int err;
2256 
2257 	/* Make sure the neighbour's netdev isn't removed in the
2258 	 * process.
2259 	 */
2260 	rtnl_lock();
2261 	do {
2262 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2263 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2264 				      rauhtd_pl);
2265 		if (err) {
2266 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2267 			break;
2268 		}
2269 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2270 		for (i = 0; i < num_rec; i++)
2271 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2272 							  i);
2273 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2274 	rtnl_unlock();
2275 
2276 	return err;
2277 }
2278 
2279 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2280 {
2281 	enum mlxsw_reg_rauhtd_type type;
2282 	char *rauhtd_pl;
2283 	int err;
2284 
2285 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2286 	if (!rauhtd_pl)
2287 		return -ENOMEM;
2288 
2289 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2290 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2291 	if (err)
2292 		goto out;
2293 
2294 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2295 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2296 out:
2297 	kfree(rauhtd_pl);
2298 	return err;
2299 }
2300 
2301 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2302 {
2303 	struct mlxsw_sp_neigh_entry *neigh_entry;
2304 
2305 	/* Take the RTNL mutex here to prevent the lists from changing. */
2306 	rtnl_lock();
2307 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2308 			    nexthop_neighs_list_node)
2309 		/* If this neigh has nexthops, make the kernel think this neigh
2310 		 * is active regardless of the traffic.
2311 		 */
2312 		neigh_event_send(neigh_entry->key.n, NULL);
2313 	rtnl_unlock();
2314 }
2315 
2316 static void
2317 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2318 {
2319 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2320 
2321 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2322 			       msecs_to_jiffies(interval));
2323 }
2324 
2325 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2326 {
2327 	struct mlxsw_sp_router *router;
2328 	int err;
2329 
2330 	router = container_of(work, struct mlxsw_sp_router,
2331 			      neighs_update.dw.work);
2332 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2333 	if (err)
2334 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2335 
2336 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2337 
2338 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2339 }
2340 
2341 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2342 {
2343 	struct mlxsw_sp_neigh_entry *neigh_entry;
2344 	struct mlxsw_sp_router *router;
2345 
2346 	router = container_of(work, struct mlxsw_sp_router,
2347 			      nexthop_probe_dw.work);
2348 	/* Iterate over the nexthop neighbours, find the unresolved ones and
2349 	 * send ARP to them. This solves a chicken-and-egg problem: a nexthop
2350 	 * would not get offloaded until its neighbour is resolved, but the
2351 	 * neighbour would never get resolved as long as traffic flows in HW
2352 	 * via a different nexthop.
2353 	 *
2354 	 * Take the RTNL mutex here to prevent the lists from changing.
2355 	 */
2356 	rtnl_lock();
2357 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2358 			    nexthop_neighs_list_node)
2359 		if (!neigh_entry->connected)
2360 			neigh_event_send(neigh_entry->key.n, NULL);
2361 	rtnl_unlock();
2362 
2363 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2364 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2365 }
2366 
2367 static void
2368 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2369 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2370 			      bool removing, bool dead);
2371 
2372 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2373 {
2374 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2375 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2376 }
2377 
2378 static int
2379 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2380 				struct mlxsw_sp_neigh_entry *neigh_entry,
2381 				enum mlxsw_reg_rauht_op op)
2382 {
2383 	struct neighbour *n = neigh_entry->key.n;
2384 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2385 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2386 
2387 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2388 			      dip);
2389 	if (neigh_entry->counter_valid)
2390 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2391 					     neigh_entry->counter_index);
2392 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2393 }
2394 
2395 static int
2396 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2397 				struct mlxsw_sp_neigh_entry *neigh_entry,
2398 				enum mlxsw_reg_rauht_op op)
2399 {
2400 	struct neighbour *n = neigh_entry->key.n;
2401 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2402 	const char *dip = n->primary_key;
2403 
2404 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2405 			      dip);
2406 	if (neigh_entry->counter_valid)
2407 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2408 					     neigh_entry->counter_index);
2409 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2410 }
2411 
2412 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2413 {
2414 	struct neighbour *n = neigh_entry->key.n;
2415 
2416 	/* Packets with a link-local destination address are trapped
2417 	 * after LPM lookup and never reach the neighbour table, so
2418 	 * there is no need to program such neighbours to the device.
2419 	 */
2420 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2421 	    IPV6_ADDR_LINKLOCAL)
2422 		return true;
2423 	return false;
2424 }
2425 
2426 static void
2427 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2428 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2429 			    bool adding)
2430 {
2431 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2432 	int err;
2433 
2434 	if (!adding && !neigh_entry->connected)
2435 		return;
2436 	neigh_entry->connected = adding;
2437 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2438 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2439 						      op);
2440 		if (err)
2441 			return;
2442 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2443 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2444 			return;
2445 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2446 						      op);
2447 		if (err)
2448 			return;
2449 	} else {
2450 		WARN_ON_ONCE(1);
2451 		return;
2452 	}
2453 
2454 	if (adding)
2455 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2456 	else
2457 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2458 }
2459 
2460 void
2461 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2462 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2463 				    bool adding)
2464 {
2465 	if (adding)
2466 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2467 	else
2468 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2469 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2470 }
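
/* Note that the entry is re-written with adding=true in both branches:
 * the RAUHT register must be updated for the counter binding (or
 * unbinding) to take effect, and the neighbour itself stays offloaded
 * throughout.
 */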
2471 
2472 struct mlxsw_sp_netevent_work {
2473 	struct work_struct work;
2474 	struct mlxsw_sp *mlxsw_sp;
2475 	struct neighbour *n;
2476 };
2477 
2478 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2479 {
2480 	struct mlxsw_sp_netevent_work *net_work =
2481 		container_of(work, struct mlxsw_sp_netevent_work, work);
2482 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2483 	struct mlxsw_sp_neigh_entry *neigh_entry;
2484 	struct neighbour *n = net_work->n;
2485 	unsigned char ha[ETH_ALEN];
2486 	bool entry_connected;
2487 	u8 nud_state, dead;
2488 
2489 	/* If these parameters are changed after we release the lock,
2490 	 * then we are guaranteed to receive another event letting us
2491 	 * know about it.
2492 	 */
2493 	read_lock_bh(&n->lock);
2494 	memcpy(ha, n->ha, ETH_ALEN);
2495 	nud_state = n->nud_state;
2496 	dead = n->dead;
2497 	read_unlock_bh(&n->lock);
2498 
2499 	rtnl_lock();
2500 	mlxsw_sp_span_respin(mlxsw_sp);
2501 
2502 	entry_connected = nud_state & NUD_VALID && !dead;
2503 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2504 	if (!entry_connected && !neigh_entry)
2505 		goto out;
2506 	if (!neigh_entry) {
2507 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2508 		if (IS_ERR(neigh_entry))
2509 			goto out;
2510 	}
2511 
2512 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2513 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2514 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2515 				      dead);
2516 
2517 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2518 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2519 
2520 out:
2521 	rtnl_unlock();
2522 	neigh_release(n);
2523 	kfree(net_work);
2524 }
2525 
2526 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2527 
2528 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2529 {
2530 	struct mlxsw_sp_netevent_work *net_work =
2531 		container_of(work, struct mlxsw_sp_netevent_work, work);
2532 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2533 
2534 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2535 	kfree(net_work);
2536 }
2537 
2538 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2539 
2540 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2541 {
2542 	struct mlxsw_sp_netevent_work *net_work =
2543 		container_of(work, struct mlxsw_sp_netevent_work, work);
2544 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2545 
2546 	__mlxsw_sp_router_init(mlxsw_sp);
2547 	kfree(net_work);
2548 }
2549 
2550 static int mlxsw_sp_router_schedule_work(struct net *net,
2551 					 struct notifier_block *nb,
2552 					 void (*cb)(struct work_struct *))
2553 {
2554 	struct mlxsw_sp_netevent_work *net_work;
2555 	struct mlxsw_sp_router *router;
2556 
2557 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2558 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2559 		return NOTIFY_DONE;
2560 
2561 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2562 	if (!net_work)
2563 		return NOTIFY_BAD;
2564 
2565 	INIT_WORK(&net_work->work, cb);
2566 	net_work->mlxsw_sp = router->mlxsw_sp;
2567 	mlxsw_core_schedule_work(&net_work->work);
2568 	return NOTIFY_DONE;
2569 }
2570 
2571 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2572 					  unsigned long event, void *ptr)
2573 {
2574 	struct mlxsw_sp_netevent_work *net_work;
2575 	struct mlxsw_sp_port *mlxsw_sp_port;
2576 	struct mlxsw_sp *mlxsw_sp;
2577 	unsigned long interval;
2578 	struct neigh_parms *p;
2579 	struct neighbour *n;
2580 
2581 	switch (event) {
2582 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2583 		p = ptr;
2584 
2585 		/* We don't care about changes in the default table. */
2586 		if (!p->dev || (p->tbl->family != AF_INET &&
2587 				p->tbl->family != AF_INET6))
2588 			return NOTIFY_DONE;
2589 
2590 		/* We are in atomic context and can't take RTNL mutex,
2591 		 * so use RCU variant to walk the device chain.
2592 		 */
2593 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2594 		if (!mlxsw_sp_port)
2595 			return NOTIFY_DONE;
2596 
2597 		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2598 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2599 		mlxsw_sp->router->neighs_update.interval = interval;
2600 
2601 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2602 		break;
2603 	case NETEVENT_NEIGH_UPDATE:
2604 		n = ptr;
2605 
2606 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2607 			return NOTIFY_DONE;
2608 
2609 		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2610 		if (!mlxsw_sp_port)
2611 			return NOTIFY_DONE;
2612 
2613 		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2614 		if (!net_work) {
2615 			mlxsw_sp_port_dev_put(mlxsw_sp_port);
2616 			return NOTIFY_BAD;
2617 		}
2618 
2619 		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2620 		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2621 		net_work->n = n;
2622 
2623 		/* Take a reference to ensure the neighbour won't be
2624 		 * destroyed until we drop the reference in the delayed
2625 		 * work.
2626 		 */
2627 		neigh_clone(n);
2628 		mlxsw_core_schedule_work(&net_work->work);
2629 		mlxsw_sp_port_dev_put(mlxsw_sp_port);
2630 		break;
2631 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2632 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2633 		return mlxsw_sp_router_schedule_work(ptr, nb,
2634 				mlxsw_sp_router_mp_hash_event_work);
2635 
2636 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2637 		return mlxsw_sp_router_schedule_work(ptr, nb,
2638 				mlxsw_sp_router_update_priority_work);
2639 	}
2640 
2641 	return NOTIFY_DONE;
2642 }
2643 
2644 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2645 {
2646 	int err;
2647 
2648 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2649 			      &mlxsw_sp_neigh_ht_params);
2650 	if (err)
2651 		return err;
2652 
2653 	/* Initialize the polling interval according to the default
2654 	 * table.
2655 	 */
2656 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2657 
2658 	/* Create the delayed works for neighbour activity update and nexthop probing */
2659 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2660 			  mlxsw_sp_router_neighs_update_work);
2661 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2662 			  mlxsw_sp_router_probe_unresolved_nexthops);
2663 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2664 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2665 	return 0;
2666 }
2667 
2668 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2669 {
2670 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2671 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2672 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2673 }
2674 
2675 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2676 					 struct mlxsw_sp_rif *rif)
2677 {
2678 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2679 
2680 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2681 				 rif_list_node) {
2682 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2683 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2684 	}
2685 }
2686 
2687 enum mlxsw_sp_nexthop_type {
2688 	MLXSW_SP_NEXTHOP_TYPE_ETH,
2689 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
2690 };
2691 
2692 struct mlxsw_sp_nexthop_key {
2693 	struct fib_nh *fib_nh;
2694 };
2695 
2696 struct mlxsw_sp_nexthop {
2697 	struct list_head neigh_list_node; /* member of neigh entry list */
2698 	struct list_head rif_list_node;
2699 	struct list_head router_list_node;
2700 	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2701 						* this belongs to
2702 						*/
2703 	struct rhash_head ht_node;
2704 	struct mlxsw_sp_nexthop_key key;
2705 	unsigned char gw_addr[sizeof(struct in6_addr)];
2706 	int ifindex;
2707 	int nh_weight;
2708 	int norm_nh_weight;
2709 	int num_adj_entries;
2710 	struct mlxsw_sp_rif *rif;
2711 	u8 should_offload:1, /* set indicates this neigh is connected and
2712 			      * should be put to KVD linear area of this group.
2713 			      */
2714 	   offloaded:1, /* set in case the neigh is actually put into
2715 			 * KVD linear area of this group.
2716 			 */
2717 	   update:1; /* set indicates that MAC of this neigh should be
2718 		      * updated in HW
2719 		      */
2720 	enum mlxsw_sp_nexthop_type type;
2721 	union {
2722 		struct mlxsw_sp_neigh_entry *neigh_entry;
2723 		struct mlxsw_sp_ipip_entry *ipip_entry;
2724 	};
2725 	unsigned int counter_index;
2726 	bool counter_valid;
2727 };
2728 
2729 struct mlxsw_sp_nexthop_group {
2730 	void *priv;
2731 	struct rhash_head ht_node;
2732 	struct list_head fib_list; /* list of fib entries that use this group */
2733 	struct neigh_table *neigh_tbl;
2734 	u8 adj_index_valid:1,
2735 	   gateway:1; /* routes using the group use a gateway */
2736 	u32 adj_index;
2737 	u16 ecmp_size;
2738 	u16 count;
2739 	int sum_norm_weight;
2740 	struct mlxsw_sp_nexthop nexthops[0];
2741 #define nh_rif	nexthops[0].rif
2742 };
2743 
2744 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2745 				    struct mlxsw_sp_nexthop *nh)
2746 {
2747 	struct devlink *devlink;
2748 
2749 	devlink = priv_to_devlink(mlxsw_sp->core);
2750 	if (!devlink_dpipe_table_counter_enabled(devlink,
2751 						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2752 		return;
2753 
2754 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2755 		return;
2756 
2757 	nh->counter_valid = true;
2758 }
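
/* Nexthop counters are allocated only when counting was enabled for the
 * adjacency dpipe table (MLXSW_SP_DPIPE_TABLE_NAME_ADJ), roughly like so
 * (hypothetical device name; consult devlink-dpipe(8) for exact syntax):
 *
 *	devlink dpipe table set pci/0000:03:00.0 name mlxsw_adj \
 *		counters_enabled true
 */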
2759 
2760 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2761 				   struct mlxsw_sp_nexthop *nh)
2762 {
2763 	if (!nh->counter_valid)
2764 		return;
2765 	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2766 	nh->counter_valid = false;
2767 }
2768 
2769 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2770 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2771 {
2772 	if (!nh->counter_valid)
2773 		return -EINVAL;
2774 
2775 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2776 					 p_counter, NULL);
2777 }
2778 
2779 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2780 					       struct mlxsw_sp_nexthop *nh)
2781 {
2782 	if (!nh) {
2783 		if (list_empty(&router->nexthop_list))
2784 			return NULL;
2785 		else
2786 			return list_first_entry(&router->nexthop_list,
2787 						typeof(*nh), router_list_node);
2788 	}
2789 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2790 		return NULL;
2791 	return list_next_entry(nh, router_list_node);
2792 }
2793 
2794 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2795 {
2796 	return nh->offloaded;
2797 }
2798 
2799 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2800 {
2801 	if (!nh->offloaded)
2802 		return NULL;
2803 	return nh->neigh_entry->ha;
2804 }
2805 
2806 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2807 			     u32 *p_adj_size, u32 *p_adj_hash_index)
2808 {
2809 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2810 	u32 adj_hash_index = 0;
2811 	int i;
2812 
2813 	if (!nh->offloaded || !nh_grp->adj_index_valid)
2814 		return -EINVAL;
2815 
2816 	*p_adj_index = nh_grp->adj_index;
2817 	*p_adj_size = nh_grp->ecmp_size;
2818 
2819 	for (i = 0; i < nh_grp->count; i++) {
2820 		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2821 
2822 		if (nh_iter == nh)
2823 			break;
2824 		if (nh_iter->offloaded)
2825 			adj_hash_index += nh_iter->num_adj_entries;
2826 	}
2827 
2828 	*p_adj_hash_index = adj_hash_index;
2829 	return 0;
2830 }
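
/* A worked example (made-up numbers): for a group with adj_index 1000,
 * ecmp_size 512 and two offloaded nexthops occupying 165 and 347
 * adjacency entries respectively, querying the second nexthop yields
 * *p_adj_index = 1000, *p_adj_size = 512 and *p_adj_hash_index = 165,
 * i.e. its entries live at indexes 1165..1511.
 */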
2831 
2832 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2833 {
2834 	return nh->rif;
2835 }
2836 
2837 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2838 {
2839 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2840 	int i;
2841 
2842 	for (i = 0; i < nh_grp->count; i++) {
2843 		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2844 
2845 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2846 			return true;
2847 	}
2848 	return false;
2849 }
2850 
2851 static struct fib_info *
2852 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2853 {
2854 	return nh_grp->priv;
2855 }
2856 
2857 struct mlxsw_sp_nexthop_group_cmp_arg {
2858 	enum mlxsw_sp_l3proto proto;
2859 	union {
2860 		struct fib_info *fi;
2861 		struct mlxsw_sp_fib6_entry *fib6_entry;
2862 	};
2863 };
2864 
2865 static bool
2866 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2867 				    const struct in6_addr *gw, int ifindex,
2868 				    int weight)
2869 {
2870 	int i;
2871 
2872 	for (i = 0; i < nh_grp->count; i++) {
2873 		const struct mlxsw_sp_nexthop *nh;
2874 
2875 		nh = &nh_grp->nexthops[i];
2876 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
2877 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2878 			return true;
2879 	}
2880 
2881 	return false;
2882 }
2883 
2884 static bool
2885 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2886 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
2887 {
2888 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2889 
2890 	if (nh_grp->count != fib6_entry->nrt6)
2891 		return false;
2892 
2893 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2894 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
2895 		struct in6_addr *gw;
2896 		int ifindex, weight;
2897 
2898 		ifindex = fib6_nh->fib_nh_dev->ifindex;
2899 		weight = fib6_nh->fib_nh_weight;
2900 		gw = &fib6_nh->fib_nh_gw6;
2901 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2902 							 weight))
2903 			return false;
2904 	}
2905 
2906 	return true;
2907 }
2908 
2909 static int
2910 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2911 {
2912 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2913 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2914 
2915 	switch (cmp_arg->proto) {
2916 	case MLXSW_SP_L3_PROTO_IPV4:
2917 		return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2918 	case MLXSW_SP_L3_PROTO_IPV6:
2919 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2920 						    cmp_arg->fib6_entry);
2921 	default:
2922 		WARN_ON(1);
2923 		return 1;
2924 	}
2925 }
2926 
2927 static int
2928 mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2929 {
2930 	return nh_grp->neigh_tbl->family;
2931 }
2932 
2933 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2934 {
2935 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
2936 	const struct mlxsw_sp_nexthop *nh;
2937 	struct fib_info *fi;
2938 	unsigned int val;
2939 	int i;
2940 
2941 	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2942 	case AF_INET:
2943 		fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2944 		return jhash(&fi, sizeof(fi), seed);
2945 	case AF_INET6:
2946 		val = nh_grp->count;
2947 		for (i = 0; i < nh_grp->count; i++) {
2948 			nh = &nh_grp->nexthops[i];
2949 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
2950 		}
2951 		return jhash(&val, sizeof(val), seed);
2952 	default:
2953 		WARN_ON(1);
2954 		return 0;
2955 	}
2956 }
2957 
2958 static u32
2959 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2960 {
2961 	unsigned int val = fib6_entry->nrt6;
2962 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2963 	struct net_device *dev;
2964 
2965 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2966 		dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
2967 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
2968 	}
2969 
2970 	return jhash(&val, sizeof(val), seed);
2971 }
2972 
2973 static u32
2974 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2975 {
2976 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2977 
2978 	switch (cmp_arg->proto) {
2979 	case MLXSW_SP_L3_PROTO_IPV4:
2980 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2981 	case MLXSW_SP_L3_PROTO_IPV6:
2982 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2983 	default:
2984 		WARN_ON(1);
2985 		return 0;
2986 	}
2987 }
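
/* Note the invariant these two hash functions maintain: for rhashtable
 * lookups to work, hashing a lookup key (a fib_info pointer or a
 * fib6_entry) must land in the same bucket as hashing the group object
 * itself. For IPv4 both sides hash the same fib_info pointer; for IPv6
 * both sides fold the nexthop count with the XOR of per-ifindex hashes,
 * so two routes with an identical set of gateway devices compare and
 * hash alike.
 */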
2988 
2989 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
2990 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
2991 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
2992 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
2993 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
2994 };
2995 
2996 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2997 					 struct mlxsw_sp_nexthop_group *nh_grp)
2998 {
2999 	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3000 	    !nh_grp->gateway)
3001 		return 0;
3002 
3003 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3004 				      &nh_grp->ht_node,
3005 				      mlxsw_sp_nexthop_group_ht_params);
3006 }
3007 
3008 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3009 					  struct mlxsw_sp_nexthop_group *nh_grp)
3010 {
3011 	if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
3012 	    !nh_grp->gateway)
3013 		return;
3014 
3015 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3016 			       &nh_grp->ht_node,
3017 			       mlxsw_sp_nexthop_group_ht_params);
3018 }
3019 
3020 static struct mlxsw_sp_nexthop_group *
3021 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3022 			       struct fib_info *fi)
3023 {
3024 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3025 
3026 	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
3027 	cmp_arg.fi = fi;
3028 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3029 				      &cmp_arg,
3030 				      mlxsw_sp_nexthop_group_ht_params);
3031 }
3032 
3033 static struct mlxsw_sp_nexthop_group *
3034 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3035 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3036 {
3037 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3038 
3039 	cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
3040 	cmp_arg.fib6_entry = fib6_entry;
3041 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3042 				      &cmp_arg,
3043 				      mlxsw_sp_nexthop_group_ht_params);
3044 }
3045 
3046 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3047 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3048 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3049 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3050 };
3051 
3052 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3053 				   struct mlxsw_sp_nexthop *nh)
3054 {
3055 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3056 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3057 }
3058 
3059 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3060 				    struct mlxsw_sp_nexthop *nh)
3061 {
3062 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3063 			       mlxsw_sp_nexthop_ht_params);
3064 }
3065 
3066 static struct mlxsw_sp_nexthop *
3067 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3068 			struct mlxsw_sp_nexthop_key key)
3069 {
3070 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3071 				      mlxsw_sp_nexthop_ht_params);
3072 }
3073 
3074 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3075 					     const struct mlxsw_sp_fib *fib,
3076 					     u32 adj_index, u16 ecmp_size,
3077 					     u32 new_adj_index,
3078 					     u16 new_ecmp_size)
3079 {
3080 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3081 
3082 	mlxsw_reg_raleu_pack(raleu_pl,
3083 			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
3084 			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
3085 			     new_ecmp_size);
3086 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3087 }
3088 
3089 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3090 					  struct mlxsw_sp_nexthop_group *nh_grp,
3091 					  u32 old_adj_index, u16 old_ecmp_size)
3092 {
3093 	struct mlxsw_sp_fib_entry *fib_entry;
3094 	struct mlxsw_sp_fib *fib = NULL;
3095 	int err;
3096 
3097 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3098 		if (fib == fib_entry->fib_node->fib)
3099 			continue;
3100 		fib = fib_entry->fib_node->fib;
3101 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
3102 							old_adj_index,
3103 							old_ecmp_size,
3104 							nh_grp->adj_index,
3105 							nh_grp->ecmp_size);
3106 		if (err)
3107 			return err;
3108 	}
3109 	return 0;
3110 }
3111 
3112 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3113 				     struct mlxsw_sp_nexthop *nh)
3114 {
3115 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3116 	char ratr_pl[MLXSW_REG_RATR_LEN];
3117 
3118 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3119 			    true, MLXSW_REG_RATR_TYPE_ETHERNET,
3120 			    adj_index, neigh_entry->rif);
3121 	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3122 	if (nh->counter_valid)
3123 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3124 	else
3125 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3126 
3127 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3128 }
3129 
3130 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3131 			    struct mlxsw_sp_nexthop *nh)
3132 {
3133 	int i;
3134 
3135 	for (i = 0; i < nh->num_adj_entries; i++) {
3136 		int err;
3137 
3138 		err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3139 		if (err)
3140 			return err;
3141 	}
3142 
3143 	return 0;
3144 }
3145 
3146 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3147 					  u32 adj_index,
3148 					  struct mlxsw_sp_nexthop *nh)
3149 {
3150 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3151 
3152 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3153 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3154 }
3155 
3156 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3157 					u32 adj_index,
3158 					struct mlxsw_sp_nexthop *nh)
3159 {
3160 	int i;
3161 
3162 	for (i = 0; i < nh->num_adj_entries; i++) {
3163 		int err;
3164 
3165 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3166 						     nh);
3167 		if (err)
3168 			return err;
3169 	}
3170 
3171 	return 0;
3172 }
3173 
3174 static int
3175 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3176 			      struct mlxsw_sp_nexthop_group *nh_grp,
3177 			      bool reallocate)
3178 {
3179 	u32 adj_index = nh_grp->adj_index; /* base */
3180 	struct mlxsw_sp_nexthop *nh;
3181 	int i;
3182 	int err;
3183 
3184 	for (i = 0; i < nh_grp->count; i++) {
3185 		nh = &nh_grp->nexthops[i];
3186 
3187 		if (!nh->should_offload) {
3188 			nh->offloaded = 0;
3189 			continue;
3190 		}
3191 
3192 		if (nh->update || reallocate) {
3193 			switch (nh->type) {
3194 			case MLXSW_SP_NEXTHOP_TYPE_ETH:
3195 				err = mlxsw_sp_nexthop_update
3196 					    (mlxsw_sp, adj_index, nh);
3197 				break;
3198 			case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3199 				err = mlxsw_sp_nexthop_ipip_update
3200 					    (mlxsw_sp, adj_index, nh);
3201 				break;
3202 			}
3203 			if (err)
3204 				return err;
3205 			nh->update = 0;
3206 			nh->offloaded = 1;
3207 		}
3208 		adj_index += nh->num_adj_entries;
3209 	}
3210 	return 0;
3211 }
3212 
3213 static bool
3214 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3215 				 const struct mlxsw_sp_fib_entry *fib_entry);
3216 
3217 static int
3218 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3219 				    struct mlxsw_sp_nexthop_group *nh_grp)
3220 {
3221 	struct mlxsw_sp_fib_entry *fib_entry;
3222 	int err;
3223 
3224 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3225 		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3226 						      fib_entry))
3227 			continue;
3228 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3229 		if (err)
3230 			return err;
3231 	}
3232 	return 0;
3233 }
3234 
3235 static void
3236 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3237 				   enum mlxsw_reg_ralue_op op, int err);
3238 
3239 static void
3240 mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3241 {
3242 	enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3243 	struct mlxsw_sp_fib_entry *fib_entry;
3244 
3245 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3246 		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3247 						      fib_entry))
3248 			continue;
3249 		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3250 	}
3251 }
3252 
3253 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3254 {
3255 	/* Valid sizes for an adjacency group are:
3256 	 * 1-64, 512, 1024, 2048 and 4096.
3257 	 */
3258 	if (*p_adj_grp_size <= 64)
3259 		return;
3260 	else if (*p_adj_grp_size <= 512)
3261 		*p_adj_grp_size = 512;
3262 	else if (*p_adj_grp_size <= 1024)
3263 		*p_adj_grp_size = 1024;
3264 	else if (*p_adj_grp_size <= 2048)
3265 		*p_adj_grp_size = 2048;
3266 	else
3267 		*p_adj_grp_size = 4096;
3268 }
3269 
3270 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3271 					     unsigned int alloc_size)
3272 {
3273 	if (alloc_size >= 4096)
3274 		*p_adj_grp_size = 4096;
3275 	else if (alloc_size >= 2048)
3276 		*p_adj_grp_size = 2048;
3277 	else if (alloc_size >= 1024)
3278 		*p_adj_grp_size = 1024;
3279 	else if (alloc_size >= 512)
3280 		*p_adj_grp_size = 512;
3281 }
3282 
3283 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3284 				     u16 *p_adj_grp_size)
3285 {
3286 	unsigned int alloc_size;
3287 	int err;
3288 
3289 	/* Round up the requested group size to the next size supported
3290 	 * by the device and make sure the request can be satisfied.
3291 	 */
3292 	mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3293 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3294 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3295 					      *p_adj_grp_size, &alloc_size);
3296 	if (err)
3297 		return err;
3298 	/* It is possible the allocation results in more allocated
3299 	 * entries than requested. Try to use as many of them as
3300 	 * possible.
3301 	 */
3302 	mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3303 
3304 	return 0;
3305 }
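
/* A worked example (made-up numbers): a requested group size of 90 is
 * first rounded up to 512, the next size the device supports. If the KVD
 * linear allocator then reports it would actually hand out 1024 entries
 * for that request, the group size is adjusted to 1024 -- the largest
 * valid size not exceeding the allocation -- so the whole allocation is
 * usable for ECMP instead of half of it going to waste.
 */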
3306 
3307 static void
3308 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3309 {
3310 	int i, g = 0, sum_norm_weight = 0;
3311 	struct mlxsw_sp_nexthop *nh;
3312 
3313 	for (i = 0; i < nh_grp->count; i++) {
3314 		nh = &nh_grp->nexthops[i];
3315 
3316 		if (!nh->should_offload)
3317 			continue;
3318 		if (g > 0)
3319 			g = gcd(nh->nh_weight, g);
3320 		else
3321 			g = nh->nh_weight;
3322 	}
3323 
3324 	for (i = 0; i < nh_grp->count; i++) {
3325 		nh = &nh_grp->nexthops[i];
3326 
3327 		if (!nh->should_offload)
3328 			continue;
3329 		nh->norm_nh_weight = nh->nh_weight / g;
3330 		sum_norm_weight += nh->norm_nh_weight;
3331 	}
3332 
3333 	nh_grp->sum_norm_weight = sum_norm_weight;
3334 }
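
/* A worked example: for offloadable nexthops with weights 3 and 6, the
 * running gcd is 3, so the normalized weights become 1 and 2 and
 * sum_norm_weight is 3 -- the smallest number of adjacency entries that
 * still realizes the 1:2 ratio. Nexthops that should not be offloaded
 * are ignored on both passes.
 */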
3335 
3336 static void
3337 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3338 {
3339 	int total = nh_grp->sum_norm_weight;
3340 	u16 ecmp_size = nh_grp->ecmp_size;
3341 	int i, weight = 0, lower_bound = 0;
3342 
3343 	for (i = 0; i < nh_grp->count; i++) {
3344 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3345 		int upper_bound;
3346 
3347 		if (!nh->should_offload)
3348 			continue;
3349 		weight += nh->norm_nh_weight;
3350 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3351 		nh->num_adj_entries = upper_bound - lower_bound;
3352 		lower_bound = upper_bound;
3353 	}
3354 }
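
/* A worked example (made-up sizes): with normalized weights 29 and 61
 * (sum 90) stretched over an ecmp_size of 512, the running upper bounds
 * are DIV_ROUND_CLOSEST(512 * 29, 90) = 165 and
 * DIV_ROUND_CLOSEST(512 * 90, 90) = 512, so the nexthops receive 165 and
 * 347 adjacency entries -- every slot is used and the error versus the
 * ideal 29:61 split is at most one entry per nexthop.
 */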
3355 
3356 static void
3357 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3358 			       struct mlxsw_sp_nexthop_group *nh_grp)
3359 {
3360 	u16 ecmp_size, old_ecmp_size;
3361 	struct mlxsw_sp_nexthop *nh;
3362 	bool offload_change = false;
3363 	u32 adj_index;
3364 	bool old_adj_index_valid;
3365 	u32 old_adj_index;
3366 	int i;
3367 	int err;
3368 
3369 	if (!nh_grp->gateway) {
3370 		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3371 		return;
3372 	}
3373 
3374 	for (i = 0; i < nh_grp->count; i++) {
3375 		nh = &nh_grp->nexthops[i];
3376 
3377 		if (nh->should_offload != nh->offloaded) {
3378 			offload_change = true;
3379 			if (nh->should_offload)
3380 				nh->update = 1;
3381 		}
3382 	}
3383 	if (!offload_change) {
3384 		/* Nothing was added or removed, so no need to reallocate. Just
3385 		 * update the MAC addresses at the existing adjacency indexes.
3386 		 */
3387 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3388 		if (err) {
3389 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3390 			goto set_trap;
3391 		}
3392 		return;
3393 	}
3394 	mlxsw_sp_nexthop_group_normalize(nh_grp);
3395 	if (!nh_grp->sum_norm_weight)
3396 		/* No neigh of this group is connected so we just set
3397 		 * the trap and let everything flow through the kernel.
3398 		 */
3399 		goto set_trap;
3400 
3401 	ecmp_size = nh_grp->sum_norm_weight;
3402 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3403 	if (err)
3404 		/* No valid allocation size available. */
3405 		goto set_trap;
3406 
3407 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3408 				  ecmp_size, &adj_index);
3409 	if (err) {
3410 		/* We ran out of KVD linear space, just set the
3411 		 * trap and let everything flow through the kernel.
3412 		 */
3413 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3414 		goto set_trap;
3415 	}
3416 	old_adj_index_valid = nh_grp->adj_index_valid;
3417 	old_adj_index = nh_grp->adj_index;
3418 	old_ecmp_size = nh_grp->ecmp_size;
3419 	nh_grp->adj_index_valid = 1;
3420 	nh_grp->adj_index = adj_index;
3421 	nh_grp->ecmp_size = ecmp_size;
3422 	mlxsw_sp_nexthop_group_rebalance(nh_grp);
3423 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3424 	if (err) {
3425 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3426 		goto set_trap;
3427 	}
3428 
3429 	if (!old_adj_index_valid) {
3430 		/* The trap was set for fib entries, so we have to call
3431 		 * fib entry update to unset it and use the adjacency index.
3432 		 */
3433 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3434 		if (err) {
3435 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3436 			goto set_trap;
3437 		}
3438 		return;
3439 	}
3440 
3441 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3442 					     old_adj_index, old_ecmp_size);
3443 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3444 			   old_ecmp_size, old_adj_index);
3445 	if (err) {
3446 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3447 		goto set_trap;
3448 	}
3449 
3450 	/* Offload state within the group changed, so update the flags. */
3451 	mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3452 
3453 	return;
3454 
3455 set_trap:
3456 	old_adj_index_valid = nh_grp->adj_index_valid;
3457 	nh_grp->adj_index_valid = 0;
3458 	for (i = 0; i < nh_grp->count; i++) {
3459 		nh = &nh_grp->nexthops[i];
3460 		nh->offloaded = 0;
3461 	}
3462 	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3463 	if (err)
3464 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3465 	if (old_adj_index_valid)
3466 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3467 				   nh_grp->ecmp_size, nh_grp->adj_index);
3468 }
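
/* To summarize the refresh above in make-before-break terms: a new KVD
 * linear region of the fixed-up size is allocated and fully written
 * first; routes are then flipped over, either by re-writing the FIB
 * entries (when the group previously trapped to the CPU) or by a RALEU
 * mass-update of the adjacency index; only then is the old region freed.
 * Any failure along the way falls back to set_trap, which keeps traffic
 * flowing through the kernel rather than blackholing it.
 */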
3469 
3470 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3471 					    bool removing)
3472 {
3473 	if (!removing)
3474 		nh->should_offload = 1;
3475 	else
3476 		nh->should_offload = 0;
3477 	nh->update = 1;
3478 }
3479 
3480 static int
3481 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3482 				    struct mlxsw_sp_neigh_entry *neigh_entry)
3483 {
3484 	struct neighbour *n, *old_n = neigh_entry->key.n;
3485 	struct mlxsw_sp_nexthop *nh;
3486 	bool entry_connected;
3487 	u8 nud_state, dead;
3488 	int err;
3489 
3490 	nh = list_first_entry(&neigh_entry->nexthop_list,
3491 			      struct mlxsw_sp_nexthop, neigh_list_node);
3492 
3493 	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3494 	if (!n) {
3495 		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3496 				 nh->rif->dev);
3497 		if (IS_ERR(n))
3498 			return PTR_ERR(n);
3499 		neigh_event_send(n, NULL);
3500 	}
3501 
3502 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3503 	neigh_entry->key.n = n;
3504 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3505 	if (err)
3506 		goto err_neigh_entry_insert;
3507 
3508 	read_lock_bh(&n->lock);
3509 	nud_state = n->nud_state;
3510 	dead = n->dead;
3511 	read_unlock_bh(&n->lock);
3512 	entry_connected = nud_state & NUD_VALID && !dead;
3513 
3514 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3515 			    neigh_list_node) {
3516 		neigh_release(old_n);
3517 		neigh_clone(n);
3518 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3519 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3520 	}
3521 
3522 	neigh_release(n);
3523 
3524 	return 0;
3525 
3526 err_neigh_entry_insert:
3527 	neigh_entry->key.n = old_n;
3528 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3529 	neigh_release(n);
3530 	return err;
3531 }
3532 
3533 static void
3534 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3535 			      struct mlxsw_sp_neigh_entry *neigh_entry,
3536 			      bool removing, bool dead)
3537 {
3538 	struct mlxsw_sp_nexthop *nh;
3539 
3540 	if (list_empty(&neigh_entry->nexthop_list))
3541 		return;
3542 
3543 	if (dead) {
3544 		int err;
3545 
3546 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3547 							  neigh_entry);
3548 		if (err)
3549 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3550 		return;
3551 	}
3552 
3553 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
3554 			    neigh_list_node) {
3555 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3556 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3557 	}
3558 }
3559 
3560 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3561 				      struct mlxsw_sp_rif *rif)
3562 {
3563 	if (nh->rif)
3564 		return;
3565 
3566 	nh->rif = rif;
3567 	list_add(&nh->rif_list_node, &rif->nexthop_list);
3568 }
3569 
3570 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3571 {
3572 	if (!nh->rif)
3573 		return;
3574 
3575 	list_del(&nh->rif_list_node);
3576 	nh->rif = NULL;
3577 }
3578 
3579 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3580 				       struct mlxsw_sp_nexthop *nh)
3581 {
3582 	struct mlxsw_sp_neigh_entry *neigh_entry;
3583 	struct neighbour *n;
3584 	u8 nud_state, dead;
3585 	int err;
3586 
3587 	if (!nh->nh_grp->gateway || nh->neigh_entry)
3588 		return 0;
3589 
3590 	/* Take a reference on the neighbour here, ensuring that it is
3591 	 * not destroyed before the nexthop entry is finished with it.
3592 	 * The reference is taken either in neigh_lookup() or, in case
3593 	 * n is not found, in neigh_create().
3594 	 */
3595 	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3596 	if (!n) {
3597 		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3598 				 nh->rif->dev);
3599 		if (IS_ERR(n))
3600 			return PTR_ERR(n);
3601 		neigh_event_send(n, NULL);
3602 	}
3603 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3604 	if (!neigh_entry) {
3605 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3606 		if (IS_ERR(neigh_entry)) {
3607 			err = PTR_ERR(neigh_entry);
3608 			goto err_neigh_entry_create;
3609 		}
3610 	}
3611 
3612 	/* If this is the first nexthop connected to that neigh, add it
3613 	 * to nexthop_neighs_list.
3614 	 */
3615 	if (list_empty(&neigh_entry->nexthop_list))
3616 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3617 			      &mlxsw_sp->router->nexthop_neighs_list);
3618 
3619 	nh->neigh_entry = neigh_entry;
3620 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3621 	read_lock_bh(&n->lock);
3622 	nud_state = n->nud_state;
3623 	dead = n->dead;
3624 	read_unlock_bh(&n->lock);
3625 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3626 
3627 	return 0;
3628 
3629 err_neigh_entry_create:
3630 	neigh_release(n);
3631 	return err;
3632 }
3633 
3634 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3635 					struct mlxsw_sp_nexthop *nh)
3636 {
3637 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3638 	struct neighbour *n;
3639 
3640 	if (!neigh_entry)
3641 		return;
3642 	n = neigh_entry->key.n;
3643 
3644 	__mlxsw_sp_nexthop_neigh_update(nh, true);
3645 	list_del(&nh->neigh_list_node);
3646 	nh->neigh_entry = NULL;
3647 
3648 	/* If this is the last nexthop connected to that neigh, remove it
3649 	 * from nexthop_neighs_list.
3650 	 */
3651 	if (list_empty(&neigh_entry->nexthop_list))
3652 		list_del(&neigh_entry->nexthop_neighs_list_node);
3653 
3654 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3655 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3656 
3657 	neigh_release(n);
3658 }
3659 
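/* An IP-in-IP nexthop is only usable while the tunnel's underlay device
 * is up. A tunnel without an explicit underlay device is treated as if
 * its underlay were always up.
 */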
3660 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3661 {
3662 	struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3663 
3664 	return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3665 }
3666 
3667 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3668 				       struct mlxsw_sp_nexthop *nh,
3669 				       struct mlxsw_sp_ipip_entry *ipip_entry)
3670 {
3671 	bool removing;
3672 
3673 	if (!nh->nh_grp->gateway || nh->ipip_entry)
3674 		return;
3675 
3676 	nh->ipip_entry = ipip_entry;
3677 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3678 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
3679 	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3680 }
3681 
3682 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3683 				       struct mlxsw_sp_nexthop *nh)
3684 {
3685 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3686 
3687 	if (!ipip_entry)
3688 		return;
3689 
3690 	__mlxsw_sp_nexthop_neigh_update(nh, true);
3691 	nh->ipip_entry = NULL;
3692 }
3693 
3694 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3695 					const struct fib_nh *fib_nh,
3696 					enum mlxsw_sp_ipip_type *p_ipipt)
3697 {
3698 	struct net_device *dev = fib_nh->fib_nh_dev;
3699 
3700 	return dev &&
3701 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3702 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3703 }
3704 
3705 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3706 				       struct mlxsw_sp_nexthop *nh)
3707 {
3708 	switch (nh->type) {
3709 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
3710 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3711 		mlxsw_sp_nexthop_rif_fini(nh);
3712 		break;
3713 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3714 		mlxsw_sp_nexthop_rif_fini(nh);
3715 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3716 		break;
3717 	}
3718 }
3719 
3720 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3721 				       struct mlxsw_sp_nexthop *nh,
3722 				       struct fib_nh *fib_nh)
3723 {
3724 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3725 	struct net_device *dev = fib_nh->fib_nh_dev;
3726 	struct mlxsw_sp_ipip_entry *ipip_entry;
3727 	struct mlxsw_sp_rif *rif;
3728 	int err;
3729 
3730 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3731 	if (ipip_entry) {
3732 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3733 		if (ipip_ops->can_offload(mlxsw_sp, dev,
3734 					  MLXSW_SP_L3_PROTO_IPV4)) {
3735 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3736 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3737 			return 0;
3738 		}
3739 	}
3740 
3741 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3742 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3743 	if (!rif)
3744 		return 0;
3745 
3746 	mlxsw_sp_nexthop_rif_init(nh, rif);
3747 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3748 	if (err)
3749 		goto err_neigh_init;
3750 
3751 	return 0;
3752 
3753 err_neigh_init:
3754 	mlxsw_sp_nexthop_rif_fini(nh);
3755 	return err;
3756 }
3757 
3758 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3759 					struct mlxsw_sp_nexthop *nh)
3760 {
3761 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3762 }
3763 
3764 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3765 				  struct mlxsw_sp_nexthop_group *nh_grp,
3766 				  struct mlxsw_sp_nexthop *nh,
3767 				  struct fib_nh *fib_nh)
3768 {
3769 	struct net_device *dev = fib_nh->fib_nh_dev;
3770 	struct in_device *in_dev;
3771 	int err;
3772 
3773 	nh->nh_grp = nh_grp;
3774 	nh->key.fib_nh = fib_nh;
3775 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3776 	nh->nh_weight = fib_nh->fib_nh_weight;
3777 #else
3778 	nh->nh_weight = 1;
3779 #endif
3780 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
3781 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3782 	if (err)
3783 		return err;
3784 
3785 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3786 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3787 
3788 	if (!dev)
3789 		return 0;
3790 
3791 	in_dev = __in_dev_get_rtnl(dev);
3792 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3793 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
3794 		return 0;
3795 
3796 	err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3797 	if (err)
3798 		goto err_nexthop_neigh_init;
3799 
3800 	return 0;
3801 
3802 err_nexthop_neigh_init:
3803 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3804 	return err;
3805 }
3806 
3807 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3808 				   struct mlxsw_sp_nexthop *nh)
3809 {
3810 	mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3811 	list_del(&nh->router_list_node);
3812 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
3813 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3814 }
3815 
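/* React to FIB_EVENT_NH_ADD / FIB_EVENT_NH_DEL on a nexthop that is
 * already tracked, e.g. when the kernel marks an existing nexthop dead
 * or alive again following a carrier change. Only the type-specific
 * state (neighbour, RIF or tunnel binding) is torn down or rebuilt here
 * before the group is refreshed.
 */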
3816 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3817 				    unsigned long event, struct fib_nh *fib_nh)
3818 {
3819 	struct mlxsw_sp_nexthop_key key;
3820 	struct mlxsw_sp_nexthop *nh;
3821 
3822 	if (mlxsw_sp->router->aborted)
3823 		return;
3824 
3825 	key.fib_nh = fib_nh;
3826 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3827 	if (WARN_ON_ONCE(!nh))
3828 		return;
3829 
3830 	switch (event) {
3831 	case FIB_EVENT_NH_ADD:
3832 		mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3833 		break;
3834 	case FIB_EVENT_NH_DEL:
3835 		mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
3836 		break;
3837 	}
3838 
3839 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3840 }
3841 
3842 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3843 					struct mlxsw_sp_rif *rif)
3844 {
3845 	struct mlxsw_sp_nexthop *nh;
3846 	bool removing;
3847 
3848 	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3849 		switch (nh->type) {
3850 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
3851 			removing = false;
3852 			break;
3853 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3854 			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3855 			break;
3856 		default:
3857 			WARN_ON(1);
3858 			continue;
3859 		}
3860 
3861 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
3862 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3863 	}
3864 }
3865 
3866 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3867 					 struct mlxsw_sp_rif *old_rif,
3868 					 struct mlxsw_sp_rif *new_rif)
3869 {
3870 	struct mlxsw_sp_nexthop *nh;
3871 
3872 	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3873 	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3874 		nh->rif = new_rif;
3875 	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3876 }
3877 
3878 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
3879 					   struct mlxsw_sp_rif *rif)
3880 {
3881 	struct mlxsw_sp_nexthop *nh, *tmp;
3882 
3883 	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
3884 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3885 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3886 	}
3887 }
3888 
3889 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3890 				   struct fib_info *fi)
3891 {
3892 	const struct fib_nh *nh = fib_info_nh(fi, 0);
3893 
3894 	return nh->fib_nh_scope == RT_SCOPE_LINK ||
3895 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
3896 }
3897 
3898 static struct mlxsw_sp_nexthop_group *
3899 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
3900 {
3901 	unsigned int nhs = fib_info_num_path(fi);
3902 	struct mlxsw_sp_nexthop_group *nh_grp;
3903 	struct mlxsw_sp_nexthop *nh;
3904 	struct fib_nh *fib_nh;
3905 	int i;
3906 	int err;
3907 
3908 	nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
3909 	if (!nh_grp)
3910 		return ERR_PTR(-ENOMEM);
3911 	nh_grp->priv = fi;
3912 	INIT_LIST_HEAD(&nh_grp->fib_list);
3913 	nh_grp->neigh_tbl = &arp_tbl;
3914 
3915 	nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
3916 	nh_grp->count = nhs;
3917 	fib_info_hold(fi);
3918 	for (i = 0; i < nh_grp->count; i++) {
3919 		nh = &nh_grp->nexthops[i];
3920 		fib_nh = fib_info_nh(fi, i);
3921 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
3922 		if (err)
3923 			goto err_nexthop4_init;
3924 	}
3925 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3926 	if (err)
3927 		goto err_nexthop_group_insert;
3928 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3929 	return nh_grp;
3930 
3931 err_nexthop_group_insert:
3932 err_nexthop4_init:
3933 	for (i--; i >= 0; i--) {
3934 		nh = &nh_grp->nexthops[i];
3935 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
3936 	}
3937 	fib_info_put(fi);
3938 	kfree(nh_grp);
3939 	return ERR_PTR(err);
3940 }
3941 
3942 static void
3943 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3944 				struct mlxsw_sp_nexthop_group *nh_grp)
3945 {
3946 	struct mlxsw_sp_nexthop *nh;
3947 	int i;
3948 
3949 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
3950 	for (i = 0; i < nh_grp->count; i++) {
3951 		nh = &nh_grp->nexthops[i];
3952 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
3953 	}
3954 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3955 	WARN_ON_ONCE(nh_grp->adj_index_valid);
3956 	fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
3957 	kfree(nh_grp);
3958 }
3959 
3960 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3961 				       struct mlxsw_sp_fib_entry *fib_entry,
3962 				       struct fib_info *fi)
3963 {
3964 	struct mlxsw_sp_nexthop_group *nh_grp;
3965 
3966 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
3967 	if (!nh_grp) {
3968 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
3969 		if (IS_ERR(nh_grp))
3970 			return PTR_ERR(nh_grp);
3971 	}
3972 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3973 	fib_entry->nh_group = nh_grp;
3974 	return 0;
3975 }
3976 
3977 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3978 					struct mlxsw_sp_fib_entry *fib_entry)
3979 {
3980 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3981 
3982 	list_del(&fib_entry->nexthop_group_node);
3983 	if (!list_empty(&nh_grp->fib_list))
3984 		return;
3985 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
3986 }
3987 
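/* Only routes with the default TOS are offloaded; the RALUE lookup key
 * built in mlxsw_sp_fib_entry_ralue_pack() carries no TOS, so non-zero
 * TOS entries must be left to the kernel.
 */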
3988 static bool
3989 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3990 {
3991 	struct mlxsw_sp_fib4_entry *fib4_entry;
3992 
3993 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3994 				  common);
3995 	return !fib4_entry->tos;
3996 }
3997 
3998 static bool
3999 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4000 {
4001 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4002 
4003 	switch (fib_entry->fib_node->fib->proto) {
4004 	case MLXSW_SP_L3_PROTO_IPV4:
4005 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4006 			return false;
4007 		break;
4008 	case MLXSW_SP_L3_PROTO_IPV6:
4009 		break;
4010 	}
4011 
4012 	switch (fib_entry->type) {
4013 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4014 		return !!nh_group->adj_index_valid;
4015 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4016 		return !!nh_group->nh_rif;
4017 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4018 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4019 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4020 		return true;
4021 	default:
4022 		return false;
4023 	}
4024 }
4025 
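/* Find the nexthop in @nh_grp that corresponds to the given IPv6 route,
 * by matching both the egress device and the gateway address. NULL
 * means the route currently has no backing nexthop in the group.
 */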
4026 static struct mlxsw_sp_nexthop *
4027 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4028 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4029 {
4030 	int i;
4031 
4032 	for (i = 0; i < nh_grp->count; i++) {
4033 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4034 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
4035 
4036 		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4037 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4038 				    &rt->fib6_nh->fib_nh_gw6))
4039 			return nh;
4041 	}
4042 
4043 	return NULL;
4044 }
4045 
4046 static void
4047 mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
4048 {
4049 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4050 	int i;
4051 
4052 	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
4053 	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
4054 	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
4055 	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
4056 		nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4057 		return;
4058 	}
4059 
4060 	for (i = 0; i < nh_grp->count; i++) {
4061 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4062 
4063 		if (nh->offloaded)
4064 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4065 		else
4066 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4067 	}
4068 }
4069 
4070 static void
4071 mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4072 {
4073 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4074 	int i;
4075 
4076 	if (!list_is_singular(&nh_grp->fib_list))
4077 		return;
4078 
4079 	for (i = 0; i < nh_grp->count; i++) {
4080 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4081 
4082 		nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4083 	}
4084 }
4085 
4086 static void
4087 mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
4088 {
4089 	struct mlxsw_sp_fib6_entry *fib6_entry;
4090 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4091 
4092 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4093 				  common);
4094 
4095 	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
4096 	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
4097 		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4098 				 list)->rt->fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4099 		return;
4100 	}
4101 
4102 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4103 		struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4104 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
4105 		struct mlxsw_sp_nexthop *nh;
4106 
4107 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
4108 		if (nh && nh->offloaded)
4109 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4110 		else
4111 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4112 	}
4113 }
4114 
4115 static void
4116 mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4117 {
4118 	struct mlxsw_sp_fib6_entry *fib6_entry;
4119 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4120 
4121 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4122 				  common);
4123 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4124 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
4125 
4126 		rt->fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4127 	}
4128 }
4129 
4130 static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
4131 {
4132 	switch (fib_entry->fib_node->fib->proto) {
4133 	case MLXSW_SP_L3_PROTO_IPV4:
4134 		mlxsw_sp_fib4_entry_offload_set(fib_entry);
4135 		break;
4136 	case MLXSW_SP_L3_PROTO_IPV6:
4137 		mlxsw_sp_fib6_entry_offload_set(fib_entry);
4138 		break;
4139 	}
4140 }
4141 
4142 static void
4143 mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
4144 {
4145 	switch (fib_entry->fib_node->fib->proto) {
4146 	case MLXSW_SP_L3_PROTO_IPV4:
4147 		mlxsw_sp_fib4_entry_offload_unset(fib_entry);
4148 		break;
4149 	case MLXSW_SP_L3_PROTO_IPV6:
4150 		mlxsw_sp_fib6_entry_offload_unset(fib_entry);
4151 		break;
4152 	}
4153 }
4154 
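/* Reflect the outcome of a hardware operation in the RTNH_F_OFFLOAD
 * flags of the corresponding kernel routes, which is what user space
 * (e.g. the "offload" keyword in 'ip route' output) observes.
 */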
4155 static void
4156 mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
4157 				   enum mlxsw_reg_ralue_op op, int err)
4158 {
4159 	switch (op) {
4160 	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
4161 		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
4162 	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
4163 		if (err)
4164 			return;
4165 		if (mlxsw_sp_fib_entry_should_offload(fib_entry))
4166 			mlxsw_sp_fib_entry_offload_set(fib_entry);
4167 		else
4168 			mlxsw_sp_fib_entry_offload_unset(fib_entry);
4169 		return;
4170 	default:
4171 		return;
4172 	}
4173 }
4174 
4175 static void
4176 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
4177 			      const struct mlxsw_sp_fib_entry *fib_entry,
4178 			      enum mlxsw_reg_ralue_op op)
4179 {
4180 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4181 	enum mlxsw_reg_ralxx_protocol proto;
4182 	u32 *p_dip;
4183 
4184 	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
4185 
4186 	switch (fib->proto) {
4187 	case MLXSW_SP_L3_PROTO_IPV4:
4188 		p_dip = (u32 *) fib_entry->fib_node->key.addr;
4189 		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
4190 				      fib_entry->fib_node->key.prefix_len,
4191 				      *p_dip);
4192 		break;
4193 	case MLXSW_SP_L3_PROTO_IPV6:
4194 		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
4195 				      fib_entry->fib_node->key.prefix_len,
4196 				      fib_entry->fib_node->key.addr);
4197 		break;
4198 	}
4199 }
4200 
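/* Program the router's single reserved discard adjacency entry
 * (adj_discard_index). It backs gateway routes that have valid nexthops
 * of which none is currently resolved, so that such traffic is dropped
 * in hardware instead of being trapped to the CPU; see
 * mlxsw_sp_fib_entry_op_remote().
 */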
4201 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
4202 {
4203 	u32 adj_discard_index = mlxsw_sp->router->adj_discard_index;
4204 	enum mlxsw_reg_ratr_trap_action trap_action;
4205 	char ratr_pl[MLXSW_REG_RATR_LEN];
4206 
4207 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
4208 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4209 			    MLXSW_REG_RATR_TYPE_ETHERNET, adj_discard_index,
4210 			    rif_index);
4211 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4212 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4213 }
4214 
4215 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4216 					struct mlxsw_sp_fib_entry *fib_entry,
4217 					enum mlxsw_reg_ralue_op op)
4218 {
4219 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4220 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4221 	enum mlxsw_reg_ralue_trap_action trap_action;
4222 	u16 trap_id = 0;
4223 	u32 adjacency_index = 0;
4224 	u16 ecmp_size = 0;
4225 	int err;
4226 
4227 	/* If the nexthop group adjacency index is valid, use it with the
4228 	 * provided ECMP size. If no nexthop is resolved, use the discard
4229 	 * adjacency entry. Otherwise, set up a trap towards the kernel.
4230 	 */
4231 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4232 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4233 		adjacency_index = fib_entry->nh_group->adj_index;
4234 		ecmp_size = fib_entry->nh_group->ecmp_size;
4235 	} else if (!nh_group->adj_index_valid && nh_group->count &&
4236 		   nh_group->nh_rif) {
4237 		err = mlxsw_sp_adj_discard_write(mlxsw_sp,
4238 						 nh_group->nh_rif->rif_index);
4239 		if (err)
4240 			return err;
4241 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4242 		adjacency_index = mlxsw_sp->router->adj_discard_index;
4243 		ecmp_size = 1;
4244 	} else {
4245 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4246 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4247 	}
4248 
4249 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4250 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4251 					adjacency_index, ecmp_size);
4252 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4253 }
4254 
4255 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4256 				       struct mlxsw_sp_fib_entry *fib_entry,
4257 				       enum mlxsw_reg_ralue_op op)
4258 {
4259 	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
4260 	enum mlxsw_reg_ralue_trap_action trap_action;
4261 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4262 	u16 trap_id = 0;
4263 	u16 rif_index = 0;
4264 
4265 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4266 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4267 		rif_index = rif->rif_index;
4268 	} else {
4269 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4270 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4271 	}
4272 
4273 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4274 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4275 				       rif_index);
4276 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4277 }
4278 
4279 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4280 				      struct mlxsw_sp_fib_entry *fib_entry,
4281 				      enum mlxsw_reg_ralue_op op)
4282 {
4283 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4284 
4285 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4286 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4287 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4288 }
4289 
4290 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
4291 					   struct mlxsw_sp_fib_entry *fib_entry,
4292 					   enum mlxsw_reg_ralue_op op)
4293 {
4294 	enum mlxsw_reg_ralue_trap_action trap_action;
4295 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4296 
4297 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
4298 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4299 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
4300 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4301 }
4302 
4303 static int
4304 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
4305 				  struct mlxsw_sp_fib_entry *fib_entry,
4306 				  enum mlxsw_reg_ralue_op op)
4307 {
4308 	enum mlxsw_reg_ralue_trap_action trap_action;
4309 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4310 	u16 trap_id;
4311 
4312 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4313 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
4314 
4315 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4316 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
4317 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4318 }
4319 
4320 static int
4321 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4322 				 struct mlxsw_sp_fib_entry *fib_entry,
4323 				 enum mlxsw_reg_ralue_op op)
4324 {
4325 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4326 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4327 
4328 	if (WARN_ON(!ipip_entry))
4329 		return -EINVAL;
4330 
4331 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4332 	return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4333 				      fib_entry->decap.tunnel_index);
4334 }
4335 
4336 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4337 					   struct mlxsw_sp_fib_entry *fib_entry,
4338 					   enum mlxsw_reg_ralue_op op)
4339 {
4340 	char ralue_pl[MLXSW_REG_RALUE_LEN];
4341 
4342 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
4343 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
4344 					   fib_entry->decap.tunnel_index);
4345 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4346 }
4347 
4348 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4349 				   struct mlxsw_sp_fib_entry *fib_entry,
4350 				   enum mlxsw_reg_ralue_op op)
4351 {
4352 	switch (fib_entry->type) {
4353 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4354 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
4355 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4356 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
4357 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4358 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
4359 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4360 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
4361 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
4362 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
4363 							 op);
4364 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4365 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4366 							fib_entry, op);
4367 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4368 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
4369 	}
4370 	return -EINVAL;
4371 }
4372 
4373 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4374 				 struct mlxsw_sp_fib_entry *fib_entry,
4375 				 enum mlxsw_reg_ralue_op op)
4376 {
4377 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
4378 
4379 	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
4380 
4381 	return err;
4382 }
4383 
4384 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4385 				     struct mlxsw_sp_fib_entry *fib_entry)
4386 {
4387 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4388 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
4389 }
4390 
4391 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4392 				  struct mlxsw_sp_fib_entry *fib_entry)
4393 {
4394 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4395 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
4396 }
4397 
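/* Map the kernel route type onto a FIB entry type, which later selects
 * the RALUE action in __mlxsw_sp_fib_entry_op(). Local routes are first
 * checked for IP-in-IP and NVE decapsulation; otherwise they, like
 * broadcast routes, are trapped. Blackhole routes are discarded in
 * hardware, unreachable and prohibit routes are trapped with a lower
 * priority, and unicast routes are forwarded either remotely (via a
 * gateway) or locally (directly connected).
 */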
4398 static int
4399 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4400 			     const struct fib_entry_notifier_info *fen_info,
4401 			     struct mlxsw_sp_fib_entry *fib_entry)
4402 {
4403 	struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
4404 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4405 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4406 	struct mlxsw_sp_ipip_entry *ipip_entry;
4407 	struct fib_info *fi = fen_info->fi;
4408 
4409 	switch (fen_info->type) {
4410 	case RTN_LOCAL:
4411 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4412 						 MLXSW_SP_L3_PROTO_IPV4, dip);
4413 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4414 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4415 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4416 							     fib_entry,
4417 							     ipip_entry);
4418 		}
4419 		if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id,
4420 						     dip.addr4)) {
4421 			u32 t_index;
4422 
4423 			t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp);
4424 			fib_entry->decap.tunnel_index = t_index;
4425 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4426 			return 0;
4427 		}
4428 		/* fall through */
4429 	case RTN_BROADCAST:
4430 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4431 		return 0;
4432 	case RTN_BLACKHOLE:
4433 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
4434 		return 0;
4435 	case RTN_UNREACHABLE: /* fall through */
4436 	case RTN_PROHIBIT:
4437 		/* Packets hitting these routes need to be trapped, but
4438 		 * can be trapped with a lower priority than packets
4439 		 * directed at the host, so use action type local instead.
4440 		 */
4441 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
4442 		return 0;
4443 	case RTN_UNICAST:
4444 		if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4445 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4446 		else
4447 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4448 		return 0;
4449 	default:
4450 		return -EINVAL;
4451 	}
4452 }
4453 
4454 static struct mlxsw_sp_fib4_entry *
4455 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4456 			   struct mlxsw_sp_fib_node *fib_node,
4457 			   const struct fib_entry_notifier_info *fen_info)
4458 {
4459 	struct mlxsw_sp_fib4_entry *fib4_entry;
4460 	struct mlxsw_sp_fib_entry *fib_entry;
4461 	int err;
4462 
4463 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4464 	if (!fib4_entry)
4465 		return ERR_PTR(-ENOMEM);
4466 	fib_entry = &fib4_entry->common;
4467 
4468 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4469 	if (err)
4470 		goto err_fib4_entry_type_set;
4471 
4472 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4473 	if (err)
4474 		goto err_nexthop4_group_get;
4475 
4476 	fib4_entry->prio = fen_info->fi->fib_priority;
4477 	fib4_entry->tb_id = fen_info->tb_id;
4478 	fib4_entry->type = fen_info->type;
4479 	fib4_entry->tos = fen_info->tos;
4480 
4481 	fib_entry->fib_node = fib_node;
4482 
4483 	return fib4_entry;
4484 
4485 err_nexthop4_group_get:
4486 err_fib4_entry_type_set:
4487 	kfree(fib4_entry);
4488 	return ERR_PTR(err);
4489 }
4490 
4491 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4492 					struct mlxsw_sp_fib4_entry *fib4_entry)
4493 {
4494 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4495 	kfree(fib4_entry);
4496 }
4497 
4498 static struct mlxsw_sp_fib4_entry *
4499 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4500 			   const struct fib_entry_notifier_info *fen_info)
4501 {
4502 	struct mlxsw_sp_fib4_entry *fib4_entry;
4503 	struct mlxsw_sp_fib_node *fib_node;
4504 	struct mlxsw_sp_fib *fib;
4505 	struct mlxsw_sp_vr *vr;
4506 
4507 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4508 	if (!vr)
4509 		return NULL;
4510 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4511 
4512 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4513 					    sizeof(fen_info->dst),
4514 					    fen_info->dst_len);
4515 	if (!fib_node)
4516 		return NULL;
4517 
4518 	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4519 		if (fib4_entry->tb_id == fen_info->tb_id &&
4520 		    fib4_entry->tos == fen_info->tos &&
4521 		    fib4_entry->type == fen_info->type &&
4522 		    mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4523 		    fen_info->fi) {
4524 			return fib4_entry;
4525 		}
4526 	}
4527 
4528 	return NULL;
4529 }
4530 
4531 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4532 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4533 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4534 	.key_len = sizeof(struct mlxsw_sp_fib_key),
4535 	.automatic_shrinking = true,
4536 };
4537 
4538 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4539 				    struct mlxsw_sp_fib_node *fib_node)
4540 {
4541 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4542 				      mlxsw_sp_fib_ht_params);
4543 }
4544 
4545 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4546 				     struct mlxsw_sp_fib_node *fib_node)
4547 {
4548 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4549 			       mlxsw_sp_fib_ht_params);
4550 }
4551 
4552 static struct mlxsw_sp_fib_node *
4553 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4554 			 size_t addr_len, unsigned char prefix_len)
4555 {
4556 	struct mlxsw_sp_fib_key key;
4557 
4558 	memset(&key, 0, sizeof(key));
4559 	memcpy(key.addr, addr, addr_len);
4560 	key.prefix_len = prefix_len;
4561 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4562 }
4563 
4564 static struct mlxsw_sp_fib_node *
4565 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4566 			 size_t addr_len, unsigned char prefix_len)
4567 {
4568 	struct mlxsw_sp_fib_node *fib_node;
4569 
4570 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4571 	if (!fib_node)
4572 		return NULL;
4573 
4574 	INIT_LIST_HEAD(&fib_node->entry_list);
4575 	list_add(&fib_node->list, &fib->node_list);
4576 	memcpy(fib_node->key.addr, addr, addr_len);
4577 	fib_node->key.prefix_len = prefix_len;
4578 
4579 	return fib_node;
4580 }
4581 
4582 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4583 {
4584 	list_del(&fib_node->list);
4585 	WARN_ON(!list_empty(&fib_node->entry_list));
4586 	kfree(fib_node);
4587 }
4588 
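/* Only the first entry in a FIB node's list is programmed into the
 * device; lower-priority duplicates of the same prefix are kept in
 * software only and promoted when the first entry is removed.
 */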
4589 static bool
4590 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4591 				 const struct mlxsw_sp_fib_entry *fib_entry)
4592 {
4593 	return list_first_entry(&fib_node->entry_list,
4594 				struct mlxsw_sp_fib_entry, list) == fib_entry;
4595 }
4596 
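/* Account the node's prefix length in the LPM tree bound to this
 * protocol. If the prefix length is not yet in use, get a tree covering
 * the extended prefix usage and rebind the virtual routers using this
 * FIB's protocol to it.
 */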
4597 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4598 				      struct mlxsw_sp_fib_node *fib_node)
4599 {
4600 	struct mlxsw_sp_prefix_usage req_prefix_usage;
4601 	struct mlxsw_sp_fib *fib = fib_node->fib;
4602 	struct mlxsw_sp_lpm_tree *lpm_tree;
4603 	int err;
4604 
4605 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4606 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4607 		goto out;
4608 
4609 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4610 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4611 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4612 					 fib->proto);
4613 	if (IS_ERR(lpm_tree))
4614 		return PTR_ERR(lpm_tree);
4615 
4616 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4617 	if (err)
4618 		goto err_lpm_tree_replace;
4619 
4620 out:
4621 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
4622 	return 0;
4623 
4624 err_lpm_tree_replace:
4625 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4626 	return err;
4627 }
4628 
4629 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4630 					 struct mlxsw_sp_fib_node *fib_node)
4631 {
4632 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4633 	struct mlxsw_sp_prefix_usage req_prefix_usage;
4634 	struct mlxsw_sp_fib *fib = fib_node->fib;
4635 	int err;
4636 
4637 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4638 		return;
4639 	/* Try to construct a new LPM tree from the current prefix usage
4640 	 * minus the no-longer-used length. If we fail, keep the old tree.
4641 	 */
4642 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4643 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4644 				    fib_node->key.prefix_len);
4645 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4646 					 fib->proto);
4647 	if (IS_ERR(lpm_tree))
4648 		return;
4649 
4650 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4651 	if (err)
4652 		goto err_lpm_tree_replace;
4653 
4654 	return;
4655 
4656 err_lpm_tree_replace:
4657 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4658 }
4659 
4660 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4661 				  struct mlxsw_sp_fib_node *fib_node,
4662 				  struct mlxsw_sp_fib *fib)
4663 {
4664 	int err;
4665 
4666 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
4667 	if (err)
4668 		return err;
4669 	fib_node->fib = fib;
4670 
4671 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
4672 	if (err)
4673 		goto err_fib_lpm_tree_link;
4674 
4675 	return 0;
4676 
4677 err_fib_lpm_tree_link:
4678 	fib_node->fib = NULL;
4679 	mlxsw_sp_fib_node_remove(fib, fib_node);
4680 	return err;
4681 }
4682 
4683 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4684 				   struct mlxsw_sp_fib_node *fib_node)
4685 {
4686 	struct mlxsw_sp_fib *fib = fib_node->fib;
4687 
4688 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
4689 	fib_node->fib = NULL;
4690 	mlxsw_sp_fib_node_remove(fib, fib_node);
4691 }
4692 
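/* Look up the FIB node for the given prefix, creating it (and taking a
 * reference on the virtual router) if needed. Paired with
 * mlxsw_sp_fib_node_put(), which frees the node and releases the
 * virtual router once the node's entry list is empty. An illustrative
 * IPv4 call, mirroring mlxsw_sp_router_fib4_add() below:
 *
 *	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, RT_TABLE_MAIN,
 *					 &fen_info->dst,
 *					 sizeof(fen_info->dst),
 *					 fen_info->dst_len,
 *					 MLXSW_SP_L3_PROTO_IPV4);
 */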
4693 static struct mlxsw_sp_fib_node *
4694 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4695 		      size_t addr_len, unsigned char prefix_len,
4696 		      enum mlxsw_sp_l3proto proto)
4697 {
4698 	struct mlxsw_sp_fib_node *fib_node;
4699 	struct mlxsw_sp_fib *fib;
4700 	struct mlxsw_sp_vr *vr;
4701 	int err;
4702 
4703 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
4704 	if (IS_ERR(vr))
4705 		return ERR_CAST(vr);
4706 	fib = mlxsw_sp_vr_fib(vr, proto);
4707 
4708 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
4709 	if (fib_node)
4710 		return fib_node;
4711 
4712 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
4713 	if (!fib_node) {
4714 		err = -ENOMEM;
4715 		goto err_fib_node_create;
4716 	}
4717 
4718 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4719 	if (err)
4720 		goto err_fib_node_init;
4721 
4722 	return fib_node;
4723 
4724 err_fib_node_init:
4725 	mlxsw_sp_fib_node_destroy(fib_node);
4726 err_fib_node_create:
4727 	mlxsw_sp_vr_put(mlxsw_sp, vr);
4728 	return ERR_PTR(err);
4729 }
4730 
4731 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4732 				  struct mlxsw_sp_fib_node *fib_node)
4733 {
4734 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
4735 
4736 	if (!list_empty(&fib_node->entry_list))
4737 		return;
4738 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
4739 	mlxsw_sp_fib_node_destroy(fib_node);
4740 	mlxsw_sp_vr_put(mlxsw_sp, vr);
4741 }
4742 
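/* A FIB node's entry list is kept sorted by descending table ID, then
 * descending TOS, then ascending priority. Return the entry in front of
 * which the new one should be placed, or NULL if no entry of the same
 * table ID ranks at or below it.
 */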
4743 static struct mlxsw_sp_fib4_entry *
4744 mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
4745 			      const struct mlxsw_sp_fib4_entry *new4_entry)
4746 {
4747 	struct mlxsw_sp_fib4_entry *fib4_entry;
4748 
4749 	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4750 		if (fib4_entry->tb_id > new4_entry->tb_id)
4751 			continue;
4752 		if (fib4_entry->tb_id != new4_entry->tb_id)
4753 			break;
4754 		if (fib4_entry->tos > new4_entry->tos)
4755 			continue;
4756 		if (fib4_entry->prio >= new4_entry->prio ||
4757 		    fib4_entry->tos < new4_entry->tos)
4758 			return fib4_entry;
4759 	}
4760 
4761 	return NULL;
4762 }
4763 
4764 static int
4765 mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4766 			       struct mlxsw_sp_fib4_entry *new4_entry)
4767 {
4768 	struct mlxsw_sp_fib_node *fib_node;
4769 
4770 	if (WARN_ON(!fib4_entry))
4771 		return -EINVAL;
4772 
4773 	fib_node = fib4_entry->common.fib_node;
4774 	list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4775 				 common.list) {
4776 		if (fib4_entry->tb_id != new4_entry->tb_id ||
4777 		    fib4_entry->tos != new4_entry->tos ||
4778 		    fib4_entry->prio != new4_entry->prio)
4779 			break;
4780 	}
4781 
4782 	list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
4783 	return 0;
4784 }
4785 
4786 static int
4787 mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
4788 			       bool replace, bool append)
4789 {
4790 	struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
4791 	struct mlxsw_sp_fib4_entry *fib4_entry;
4792 
4793 	fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
4794 
4795 	if (append)
4796 		return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4797 	if (replace && WARN_ON(!fib4_entry))
4798 		return -EINVAL;
4799 
4800 	/* Insert the new entry before the replaced one, so that we can
4801 	 * later remove the latter.
4802 	 */
4803 	if (fib4_entry) {
4804 		list_add_tail(&new4_entry->common.list,
4805 			      &fib4_entry->common.list);
4806 	} else {
4807 		struct mlxsw_sp_fib4_entry *last;
4808 
4809 		list_for_each_entry(last, &fib_node->entry_list, common.list) {
4810 			if (new4_entry->tb_id > last->tb_id)
4811 				break;
4812 			fib4_entry = last;
4813 		}
4814 
4815 		if (fib4_entry)
4816 			list_add(&new4_entry->common.list,
4817 				 &fib4_entry->common.list);
4818 		else
4819 			list_add(&new4_entry->common.list,
4820 				 &fib_node->entry_list);
4821 	}
4822 
4823 	return 0;
4824 }
4825 
4826 static void
4827 mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
4828 {
4829 	list_del(&fib4_entry->common.list);
4830 }
4831 
4832 static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4833 				       struct mlxsw_sp_fib_entry *fib_entry)
4834 {
4835 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4836 
4837 	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4838 		return 0;
4839 
4840 	/* To prevent packet loss, overwrite the previously offloaded
4841 	 * entry.
4842 	 */
4843 	if (!list_is_singular(&fib_node->entry_list)) {
4844 		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4845 		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4846 
4847 		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4848 	}
4849 
4850 	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4851 }
4852 
4853 static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4854 					struct mlxsw_sp_fib_entry *fib_entry)
4855 {
4856 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4857 
4858 	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4859 		return;
4860 
4861 	/* Promote the next entry by overwriting the deleted entry */
4862 	if (!list_is_singular(&fib_node->entry_list)) {
4863 		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4864 		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4865 
4866 		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4867 		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4868 		return;
4869 	}
4870 
4871 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4872 }
4873 
4874 static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
4875 					 struct mlxsw_sp_fib4_entry *fib4_entry,
4876 					 bool replace, bool append)
4877 {
4878 	int err;
4879 
4880 	err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
4881 	if (err)
4882 		return err;
4883 
4884 	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
4885 	if (err)
4886 		goto err_fib_node_entry_add;
4887 
4888 	return 0;
4889 
4890 err_fib_node_entry_add:
4891 	mlxsw_sp_fib4_node_list_remove(fib4_entry);
4892 	return err;
4893 }
4894 
4895 static void
4896 mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4897 				struct mlxsw_sp_fib4_entry *fib4_entry)
4898 {
4899 	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
4900 	mlxsw_sp_fib4_node_list_remove(fib4_entry);
4901 
4902 	if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4903 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
4904 }
4905 
4906 static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
4907 					struct mlxsw_sp_fib4_entry *fib4_entry,
4908 					bool replace)
4909 {
4910 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4911 	struct mlxsw_sp_fib4_entry *replaced;
4912 
4913 	if (!replace)
4914 		return;
4915 
4916 	/* We inserted the new entry before the replaced one */
4917 	replaced = list_next_entry(fib4_entry, common.list);
4918 
4919 	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4920 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
4921 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4922 }
4923 
4924 static int
4925 mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
4926 			 const struct fib_entry_notifier_info *fen_info,
4927 			 bool replace, bool append)
4928 {
4929 	struct mlxsw_sp_fib4_entry *fib4_entry;
4930 	struct mlxsw_sp_fib_node *fib_node;
4931 	int err;
4932 
4933 	if (mlxsw_sp->router->aborted)
4934 		return 0;
4935 
4936 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4937 					 &fen_info->dst, sizeof(fen_info->dst),
4938 					 fen_info->dst_len,
4939 					 MLXSW_SP_L3_PROTO_IPV4);
4940 	if (IS_ERR(fib_node)) {
4941 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4942 		return PTR_ERR(fib_node);
4943 	}
4944 
4945 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4946 	if (IS_ERR(fib4_entry)) {
4947 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
4948 		err = PTR_ERR(fib4_entry);
4949 		goto err_fib4_entry_create;
4950 	}
4951 
4952 	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
4953 					    append);
4954 	if (err) {
4955 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4956 		goto err_fib4_node_entry_link;
4957 	}
4958 
4959 	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
4960 
4961 	return 0;
4962 
4963 err_fib4_node_entry_link:
4964 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4965 err_fib4_entry_create:
4966 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4967 	return err;
4968 }
4969 
4970 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4971 				     struct fib_entry_notifier_info *fen_info)
4972 {
4973 	struct mlxsw_sp_fib4_entry *fib4_entry;
4974 	struct mlxsw_sp_fib_node *fib_node;
4975 
4976 	if (mlxsw_sp->router->aborted)
4977 		return;
4978 
4979 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4980 	if (WARN_ON(!fib4_entry))
4981 		return;
4982 	fib_node = fib4_entry->common.fib_node;
4983 
4984 	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4985 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
4986 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4987 }
4988 
4989 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
4990 {
4991 	/* Packets with a link-local destination IP arriving at the router
4992 	 * are trapped to the CPU, so there is no need to program specific
4993 	 * routes for them.
4994 	 */
4995 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
4996 		return true;
4997 
4998 	/* Multicast routes aren't supported, so ignore them. Neighbour
4999 	 * Discovery packets are specifically trapped.
5000 	 */
5001 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
5002 		return true;
5003 
5004 	/* Cloned routes are irrelevant in the forwarding path. */
5005 	if (rt->fib6_flags & RTF_CACHE)
5006 		return true;
5007 
5008 	return false;
5009 }
5010 
5011 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5012 {
5013 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5014 
5015 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
5016 	if (!mlxsw_sp_rt6)
5017 		return ERR_PTR(-ENOMEM);
5018 
5019 	/* In case of route replace, the replaced route is deleted
5020 	 * without notification. Take a reference to prevent accessing
5021 	 * freed memory.
5022 	 */
5023 	mlxsw_sp_rt6->rt = rt;
5024 	fib6_info_hold(rt);
5025 
5026 	return mlxsw_sp_rt6;
5027 }
5028 
5029 #if IS_ENABLED(CONFIG_IPV6)
5030 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5031 {
5032 	fib6_info_release(rt);
5033 }
5034 #else
5035 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5036 {
5037 }
5038 #endif
5039 
5040 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5041 {
5042 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
5043 	kfree(mlxsw_sp_rt6);
5044 }
5045 
5046 static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
5047 {
5048 	/* RTF_CACHE routes are ignored, see mlxsw_sp_fib6_rt_should_ignore() */
5049 	return !(rt->fib6_flags & RTF_ADDRCONF) &&
5050 		rt->fib6_nh->fib_nh_gw_family;
5051 }
5052 
5053 static struct fib6_info *
5054 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
5055 {
5056 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
5057 				list)->rt;
5058 }
5059 
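/* Find an existing entry that the new route can join as a multipath
 * sibling: same table, same metric, and both routes must be eligible
 * for multipath (see mlxsw_sp_fib6_rt_can_mp()). A replaced route never
 * joins an existing entry.
 */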
5060 static struct mlxsw_sp_fib6_entry *
5061 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5062 				 const struct fib6_info *nrt, bool replace)
5063 {
5064 	struct mlxsw_sp_fib6_entry *fib6_entry;
5065 
5066 	if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
5067 		return NULL;
5068 
5069 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5070 		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5071 
5072 		/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
5073 		 * virtual router.
5074 		 */
5075 		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
5076 			continue;
5077 		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
5078 			break;
5079 		if (rt->fib6_metric < nrt->fib6_metric)
5080 			continue;
5081 		if (rt->fib6_metric == nrt->fib6_metric &&
5082 		    mlxsw_sp_fib6_rt_can_mp(rt))
5083 			return fib6_entry;
5084 		if (rt->fib6_metric > nrt->fib6_metric)
5085 			break;
5086 	}
5087 
5088 	return NULL;
5089 }
5090 
5091 static struct mlxsw_sp_rt6 *
5092 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
5093 			    const struct fib6_info *rt)
5094 {
5095 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5096 
5097 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
5098 		if (mlxsw_sp_rt6->rt == rt)
5099 			return mlxsw_sp_rt6;
5100 	}
5101 
5102 	return NULL;
5103 }
5104 
5105 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
5106 					const struct fib6_info *rt,
5107 					enum mlxsw_sp_ipip_type *ret)
5108 {
5109 	return rt->fib6_nh->fib_nh_dev &&
5110 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
5111 }
5112 
5113 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
5114 				       struct mlxsw_sp_nexthop_group *nh_grp,
5115 				       struct mlxsw_sp_nexthop *nh,
5116 				       const struct fib6_info *rt)
5117 {
5118 	const struct mlxsw_sp_ipip_ops *ipip_ops;
5119 	struct mlxsw_sp_ipip_entry *ipip_entry;
5120 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5121 	struct mlxsw_sp_rif *rif;
5122 	int err;
5123 
5124 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
5125 	if (ipip_entry) {
5126 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5127 		if (ipip_ops->can_offload(mlxsw_sp, dev,
5128 					  MLXSW_SP_L3_PROTO_IPV6)) {
5129 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
5130 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
5131 			return 0;
5132 		}
5133 	}
5134 
5135 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
5136 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
5137 	if (!rif)
5138 		return 0;
5139 	mlxsw_sp_nexthop_rif_init(nh, rif);
5140 
5141 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
5142 	if (err)
5143 		goto err_nexthop_neigh_init;
5144 
5145 	return 0;
5146 
5147 err_nexthop_neigh_init:
5148 	mlxsw_sp_nexthop_rif_fini(nh);
5149 	return err;
5150 }
5151 
5152 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
5153 					struct mlxsw_sp_nexthop *nh)
5154 {
5155 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5156 }
5157 
5158 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
5159 				  struct mlxsw_sp_nexthop_group *nh_grp,
5160 				  struct mlxsw_sp_nexthop *nh,
5161 				  const struct fib6_info *rt)
5162 {
5163 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5164 
5165 	nh->nh_grp = nh_grp;
5166 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
5167 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
5168 	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5169 
5170 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5171 
5172 	if (!dev)
5173 		return 0;
5174 	nh->ifindex = dev->ifindex;
5175 
5176 	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
5177 }
5178 
5179 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5180 				   struct mlxsw_sp_nexthop *nh)
5181 {
5182 	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5183 	list_del(&nh->router_list_node);
5184 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5185 }
5186 
5187 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5188 				    const struct fib6_info *rt)
5189 {
5190 	return rt->fib6_nh->fib_nh_gw_family ||
5191 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
5192 }
5193 
5194 static struct mlxsw_sp_nexthop_group *
5195 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5196 			       struct mlxsw_sp_fib6_entry *fib6_entry)
5197 {
5198 	struct mlxsw_sp_nexthop_group *nh_grp;
5199 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5200 	struct mlxsw_sp_nexthop *nh;
5201 	int i = 0;
5202 	int err;
5203 
5204 	nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
5205 			 GFP_KERNEL);
5206 	if (!nh_grp)
5207 		return ERR_PTR(-ENOMEM);
5208 	INIT_LIST_HEAD(&nh_grp->fib_list);
5209 #if IS_ENABLED(CONFIG_IPV6)
5210 	nh_grp->neigh_tbl = &nd_tbl;
5211 #endif
5212 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5213 					struct mlxsw_sp_rt6, list);
5214 	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5215 	nh_grp->count = fib6_entry->nrt6;
5216 	for (i = 0; i < nh_grp->count; i++) {
5217 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5218 
5219 		nh = &nh_grp->nexthops[i];
5220 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5221 		if (err)
5222 			goto err_nexthop6_init;
5223 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5224 	}
5225 
5226 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5227 	if (err)
5228 		goto err_nexthop_group_insert;
5229 
5230 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5231 	return nh_grp;
5232 
5233 err_nexthop_group_insert:
5234 err_nexthop6_init:
5235 	for (i--; i >= 0; i--) {
5236 		nh = &nh_grp->nexthops[i];
5237 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5238 	}
5239 	kfree(nh_grp);
5240 	return ERR_PTR(err);
5241 }
5242 
5243 static void
5244 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5245 				struct mlxsw_sp_nexthop_group *nh_grp)
5246 {
5247 	struct mlxsw_sp_nexthop *nh;
5248 	int i = nh_grp->count;
5249 
5250 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5251 	for (i--; i >= 0; i--) {
5252 		nh = &nh_grp->nexthops[i];
5253 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5254 	}
5255 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5256 	WARN_ON(nh_grp->adj_index_valid);
5257 	kfree(nh_grp);
5258 }
5259 
5260 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5261 				       struct mlxsw_sp_fib6_entry *fib6_entry)
5262 {
5263 	struct mlxsw_sp_nexthop_group *nh_grp;
5264 
5265 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5266 	if (!nh_grp) {
5267 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5268 		if (IS_ERR(nh_grp))
5269 			return PTR_ERR(nh_grp);
5270 	}
5271 
5272 	list_add_tail(&fib6_entry->common.nexthop_group_node,
5273 		      &nh_grp->fib_list);
5274 	fib6_entry->common.nh_group = nh_grp;
5275 
5276 	return 0;
5277 }
5278 
5279 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5280 					struct mlxsw_sp_fib_entry *fib_entry)
5281 {
5282 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5283 
5284 	list_del(&fib_entry->nexthop_group_node);
5285 	if (!list_empty(&nh_grp->fib_list))
5286 		return;
5287 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
5288 }
5289 
5290 static int
5291 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5292 			       struct mlxsw_sp_fib6_entry *fib6_entry)
5293 {
5294 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5295 	int err;
5296 
5297 	fib6_entry->common.nh_group = NULL;
5298 	list_del(&fib6_entry->common.nexthop_group_node);
5299 
5300 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5301 	if (err)
5302 		goto err_nexthop6_group_get;
5303 
5304 	/* In case this entry is offloaded, the adjacency index currently
5305 	 * associated with it in the device's table is that of the old
5306 	 * group. Start using the new one instead.
5307 	 */
5308 	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5309 	if (err)
5310 		goto err_fib_node_entry_add;
5311 
5312 	if (list_empty(&old_nh_grp->fib_list))
5313 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5314 
5315 	return 0;
5316 
5317 err_fib_node_entry_add:
5318 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5319 err_nexthop6_group_get:
5320 	list_add_tail(&fib6_entry->common.nexthop_group_node,
5321 		      &old_nh_grp->fib_list);
5322 	fib6_entry->common.nh_group = old_nh_grp;
5323 	return err;
5324 }
5325 
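/* Append the routes in 'rt_arr' to the entry as additional nexthops and
 * migrate the entry to a matching nexthop group.
 */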
5326 static int
5327 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5328 				struct mlxsw_sp_fib6_entry *fib6_entry,
5329 				struct fib6_info **rt_arr, unsigned int nrt6)
5330 {
5331 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5332 	int err, i;
5333 
5334 	for (i = 0; i < nrt6; i++) {
5335 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5336 		if (IS_ERR(mlxsw_sp_rt6)) {
5337 			err = PTR_ERR(mlxsw_sp_rt6);
5338 			goto err_rt6_create;
5339 		}
5340 
5341 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5342 		fib6_entry->nrt6++;
5343 	}
5344 
5345 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5346 	if (err)
5347 		goto err_nexthop6_group_update;
5348 
5349 	return 0;
5350 
5351 err_nexthop6_group_update:
5352 	i = nrt6;
5353 err_rt6_create:
5354 	for (i--; i >= 0; i--) {
5355 		fib6_entry->nrt6--;
5356 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5357 					       struct mlxsw_sp_rt6, list);
5358 		list_del(&mlxsw_sp_rt6->list);
5359 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5360 	}
5361 	return err;
5362 }
5363 
5364 static void
5365 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5366 				struct mlxsw_sp_fib6_entry *fib6_entry,
5367 				struct fib6_info **rt_arr, unsigned int nrt6)
5368 {
5369 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5370 	int i;
5371 
5372 	for (i = 0; i < nrt6; i++) {
5373 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
5374 							   rt_arr[i]);
5375 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
5376 			continue;
5377 
5378 		fib6_entry->nrt6--;
5379 		list_del(&mlxsw_sp_rt6->list);
5380 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5381 	}
5382 
5383 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5384 }
5385 
5386 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5387 					 struct mlxsw_sp_fib_entry *fib_entry,
5388 					 const struct fib6_info *rt)
5389 {
5390 	/* Packets hitting RTF_REJECT routes need to be discarded by the
5391 	 * stack. We can rely on their destination device not having a
5392 	 * RIF (it's the loopback device) and can thus use action type
5393 	 * local, which will cause them to be trapped with a lower
5394 	 * priority than packets that need to be locally received.
5395 	 */
5396 	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5397 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5398 	else if (rt->fib6_type == RTN_BLACKHOLE)
5399 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5400 	else if (rt->fib6_flags & RTF_REJECT)
5401 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5402 	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5403 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5404 	else
5405 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5406 }
5407 
5408 static void
5409 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5410 {
5411 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5412 
5413 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5414 				 list) {
5415 		fib6_entry->nrt6--;
5416 		list_del(&mlxsw_sp_rt6->list);
5417 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5418 	}
5419 }
5420 
5421 static struct mlxsw_sp_fib6_entry *
5422 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5423 			   struct mlxsw_sp_fib_node *fib_node,
5424 			   struct fib6_info **rt_arr, unsigned int nrt6)
5425 {
5426 	struct mlxsw_sp_fib6_entry *fib6_entry;
5427 	struct mlxsw_sp_fib_entry *fib_entry;
5428 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5429 	int err, i;
5430 
5431 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5432 	if (!fib6_entry)
5433 		return ERR_PTR(-ENOMEM);
5434 	fib_entry = &fib6_entry->common;
5435 
5436 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
5437 
5438 	for (i = 0; i < nrt6; i++) {
5439 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5440 		if (IS_ERR(mlxsw_sp_rt6)) {
5441 			err = PTR_ERR(mlxsw_sp_rt6);
5442 			goto err_rt6_create;
5443 		}
5444 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5445 		fib6_entry->nrt6++;
5446 	}
5447 
5448 	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
5449 
5450 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5451 	if (err)
5452 		goto err_nexthop6_group_get;
5453 
5454 	fib_entry->fib_node = fib_node;
5455 
5456 	return fib6_entry;
5457 
5458 err_nexthop6_group_get:
5459 	i = nrt6;
5460 err_rt6_create:
5461 	for (i--; i >= 0; i--) {
5462 		fib6_entry->nrt6--;
5463 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5464 					       struct mlxsw_sp_rt6, list);
5465 		list_del(&mlxsw_sp_rt6->list);
5466 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5467 	}
5468 	kfree(fib6_entry);
5469 	return ERR_PTR(err);
5470 }
5471 
5472 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5473 					struct mlxsw_sp_fib6_entry *fib6_entry)
5474 {
5475 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5476 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5477 	WARN_ON(fib6_entry->nrt6);
5478 	kfree(fib6_entry);
5479 }
5480 
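/* Entries in a node are sorted by descending table ID and then by
 * ascending metric. Return the entry before which the new route should
 * be inserted, preferring an entry it can replace when 'replace' is set.
 */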
5481 static struct mlxsw_sp_fib6_entry *
5482 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
5483 			      const struct fib6_info *nrt, bool replace)
5484 {
5485 	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
5486 
5487 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5488 		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5489 
5490 		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
5491 			continue;
5492 		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
5493 			break;
5494 		if (replace && rt->fib6_metric == nrt->fib6_metric) {
5495 			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5496 			    mlxsw_sp_fib6_rt_can_mp(nrt))
5497 				return fib6_entry;
5498 			if (mlxsw_sp_fib6_rt_can_mp(nrt))
5499 				fallback = fallback ?: fib6_entry;
5500 		}
5501 		if (rt->fib6_metric > nrt->fib6_metric)
5502 			return fallback ?: fib6_entry;
5503 	}
5504 
5505 	return fallback;
5506 }
5507 
5508 static int
5509 mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5510 			       bool *p_replace)
5511 {
5512 	struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5513 	struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5514 	struct mlxsw_sp_fib6_entry *fib6_entry;
5515 
5516 	fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, *p_replace);
5517 
5518 	if (*p_replace && !fib6_entry)
5519 		*p_replace = false;
5520 
5521 	if (fib6_entry) {
5522 		list_add_tail(&new6_entry->common.list,
5523 			      &fib6_entry->common.list);
5524 	} else {
5525 		struct mlxsw_sp_fib6_entry *last;
5526 
5527 		list_for_each_entry(last, &fib_node->entry_list, common.list) {
5528 			struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5529 
5530 			if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
5531 				break;
5532 			fib6_entry = last;
5533 		}
5534 
5535 		if (fib6_entry)
5536 			list_add(&new6_entry->common.list,
5537 				 &fib6_entry->common.list);
5538 		else
5539 			list_add(&new6_entry->common.list,
5540 				 &fib_node->entry_list);
5541 	}
5542 
5543 	return 0;
5544 }
5545 
5546 static void
5547 mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5548 {
5549 	list_del(&fib6_entry->common.list);
5550 }
5551 
5552 static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
5553 					 struct mlxsw_sp_fib6_entry *fib6_entry,
5554 					 bool *p_replace)
5555 {
5556 	int err;
5557 
5558 	err = mlxsw_sp_fib6_node_list_insert(fib6_entry, p_replace);
5559 	if (err)
5560 		return err;
5561 
5562 	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5563 	if (err)
5564 		goto err_fib_node_entry_add;
5565 
5566 	return 0;
5567 
5568 err_fib_node_entry_add:
5569 	mlxsw_sp_fib6_node_list_remove(fib6_entry);
5570 	return err;
5571 }
5572 
5573 static void
5574 mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5575 				struct mlxsw_sp_fib6_entry *fib6_entry)
5576 {
5577 	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5578 	mlxsw_sp_fib6_node_list_remove(fib6_entry);
5579 }
5580 
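/* Find the FIB entry that 'rt' is part of: resolve the virtual router
 * from the route's table, look up the FIB node by prefix and then the
 * entry with a matching table ID and metric that contains the route.
 */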
5581 static struct mlxsw_sp_fib6_entry *
5582 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5583 			   const struct fib6_info *rt)
5584 {
5585 	struct mlxsw_sp_fib6_entry *fib6_entry;
5586 	struct mlxsw_sp_fib_node *fib_node;
5587 	struct mlxsw_sp_fib *fib;
5588 	struct mlxsw_sp_vr *vr;
5589 
5590 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5591 	if (!vr)
5592 		return NULL;
5593 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5594 
5595 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5596 					    sizeof(rt->fib6_dst.addr),
5597 					    rt->fib6_dst.plen);
5598 	if (!fib_node)
5599 		return NULL;
5600 
5601 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5602 		struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5603 
5604 		if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
5605 		    rt->fib6_metric == iter_rt->fib6_metric &&
5606 		    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5607 			return fib6_entry;
5608 	}
5609 
5610 	return NULL;
5611 }
5612 
5613 static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5614 					struct mlxsw_sp_fib6_entry *fib6_entry,
5615 					bool replace)
5616 {
5617 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5618 	struct mlxsw_sp_fib6_entry *replaced;
5619 
5620 	if (!replace)
5621 		return;
5622 
5623 	replaced = list_next_entry(fib6_entry, common.list);
5624 
5625 	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5626 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5627 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5628 }
5629 
5630 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
5631 				    struct fib6_info **rt_arr,
5632 				    unsigned int nrt6, bool replace)
5633 {
5634 	struct mlxsw_sp_fib6_entry *fib6_entry;
5635 	struct mlxsw_sp_fib_node *fib_node;
5636 	struct fib6_info *rt = rt_arr[0];
5637 	int err;
5638 
5639 	if (mlxsw_sp->router->aborted)
5640 		return 0;
5641 
5642 	if (rt->fib6_src.plen)
5643 		return -EINVAL;
5644 
5645 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5646 		return 0;
5647 
5648 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5649 					 &rt->fib6_dst.addr,
5650 					 sizeof(rt->fib6_dst.addr),
5651 					 rt->fib6_dst.plen,
5652 					 MLXSW_SP_L3_PROTO_IPV6);
5653 	if (IS_ERR(fib_node))
5654 		return PTR_ERR(fib_node);
5655 
5656 	/* Before creating a new entry, try to append the route to an
5657 	 * existing multipath entry.
5658 	 */
5659 	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
5660 	if (fib6_entry) {
5661 		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry,
5662 						      rt_arr, nrt6);
5663 		if (err)
5664 			goto err_fib6_entry_nexthop_add;
5665 		return 0;
5666 	}
5667 
5668 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
5669 						nrt6);
5670 	if (IS_ERR(fib6_entry)) {
5671 		err = PTR_ERR(fib6_entry);
5672 		goto err_fib6_entry_create;
5673 	}
5674 
5675 	err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, &replace);
5676 	if (err)
5677 		goto err_fib6_node_entry_link;
5678 
5679 	mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5680 
5681 	return 0;
5682 
5683 err_fib6_node_entry_link:
5684 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5685 err_fib6_entry_create:
5686 err_fib6_entry_nexthop_add:
5687 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5688 	return err;
5689 }
5690 
5691 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5692 				     struct fib6_info **rt_arr,
5693 				     unsigned int nrt6)
5694 {
5695 	struct mlxsw_sp_fib6_entry *fib6_entry;
5696 	struct mlxsw_sp_fib_node *fib_node;
5697 	struct fib6_info *rt = rt_arr[0];
5698 
5699 	if (mlxsw_sp->router->aborted)
5700 		return;
5701 
5702 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
5703 		return;
5704 
5705 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5706 	if (WARN_ON(!fib6_entry))
5707 		return;
5708 
5709 	/* If not all of the route's nexthops are being deleted, then only
5710 	 * remove the deleted ones from the nexthop group.
5711 	 */
5712 	if (nrt6 != fib6_entry->nrt6) {
5713 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
5714 						nrt6);
5715 		return;
5716 	}
5717 
5718 	fib_node = fib6_entry->common.fib_node;
5719 
5720 	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5721 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5722 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5723 }
5724 
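/* Program an LPM tree, bind it to all virtual routers and install in
 * each a default (zero-length prefix) route that traps packets to the
 * CPU, so the kernel takes over forwarding after the abort.
 */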
5725 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5726 					    enum mlxsw_reg_ralxx_protocol proto,
5727 					    u8 tree_id)
5728 {
5729 	char ralta_pl[MLXSW_REG_RALTA_LEN];
5730 	char ralst_pl[MLXSW_REG_RALST_LEN];
5731 	int i, err;
5732 
5733 	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
5734 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5735 	if (err)
5736 		return err;
5737 
5738 	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
5739 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5740 	if (err)
5741 		return err;
5742 
5743 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5744 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5745 		char raltb_pl[MLXSW_REG_RALTB_LEN];
5746 		char ralue_pl[MLXSW_REG_RALUE_LEN];
5747 
5748 		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
5749 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5750 				      raltb_pl);
5751 		if (err)
5752 			return err;
5753 
5754 		mlxsw_reg_ralue_pack(ralue_pl, proto,
5755 				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
5756 		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5757 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5758 				      ralue_pl);
5759 		if (err)
5760 			return err;
5761 	}
5762 
5763 	return 0;
5764 }
5765 
5766 static struct mlxsw_sp_mr_table *
5767 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5768 {
5769 	if (family == RTNL_FAMILY_IPMR)
5770 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5771 	else
5772 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5773 }
5774 
5775 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5776 				     struct mfc_entry_notifier_info *men_info,
5777 				     bool replace)
5778 {
5779 	struct mlxsw_sp_mr_table *mrt;
5780 	struct mlxsw_sp_vr *vr;
5781 
5782 	if (mlxsw_sp->router->aborted)
5783 		return 0;
5784 
5785 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
5786 	if (IS_ERR(vr))
5787 		return PTR_ERR(vr);
5788 
5789 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5790 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
5791 }
5792 
5793 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5794 				      struct mfc_entry_notifier_info *men_info)
5795 {
5796 	struct mlxsw_sp_mr_table *mrt;
5797 	struct mlxsw_sp_vr *vr;
5798 
5799 	if (mlxsw_sp->router->aborted)
5800 		return;
5801 
5802 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5803 	if (WARN_ON(!vr))
5804 		return;
5805 
5806 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5807 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
5808 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5809 }
5810 
5811 static int
5812 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5813 			      struct vif_entry_notifier_info *ven_info)
5814 {
5815 	struct mlxsw_sp_mr_table *mrt;
5816 	struct mlxsw_sp_rif *rif;
5817 	struct mlxsw_sp_vr *vr;
5818 
5819 	if (mlxsw_sp->router->aborted)
5820 		return 0;
5821 
5822 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
5823 	if (IS_ERR(vr))
5824 		return PTR_ERR(vr);
5825 
5826 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5827 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5828 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
5829 				   ven_info->vif_index,
5830 				   ven_info->vif_flags, rif);
5831 }
5832 
5833 static void
5834 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5835 			      struct vif_entry_notifier_info *ven_info)
5836 {
5837 	struct mlxsw_sp_mr_table *mrt;
5838 	struct mlxsw_sp_vr *vr;
5839 
5840 	if (mlxsw_sp->router->aborted)
5841 		return;
5842 
5843 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5844 	if (WARN_ON(!vr))
5845 		return;
5846 
5847 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5848 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
5849 	mlxsw_sp_vr_put(mlxsw_sp, vr);
5850 }
5851 
5852 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5853 {
5854 	enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5855 	int err;
5856 
5857 	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5858 					       MLXSW_SP_LPM_TREE_MIN);
5859 	if (err)
5860 		return err;
5861 
5862 	/* The multicast router code does not need an abort trap as, by
5863 	 * default, packets that don't match any routes are trapped to the CPU.
5864 	 */
5865 
5866 	proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5867 	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5868 						MLXSW_SP_LPM_TREE_MIN + 1);
5869 }
5870 
5871 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5872 				     struct mlxsw_sp_fib_node *fib_node)
5873 {
5874 	struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
5875 
5876 	list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5877 				 common.list) {
5878 		bool do_break = &tmp->common.list == &fib_node->entry_list;
5879 
5880 		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5881 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5882 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5883 		/* Break when the entry list is empty and the node was freed.
5884 		 * Otherwise, we'll access freed memory in the next
5885 		 * iteration.
5886 		 */
5887 		if (do_break)
5888 			break;
5889 	}
5890 }
5891 
5892 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5893 				     struct mlxsw_sp_fib_node *fib_node)
5894 {
5895 	struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5896 
5897 	list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5898 				 common.list) {
5899 		bool do_break = &tmp->common.list == &fib_node->entry_list;
5900 
5901 		mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5902 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5903 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5904 		if (do_break)
5905 			break;
5906 	}
5907 }
5908 
5909 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5910 				    struct mlxsw_sp_fib_node *fib_node)
5911 {
5912 	switch (fib_node->fib->proto) {
5913 	case MLXSW_SP_L3_PROTO_IPV4:
5914 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5915 		break;
5916 	case MLXSW_SP_L3_PROTO_IPV6:
5917 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
5918 		break;
5919 	}
5920 }
5921 
5922 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5923 				  struct mlxsw_sp_vr *vr,
5924 				  enum mlxsw_sp_l3proto proto)
5925 {
5926 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5927 	struct mlxsw_sp_fib_node *fib_node, *tmp;
5928 
5929 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5930 		bool do_break = &tmp->list == &fib->node_list;
5931 
5932 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5933 		if (do_break)
5934 			break;
5935 	}
5936 }
5937 
5938 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
5939 {
5940 	int i, j;
5941 
5942 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5943 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5944 
5945 		if (!mlxsw_sp_vr_is_used(vr))
5946 			continue;
5947 
5948 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5949 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
5950 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
5951 
5952 		/* If the virtual router was only used for IPv4, then it is
5953 		 * no longer in use.
5954 		 */
5955 		if (!mlxsw_sp_vr_is_used(vr))
5956 			continue;
5957 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
5958 	}
5959 }
5960 
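/* Called on the first error during FIB offload. Flush all offloaded
 * routes from the device and have it trap packets to the CPU instead.
 */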
5961 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
5962 {
5963 	int err;
5964 
5965 	if (mlxsw_sp->router->aborted)
5966 		return;
5967 	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
5968 	mlxsw_sp_router_fib_flush(mlxsw_sp);
5969 	mlxsw_sp->router->aborted = true;
5970 	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5971 	if (err)
5972 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5973 }
5974 
5975 struct mlxsw_sp_fib6_event_work {
5976 	struct fib6_info **rt_arr;
5977 	unsigned int nrt6;
5978 };
5979 
5980 struct mlxsw_sp_fib_event_work {
5981 	struct work_struct work;
5982 	union {
5983 		struct mlxsw_sp_fib6_event_work fib6_work;
5984 		struct fib_entry_notifier_info fen_info;
5985 		struct fib_rule_notifier_info fr_info;
5986 		struct fib_nh_notifier_info fnh_info;
5987 		struct mfc_entry_notifier_info men_info;
5988 		struct vif_entry_notifier_info ven_info;
5989 	};
5990 	struct mlxsw_sp *mlxsw_sp;
5991 	unsigned long event;
5992 };
5993 
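/* Copy the notified route and its siblings into an array, taking a
 * reference on each, so they can be safely used once the handler is
 * deferred to process context.
 */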
5994 static int
5995 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
5996 			       struct fib6_entry_notifier_info *fen6_info)
5997 {
5998 	struct fib6_info *rt = fen6_info->rt;
5999 	struct fib6_info **rt_arr;
6000 	struct fib6_info *iter;
6001 	unsigned int nrt6;
6002 	int i = 0;
6003 
6004 	nrt6 = fen6_info->nsiblings + 1;
6005 
6006 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
6007 	if (!rt_arr)
6008 		return -ENOMEM;
6009 
6010 	fib6_work->rt_arr = rt_arr;
6011 	fib6_work->nrt6 = nrt6;
6012 
6013 	rt_arr[0] = rt;
6014 	fib6_info_hold(rt);
6015 
6016 	if (!fen6_info->nsiblings)
6017 		return 0;
6018 
6019 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
6020 		if (i == fen6_info->nsiblings)
6021 			break;
6022 
6023 		rt_arr[i + 1] = iter;
6024 		fib6_info_hold(iter);
6025 		i++;
6026 	}
6027 	WARN_ON_ONCE(i != fen6_info->nsiblings);
6028 
6029 	return 0;
6030 }
6031 
6032 static void
6033 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
6034 {
6035 	int i;
6036 
6037 	for (i = 0; i < fib6_work->nrt6; i++)
6038 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
6039 	kfree(fib6_work->rt_arr);
6040 }
6041 
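/* FIB notifications arrive in atomic context, so the events are
 * processed in deferred work items where rtnl_lock can be taken.
 */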
6042 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
6043 {
6044 	struct mlxsw_sp_fib_event_work *fib_work =
6045 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6046 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6047 	bool replace, append;
6048 	int err;
6049 
6050 	/* Protect internal structures from changes */
6051 	rtnl_lock();
6052 	mlxsw_sp_span_respin(mlxsw_sp);
6053 
6054 	switch (fib_work->event) {
6055 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6056 	case FIB_EVENT_ENTRY_APPEND: /* fall through */
6057 	case FIB_EVENT_ENTRY_ADD:
6058 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6059 		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
6060 		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
6061 					       replace, append);
6062 		if (err)
6063 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6064 		fib_info_put(fib_work->fen_info.fi);
6065 		break;
6066 	case FIB_EVENT_ENTRY_DEL:
6067 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
6068 		fib_info_put(fib_work->fen_info.fi);
6069 		break;
6070 	case FIB_EVENT_NH_ADD: /* fall through */
6071 	case FIB_EVENT_NH_DEL:
6072 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
6073 					fib_work->fnh_info.fib_nh);
6074 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
6075 		break;
6076 	}
6077 	rtnl_unlock();
6078 	kfree(fib_work);
6079 }
6080 
6081 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
6082 {
6083 	struct mlxsw_sp_fib_event_work *fib_work =
6084 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6085 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6086 	bool replace;
6087 	int err;
6088 
6089 	rtnl_lock();
6090 	mlxsw_sp_span_respin(mlxsw_sp);
6091 
6092 	switch (fib_work->event) {
6093 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6094 	case FIB_EVENT_ENTRY_ADD:
6095 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6096 		err = mlxsw_sp_router_fib6_add(mlxsw_sp,
6097 					       fib_work->fib6_work.rt_arr,
6098 					       fib_work->fib6_work.nrt6,
6099 					       replace);
6100 		if (err)
6101 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6102 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6103 		break;
6104 	case FIB_EVENT_ENTRY_DEL:
6105 		mlxsw_sp_router_fib6_del(mlxsw_sp,
6106 					 fib_work->fib6_work.rt_arr,
6107 					 fib_work->fib6_work.nrt6);
6108 		mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
6109 		break;
6110 	}
6111 	rtnl_unlock();
6112 	kfree(fib_work);
6113 }
6114 
6115 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
6116 {
6117 	struct mlxsw_sp_fib_event_work *fib_work =
6118 		container_of(work, struct mlxsw_sp_fib_event_work, work);
6119 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
6120 	bool replace;
6121 	int err;
6122 
6123 	rtnl_lock();
6124 	switch (fib_work->event) {
6125 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6126 	case FIB_EVENT_ENTRY_ADD:
6127 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
6128 
6129 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
6130 						replace);
6131 		if (err)
6132 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6133 		mr_cache_put(fib_work->men_info.mfc);
6134 		break;
6135 	case FIB_EVENT_ENTRY_DEL:
6136 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
6137 		mr_cache_put(fib_work->men_info.mfc);
6138 		break;
6139 	case FIB_EVENT_VIF_ADD:
6140 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
6141 						    &fib_work->ven_info);
6142 		if (err)
6143 			mlxsw_sp_router_fib_abort(mlxsw_sp);
6144 		dev_put(fib_work->ven_info.dev);
6145 		break;
6146 	case FIB_EVENT_VIF_DEL:
6147 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
6148 					      &fib_work->ven_info);
6149 		dev_put(fib_work->ven_info.dev);
6150 		break;
6151 	}
6152 	rtnl_unlock();
6153 	kfree(fib_work);
6154 }
6155 
6156 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
6157 				       struct fib_notifier_info *info)
6158 {
6159 	struct fib_entry_notifier_info *fen_info;
6160 	struct fib_nh_notifier_info *fnh_info;
6161 
6162 	switch (fib_work->event) {
6163 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6164 	case FIB_EVENT_ENTRY_APPEND: /* fall through */
6165 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6166 	case FIB_EVENT_ENTRY_DEL:
6167 		fen_info = container_of(info, struct fib_entry_notifier_info,
6168 					info);
6169 		fib_work->fen_info = *fen_info;
6170 		/* Take a reference on the fib_info to prevent it from being
6171 		 * freed while the work is queued. Release it afterwards.
6172 		 */
6173 		fib_info_hold(fib_work->fen_info.fi);
6174 		break;
6175 	case FIB_EVENT_NH_ADD: /* fall through */
6176 	case FIB_EVENT_NH_DEL:
6177 		fnh_info = container_of(info, struct fib_nh_notifier_info,
6178 					info);
6179 		fib_work->fnh_info = *fnh_info;
6180 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
6181 		break;
6182 	}
6183 }
6184 
6185 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
6186 				      struct fib_notifier_info *info)
6187 {
6188 	struct fib6_entry_notifier_info *fen6_info;
6189 	int err;
6190 
6191 	switch (fib_work->event) {
6192 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6193 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6194 	case FIB_EVENT_ENTRY_DEL:
6195 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
6196 					 info);
6197 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
6198 						     fen6_info);
6199 		if (err)
6200 			return err;
6201 		break;
6202 	}
6203 
6204 	return 0;
6205 }
6206 
6207 static void
6208 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
6209 			    struct fib_notifier_info *info)
6210 {
6211 	switch (fib_work->event) {
6212 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6213 	case FIB_EVENT_ENTRY_ADD: /* fall through */
6214 	case FIB_EVENT_ENTRY_DEL:
6215 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
6216 		mr_cache_hold(fib_work->men_info.mfc);
6217 		break;
6218 	case FIB_EVENT_VIF_ADD: /* fall through */
6219 	case FIB_EVENT_VIF_DEL:
6220 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
6221 		dev_hold(fib_work->ven_info.dev);
6222 		break;
6223 	}
6224 }
6225 
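/* FIB rules themselves are not offloaded; only default rules and l3mdev
 * rules are supported. Any other rule is rejected with an extack
 * message, as the device would stop being in sync with the kernel.
 */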
6226 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
6227 					  struct fib_notifier_info *info,
6228 					  struct mlxsw_sp *mlxsw_sp)
6229 {
6230 	struct netlink_ext_ack *extack = info->extack;
6231 	struct fib_rule_notifier_info *fr_info;
6232 	struct fib_rule *rule;
6233 	int err = 0;
6234 
6235 	/* nothing to do at the moment */
6236 	if (event == FIB_EVENT_RULE_DEL)
6237 		return 0;
6238 
6239 	if (mlxsw_sp->router->aborted)
6240 		return 0;
6241 
6242 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
6243 	rule = fr_info->rule;
6244 
6245 	/* Rule only affects locally generated traffic */
6246 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
6247 		return 0;
6248 
6249 	switch (info->family) {
6250 	case AF_INET:
6251 		if (!fib4_rule_default(rule) && !rule->l3mdev)
6252 			err = -EOPNOTSUPP;
6253 		break;
6254 	case AF_INET6:
6255 		if (!fib6_rule_default(rule) && !rule->l3mdev)
6256 			err = -EOPNOTSUPP;
6257 		break;
6258 	case RTNL_FAMILY_IPMR:
6259 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
6260 			err = -EOPNOTSUPP;
6261 		break;
6262 	case RTNL_FAMILY_IP6MR:
6263 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6264 			err = -EOPNOTSUPP;
6265 		break;
6266 	}
6267 
6268 	if (err < 0)
6269 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6270 
6271 	return err;
6272 }
6273 
6274 /* Called with rcu_read_lock() */
6275 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6276 				     unsigned long event, void *ptr)
6277 {
6278 	struct mlxsw_sp_fib_event_work *fib_work;
6279 	struct fib_notifier_info *info = ptr;
6280 	struct mlxsw_sp_router *router;
6281 	int err;
6282 
6283 	if (info->family != AF_INET && info->family != AF_INET6 &&
6284 	    info->family != RTNL_FAMILY_IPMR &&
6285 	    info->family != RTNL_FAMILY_IP6MR)
6286 		return NOTIFY_DONE;
6287 
6288 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6289 
6290 	switch (event) {
6291 	case FIB_EVENT_RULE_ADD: /* fall through */
6292 	case FIB_EVENT_RULE_DEL:
6293 		err = mlxsw_sp_router_fib_rule_event(event, info,
6294 						     router->mlxsw_sp);
6295 		return notifier_from_errno(err);
6296 	case FIB_EVENT_ENTRY_ADD:
6297 	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
6298 	case FIB_EVENT_ENTRY_APPEND:  /* fall through */
6299 		if (router->aborted) {
6300 			NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6301 			return notifier_from_errno(-EINVAL);
6302 		}
6303 		if (info->family == AF_INET) {
6304 			struct fib_entry_notifier_info *fen_info = ptr;
6305 
6306 			if (fen_info->fi->fib_nh_is_v6) {
6307 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
6308 				return notifier_from_errno(-EINVAL);
6309 			}
6310 			if (fen_info->fi->nh) {
6311 				NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
6312 				return notifier_from_errno(-EINVAL);
6313 			}
6314 		} else if (info->family == AF_INET6) {
6315 			struct fib6_entry_notifier_info *fen6_info;
6316 
6317 			fen6_info = container_of(info,
6318 						 struct fib6_entry_notifier_info,
6319 						 info);
6320 			if (fen6_info->rt->nh) {
6321 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
6322 				return notifier_from_errno(-EINVAL);
6323 			}
6324 		}
6325 		break;
6326 	}
6327 
6328 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
6329 	if (WARN_ON(!fib_work))
6330 		return NOTIFY_BAD;
6331 
6332 	fib_work->mlxsw_sp = router->mlxsw_sp;
6333 	fib_work->event = event;
6334 
6335 	switch (info->family) {
6336 	case AF_INET:
6337 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
6338 		mlxsw_sp_router_fib4_event(fib_work, info);
6339 		break;
6340 	case AF_INET6:
6341 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
6342 		err = mlxsw_sp_router_fib6_event(fib_work, info);
6343 		if (err)
6344 			goto err_fib_event;
6345 		break;
6346 	case RTNL_FAMILY_IP6MR:
6347 	case RTNL_FAMILY_IPMR:
6348 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
6349 		mlxsw_sp_router_fibmr_event(fib_work, info);
6350 		break;
6351 	}
6352 
6353 	mlxsw_core_schedule_work(&fib_work->work);
6354 
6355 	return NOTIFY_DONE;
6356 
6357 err_fib_event:
6358 	kfree(fib_work);
6359 	return NOTIFY_BAD;
6360 }
6361 
6362 struct mlxsw_sp_rif *
6363 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6364 			 const struct net_device *dev)
6365 {
6366 	int i;
6367 
6368 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6369 		if (mlxsw_sp->router->rifs[i] &&
6370 		    mlxsw_sp->router->rifs[i]->dev == dev)
6371 			return mlxsw_sp->router->rifs[i];
6372 
6373 	return NULL;
6374 }
6375 
6376 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6377 {
6378 	char ritr_pl[MLXSW_REG_RITR_LEN];
6379 	int err;
6380 
6381 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6382 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6383 	if (err)
6384 		return err;
6385 
6386 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
6387 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6388 }
6389 
6390 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6391 					  struct mlxsw_sp_rif *rif)
6392 {
6393 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6394 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6395 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
6396 }
6397 
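/* Decide whether an address event requires configuration changes for
 * 'dev': configure on NETDEV_UP if no RIF exists yet, and deconfigure
 * on NETDEV_DOWN once the last IP address is gone.
 */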
6398 static bool
6399 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6400 			   unsigned long event)
6401 {
6402 	struct inet6_dev *inet6_dev;
6403 	bool addr_list_empty = true;
6404 	struct in_device *idev;
6405 
6406 	switch (event) {
6407 	case NETDEV_UP:
6408 		return rif == NULL;
6409 	case NETDEV_DOWN:
6410 		idev = __in_dev_get_rtnl(dev);
6411 		if (idev && idev->ifa_list)
6412 			addr_list_empty = false;
6413 
6414 		inet6_dev = __in6_dev_get(dev);
6415 		if (addr_list_empty && inet6_dev &&
6416 		    !list_empty(&inet6_dev->addr_list))
6417 			addr_list_empty = false;
6418 
6419 		/* macvlans do not have a RIF, but rather piggyback on the
6420 		 * RIF of their lower device.
6421 		 */
6422 		if (netif_is_macvlan(dev) && addr_list_empty)
6423 			return true;
6424 
6425 		if (rif && addr_list_empty &&
6426 		    !netif_is_l3_slave(rif->dev))
6427 			return true;
6428 		/* It is possible we already removed the RIF ourselves
6429 		 * if it was assigned to a netdev that is now a bridge
6430 		 * or LAG slave.
6431 		 */
6432 		return false;
6433 	}
6434 
6435 	return false;
6436 }
6437 
6438 static enum mlxsw_sp_rif_type
6439 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6440 		      const struct net_device *dev)
6441 {
6442 	enum mlxsw_sp_fid_type type;
6443 
6444 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6445 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
6446 
6447 	/* Otherwise, the RIF type is derived from the type of the underlying FID. */
6448 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6449 		type = MLXSW_SP_FID_TYPE_8021Q;
6450 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6451 		type = MLXSW_SP_FID_TYPE_8021Q;
6452 	else if (netif_is_bridge_master(dev))
6453 		type = MLXSW_SP_FID_TYPE_8021D;
6454 	else
6455 		type = MLXSW_SP_FID_TYPE_RFID;
6456 
6457 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6458 }
6459 
6460 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6461 {
6462 	int i;
6463 
6464 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6465 		if (!mlxsw_sp->router->rifs[i]) {
6466 			*p_rif_index = i;
6467 			return 0;
6468 		}
6469 	}
6470 
6471 	return -ENOBUFS;
6472 }
6473 
6474 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6475 					       u16 vr_id,
6476 					       struct net_device *l3_dev)
6477 {
6478 	struct mlxsw_sp_rif *rif;
6479 
6480 	rif = kzalloc(rif_size, GFP_KERNEL);
6481 	if (!rif)
6482 		return NULL;
6483 
6484 	INIT_LIST_HEAD(&rif->nexthop_list);
6485 	INIT_LIST_HEAD(&rif->neigh_list);
6486 	if (l3_dev) {
6487 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
6488 		rif->mtu = l3_dev->mtu;
6489 		rif->dev = l3_dev;
6490 	}
6491 	rif->vr_id = vr_id;
6492 	rif->rif_index = rif_index;
6493 
6494 	return rif;
6495 }
6496 
6497 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6498 					   u16 rif_index)
6499 {
6500 	return mlxsw_sp->router->rifs[rif_index];
6501 }
6502 
6503 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6504 {
6505 	return rif->rif_index;
6506 }
6507 
6508 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6509 {
6510 	return lb_rif->common.rif_index;
6511 }
6512 
6513 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6514 {
6515 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
6516 	struct mlxsw_sp_vr *ul_vr;
6517 
6518 	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
6519 	if (WARN_ON(IS_ERR(ul_vr)))
6520 		return 0;
6521 
6522 	return ul_vr->id;
6523 }
6524 
6525 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6526 {
6527 	return lb_rif->ul_rif_id;
6528 }
6529 
6530 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6531 {
6532 	return rif->dev->ifindex;
6533 }
6534 
6535 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6536 {
6537 	return rif->dev;
6538 }
6539 
6540 struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
6541 {
6542 	return rif->fid;
6543 }
6544 
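/* Create a RIF for 'params->dev': bind it to a virtual router, allocate
 * a free RIF index, associate it with a FID (loopback RIFs have none)
 * and let the type-specific operations program the device.
 */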
6545 static struct mlxsw_sp_rif *
6546 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6547 		    const struct mlxsw_sp_rif_params *params,
6548 		    struct netlink_ext_ack *extack)
6549 {
6550 	u32 tb_id = l3mdev_fib_table(params->dev);
6551 	const struct mlxsw_sp_rif_ops *ops;
6552 	struct mlxsw_sp_fid *fid = NULL;
6553 	enum mlxsw_sp_rif_type type;
6554 	struct mlxsw_sp_rif *rif;
6555 	struct mlxsw_sp_vr *vr;
6556 	u16 rif_index;
6557 	int i, err;
6558 
6559 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6560 	ops = mlxsw_sp->rif_ops_arr[type];
6561 
6562 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6563 	if (IS_ERR(vr))
6564 		return ERR_CAST(vr);
6565 	vr->rif_count++;
6566 
6567 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6568 	if (err) {
6569 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6570 		goto err_rif_index_alloc;
6571 	}
6572 
6573 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6574 	if (!rif) {
6575 		err = -ENOMEM;
6576 		goto err_rif_alloc;
6577 	}
6578 	dev_hold(rif->dev);
6579 	mlxsw_sp->router->rifs[rif_index] = rif;
6580 	rif->mlxsw_sp = mlxsw_sp;
6581 	rif->ops = ops;
6582 
6583 	if (ops->fid_get) {
6584 		fid = ops->fid_get(rif, extack);
6585 		if (IS_ERR(fid)) {
6586 			err = PTR_ERR(fid);
6587 			goto err_fid_get;
6588 		}
6589 		rif->fid = fid;
6590 	}
6591 
6592 	if (ops->setup)
6593 		ops->setup(rif, params);
6594 
6595 	err = ops->configure(rif);
6596 	if (err)
6597 		goto err_configure;
6598 
6599 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6600 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6601 		if (err)
6602 			goto err_mr_rif_add;
6603 	}
6604 
6605 	mlxsw_sp_rif_counters_alloc(rif);
6606 
6607 	return rif;
6608 
6609 err_mr_rif_add:
6610 	for (i--; i >= 0; i--)
6611 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6612 	ops->deconfigure(rif);
6613 err_configure:
6614 	if (fid)
6615 		mlxsw_sp_fid_put(fid);
6616 err_fid_get:
6617 	mlxsw_sp->router->rifs[rif_index] = NULL;
6618 	dev_put(rif->dev);
6619 	kfree(rif);
6620 err_rif_alloc:
6621 err_rif_index_alloc:
6622 	vr->rif_count--;
6623 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6624 	return ERR_PTR(err);
6625 }
6626 
6627 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6628 {
6629 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
6630 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6631 	struct mlxsw_sp_fid *fid = rif->fid;
6632 	struct mlxsw_sp_vr *vr;
6633 	int i;
6634 
6635 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6636 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
6637 
6638 	mlxsw_sp_rif_counters_free(rif);
6639 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6640 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6641 	ops->deconfigure(rif);
6642 	if (fid)
6643 		/* Loopback RIFs are not associated with a FID. */
6644 		mlxsw_sp_fid_put(fid);
6645 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6646 	dev_put(rif->dev);
6647 	kfree(rif);
6648 	vr->rif_count--;
6649 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6650 }
6651 
6652 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6653 				 struct net_device *dev)
6654 {
6655 	struct mlxsw_sp_rif *rif;
6656 
6657 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6658 	if (!rif)
6659 		return;
6660 	mlxsw_sp_rif_destroy(rif);
6661 }
6662 
6663 static void
6664 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6665 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6666 {
6667 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6668 
6669 	params->vid = mlxsw_sp_port_vlan->vid;
6670 	params->lag = mlxsw_sp_port->lagged;
6671 	if (params->lag)
6672 		params->lag_id = mlxsw_sp_port->lag_id;
6673 	else
6674 		params->system_port = mlxsw_sp_port->local_port;
6675 }
6676 
6677 static struct mlxsw_sp_rif_subport *
6678 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
6679 {
6680 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
6681 }
6682 
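/* Subport RIFs are reference counted, since several {port, VID} pairs
 * (e.g., the members of a LAG) can share the same L3 netdev and thus
 * the same RIF.
 */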
6683 static struct mlxsw_sp_rif *
6684 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
6685 			 const struct mlxsw_sp_rif_params *params,
6686 			 struct netlink_ext_ack *extack)
6687 {
6688 	struct mlxsw_sp_rif_subport *rif_subport;
6689 	struct mlxsw_sp_rif *rif;
6690 
6691 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
6692 	if (!rif)
6693 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
6694 
6695 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
6696 	refcount_inc(&rif_subport->ref_count);
6697 	return rif;
6698 }
6699 
6700 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
6701 {
6702 	struct mlxsw_sp_rif_subport *rif_subport;
6703 
6704 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
6705 	if (!refcount_dec_and_test(&rif_subport->ref_count))
6706 		return;
6707 
6708 	mlxsw_sp_rif_destroy(rif);
6709 }
6710 
6711 static int
6712 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
6713 			       struct net_device *l3_dev,
6714 			       struct netlink_ext_ack *extack)
6715 {
6716 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6717 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
6718 	struct mlxsw_sp_rif_params params = {
6719 		.dev = l3_dev,
6720 	};
6721 	u16 vid = mlxsw_sp_port_vlan->vid;
6722 	struct mlxsw_sp_rif *rif;
6723 	struct mlxsw_sp_fid *fid;
6724 	int err;
6725 
6726 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
6727 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
6728 	if (IS_ERR(rif))
6729 		return PTR_ERR(rif);
6730 
6731 	/* The FID was already created, so just take a reference */
6732 	fid = rif->ops->fid_get(rif, extack);
6733 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6734 	if (err)
6735 		goto err_fid_port_vid_map;
6736 
6737 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
6738 	if (err)
6739 		goto err_port_vid_learning_set;
6740 
6741 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
6742 					BR_STATE_FORWARDING);
6743 	if (err)
6744 		goto err_port_vid_stp_set;
6745 
6746 	mlxsw_sp_port_vlan->fid = fid;
6747 
6748 	return 0;
6749 
6750 err_port_vid_stp_set:
6751 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6752 err_port_vid_learning_set:
6753 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6754 err_fid_port_vid_map:
6755 	mlxsw_sp_fid_put(fid);
6756 	mlxsw_sp_rif_subport_put(rif);
6757 	return err;
6758 }
6759 
6760 void
6761 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6762 {
6763 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6764 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
6765 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
6766 	u16 vid = mlxsw_sp_port_vlan->vid;
6767 
6768 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6769 		return;
6770 
6771 	mlxsw_sp_port_vlan->fid = NULL;
6772 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6773 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
6774 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6775 	mlxsw_sp_fid_put(fid);
6776 	mlxsw_sp_rif_subport_put(rif);
6777 }
6778 
6779 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6780 					     struct net_device *port_dev,
6781 					     unsigned long event, u16 vid,
6782 					     struct netlink_ext_ack *extack)
6783 {
6784 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
6785 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
6786 
6787 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
6788 	if (WARN_ON(!mlxsw_sp_port_vlan))
6789 		return -EINVAL;
6790 
6791 	switch (event) {
6792 	case NETDEV_UP:
6793 		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
6794 						      l3_dev, extack);
6795 	case NETDEV_DOWN:
6796 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
6797 		break;
6798 	}
6799 
6800 	return 0;
6801 }
6802 
6803 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
6804 					unsigned long event,
6805 					struct netlink_ext_ack *extack)
6806 {
6807 	if (netif_is_bridge_port(port_dev) ||
6808 	    netif_is_lag_port(port_dev) ||
6809 	    netif_is_ovs_port(port_dev))
6810 		return 0;
6811 
6812 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
6813 						 MLXSW_SP_DEFAULT_VID, extack);
6814 }
6815 
6816 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6817 					 struct net_device *lag_dev,
6818 					 unsigned long event, u16 vid,
6819 					 struct netlink_ext_ack *extack)
6820 {
6821 	struct net_device *port_dev;
6822 	struct list_head *iter;
6823 	int err;
6824 
6825 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6826 		if (mlxsw_sp_port_dev_check(port_dev)) {
6827 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6828 								port_dev,
6829 								event, vid,
6830 								extack);
6831 			if (err)
6832 				return err;
6833 		}
6834 	}
6835 
6836 	return 0;
6837 }
6838 
6839 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
6840 				       unsigned long event,
6841 				       struct netlink_ext_ack *extack)
6842 {
6843 	if (netif_is_bridge_port(lag_dev))
6844 		return 0;
6845 
6846 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
6847 					     MLXSW_SP_DEFAULT_VID, extack);
6848 }
6849 
6850 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
6851 					  struct net_device *l3_dev,
6852 					  unsigned long event,
6853 					  struct netlink_ext_ack *extack)
6854 {
6855 	struct mlxsw_sp_rif_params params = {
6856 		.dev = l3_dev,
6857 	};
6858 	struct mlxsw_sp_rif *rif;
6859 
6860 	switch (event) {
6861 	case NETDEV_UP:
6862 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
6863 		if (IS_ERR(rif))
6864 			return PTR_ERR(rif);
6865 		break;
6866 	case NETDEV_DOWN:
6867 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6868 		mlxsw_sp_rif_destroy(rif);
6869 		break;
6870 	}
6871 
6872 	return 0;
6873 }
6874 
6875 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
6876 					struct net_device *vlan_dev,
6877 					unsigned long event,
6878 					struct netlink_ext_ack *extack)
6879 {
6880 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
6881 	u16 vid = vlan_dev_vlan_id(vlan_dev);
6882 
6883 	if (netif_is_bridge_port(vlan_dev))
6884 		return 0;
6885 
6886 	if (mlxsw_sp_port_dev_check(real_dev))
6887 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
6888 							 event, vid, extack);
6889 	else if (netif_is_lag_master(real_dev))
6890 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
6891 						     vid, extack);
6892 	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
6893 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
6894 						      extack);
6895 
6896 	return 0;
6897 }
6898 
6899 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
6900 {
6901 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
6902 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6903 
6904 	return ether_addr_equal_masked(mac, vrrp4, mask);
6905 }
6906 
6907 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
6908 {
6909 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
6910 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6911 
6912 	return ether_addr_equal_masked(mac, vrrp6, mask);
6913 }
6914 
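/* VRRP virtual MACs have the form 00:00:5e:00:01:{VRID} for IPv4 and
 * 00:00:5e:00:02:{VRID} for IPv6. Program the VRID carried in the last
 * byte of the MAC into the RIF, or clear it when 'adding' is false.
 */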
6915 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
6916 				const u8 *mac, bool adding)
6917 {
6918 	char ritr_pl[MLXSW_REG_RITR_LEN];
6919 	u8 vrrp_id = adding ? mac[5] : 0;
6920 	int err;
6921 
6922 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
6923 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
6924 		return 0;
6925 
6926 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
6927 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6928 	if (err)
6929 		return err;
6930 
6931 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
6932 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
6933 	else
6934 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
6935 
6936 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6937 }
6938 
6939 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
6940 				    const struct net_device *macvlan_dev,
6941 				    struct netlink_ext_ack *extack)
6942 {
6943 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6944 	struct mlxsw_sp_rif *rif;
6945 	int err;
6946 
6947 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6948 	if (!rif) {
6949 		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
6950 		return -EOPNOTSUPP;
6951 	}
6952 
6953 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6954 				  mlxsw_sp_fid_index(rif->fid), true);
6955 	if (err)
6956 		return err;
6957 
6958 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
6959 				   macvlan_dev->dev_addr, true);
6960 	if (err)
6961 		goto err_rif_vrrp_add;
6962 
6963 	/* Make sure the bridge driver does not have this MAC pointing at
6964 	 * some other port.
6965 	 */
6966 	if (rif->ops->fdb_del)
6967 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
6968 
6969 	return 0;
6970 
6971 err_rif_vrrp_add:
6972 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6973 			    mlxsw_sp_fid_index(rif->fid), false);
6974 	return err;
6975 }
6976 
6977 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6978 			      const struct net_device *macvlan_dev)
6979 {
6980 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6981 	struct mlxsw_sp_rif *rif;
6982 
6983 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6984 	/* If we do not have a RIF, then we already took care of
6985 	 * removing the macvlan's MAC during RIF deletion.
6986 	 */
6987 	if (!rif)
6988 		return;
6989 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
6990 			     false);
6991 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6992 			    mlxsw_sp_fid_index(rif->fid), false);
6993 }
6994 
6995 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
6996 					   struct net_device *macvlan_dev,
6997 					   unsigned long event,
6998 					   struct netlink_ext_ack *extack)
6999 {
7000 	switch (event) {
7001 	case NETDEV_UP:
7002 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
7003 	case NETDEV_DOWN:
7004 		mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
7005 		break;
7006 	}
7007 
7008 	return 0;
7009 }
7010 
7011 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
7012 					       struct net_device *dev,
7013 					       const unsigned char *dev_addr,
7014 					       struct netlink_ext_ack *extack)
7015 {
7016 	struct mlxsw_sp_rif *rif;
7017 	int i;
7018 
7019 	/* A RIF is not created for macvlan netdevs. Their MAC is used to
7020 	 * populate the FDB.
7021 	 */
7022 	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
7023 		return 0;
7024 
7025 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7026 		rif = mlxsw_sp->router->rifs[i];
7027 		if (rif && rif->dev && rif->dev != dev &&
7028 		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
7029 					     mlxsw_sp->mac_mask)) {
7030 			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
7031 			return -EINVAL;
7032 		}
7033 	}
7034 
7035 	return 0;
7036 }
7037 
7038 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
7039 				     struct net_device *dev,
7040 				     unsigned long event,
7041 				     struct netlink_ext_ack *extack)
7042 {
7043 	if (mlxsw_sp_port_dev_check(dev))
7044 		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
7045 	else if (netif_is_lag_master(dev))
7046 		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
7047 	else if (netif_is_bridge_master(dev))
7048 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
7049 						      extack);
7050 	else if (is_vlan_dev(dev))
7051 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
7052 						    extack);
7053 	else if (netif_is_macvlan(dev))
7054 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
7055 						       extack);
7056 	else
7057 		return 0;
7058 }
7059 
7060 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
7061 				   unsigned long event, void *ptr)
7062 {
7063 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
7064 	struct net_device *dev = ifa->ifa_dev->dev;
7065 	struct mlxsw_sp_router *router;
7066 	struct mlxsw_sp_rif *rif;
7067 	int err = 0;
7068 
7069 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
7070 	if (event == NETDEV_UP)
7071 		goto out;
7072 
7073 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
7074 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
7075 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7076 		goto out;
7077 
7078 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
7079 out:
7080 	return notifier_from_errno(err);
7081 }
7082 
7083 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
7084 				  unsigned long event, void *ptr)
7085 {
7086 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
7087 	struct net_device *dev = ivi->ivi_dev->dev;
7088 	struct mlxsw_sp *mlxsw_sp;
7089 	struct mlxsw_sp_rif *rif;
7090 	int err = 0;
7091 
7092 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7093 	if (!mlxsw_sp)
7094 		goto out;
7095 
7096 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7097 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7098 		goto out;
7099 
7100 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7101 						  ivi->extack);
7102 	if (err)
7103 		goto out;
7104 
7105 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
7106 out:
7107 	return notifier_from_errno(err);
7108 }
7109 
7110 struct mlxsw_sp_inet6addr_event_work {
7111 	struct work_struct work;
7112 	struct mlxsw_sp *mlxsw_sp;
7113 	struct net_device *dev;
7114 	unsigned long event;
7115 };
7116 
7117 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
7118 {
7119 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
7120 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
7121 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
7122 	struct net_device *dev = inet6addr_work->dev;
7123 	unsigned long event = inet6addr_work->event;
7124 	struct mlxsw_sp_rif *rif;
7125 
7126 	rtnl_lock();
7127 
7128 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7129 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7130 		goto out;
7131 
7132 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
7133 out:
7134 	rtnl_unlock();
7135 	dev_put(dev);
7136 	kfree(inet6addr_work);
7137 }
7138 
7139 /* Called with rcu_read_lock() */
7140 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
7141 				    unsigned long event, void *ptr)
7142 {
7143 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
7144 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
7145 	struct net_device *dev = if6->idev->dev;
7146 	struct mlxsw_sp_router *router;
7147 
7148 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
7149 	if (event == NETDEV_UP)
7150 		return NOTIFY_DONE;
7151 
7152 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
7153 	if (!inet6addr_work)
7154 		return NOTIFY_BAD;
7155 
7156 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
7157 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
7158 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
7159 	inet6addr_work->dev = dev;
7160 	inet6addr_work->event = event;
7161 	dev_hold(dev);
7162 	mlxsw_core_schedule_work(&inet6addr_work->work);
7163 
7164 	return NOTIFY_DONE;
7165 }
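
/* The inet6addr chain is an atomic notifier chain, so the handler above
 * cannot take RTNL directly. It instead grabs a reference on the netdev
 * and defers to process context, where the work item runs under RTNL and
 * drops the reference. The IPv4 inetaddr chain is blocking, which is why
 * mlxsw_sp_inetaddr_event() can handle its events synchronously.
 */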
7166 
7167 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
7168 				   unsigned long event, void *ptr)
7169 {
7170 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
7171 	struct net_device *dev = i6vi->i6vi_dev->dev;
7172 	struct mlxsw_sp *mlxsw_sp;
7173 	struct mlxsw_sp_rif *rif;
7174 	int err = 0;
7175 
7176 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7177 	if (!mlxsw_sp)
7178 		goto out;
7179 
7180 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7181 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
7182 		goto out;
7183 
7184 	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7185 						  i6vi->extack);
7186 	if (err)
7187 		goto out;
7188 
7189 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
7190 out:
7191 	return notifier_from_errno(err);
7192 }
7193 
7194 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7195 			     const char *mac, int mtu)
7196 {
7197 	char ritr_pl[MLXSW_REG_RITR_LEN];
7198 	int err;
7199 
7200 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7201 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7202 	if (err)
7203 		return err;
7204 
7205 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
7206 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
7207 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
7208 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7209 }
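
/* RITR is edited read-modify-write: query the current register state,
 * change only the MTU and MAC fields, and write the record back. The
 * CREATE opcode on an already-configured RIF index evidently acts here
 * as an update rather than a fresh create.
 */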
7210 
7211 static int
7212 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
7213 				  struct mlxsw_sp_rif *rif)
7214 {
7215 	struct net_device *dev = rif->dev;
7216 	u16 fid_index;
7217 	int err;
7218 
7219 	fid_index = mlxsw_sp_fid_index(rif->fid);
7220 
7221 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
7222 	if (err)
7223 		return err;
7224 
7225 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
7226 				dev->mtu);
7227 	if (err)
7228 		goto err_rif_edit;
7229 
7230 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
7231 	if (err)
7232 		goto err_rif_fdb_op;
7233 
7234 	if (rif->mtu != dev->mtu) {
7235 		struct mlxsw_sp_vr *vr;
7236 		int i;
7237 
7238 		/* Unlike in unicast routing, a RIF cannot be shared between
7239 		 * several multicast routing tables, so it is relevant only to
7240 		 * its own mr_table instance.
7241 		 */
7242 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
7243 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
7244 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
7245 						   rif, dev->mtu);
7246 	}
7247 
7248 	ether_addr_copy(rif->addr, dev->dev_addr);
7249 	rif->mtu = dev->mtu;
7250 
7251 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
7252 
7253 	return 0;
7254 
7255 err_rif_fdb_op:
7256 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
7257 err_rif_edit:
7258 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
7259 	return err;
7260 }
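
/* Ordering note: the FDB entry for the old MAC is removed before RITR is
 * edited, and the entry for the new MAC is installed only afterwards, so
 * no FDB entry ever points at a RIF whose MAC it does not match. The
 * error labels replay the same steps in reverse to restore the old state.
 */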
7261 
7262 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
7263 			    struct netdev_notifier_pre_changeaddr_info *info)
7264 {
7265 	struct netlink_ext_ack *extack;
7266 
7267 	extack = netdev_notifier_info_to_extack(&info->info);
7268 	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
7269 						   info->dev_addr, extack);
7270 }
7271 
7272 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
7273 					 unsigned long event, void *ptr)
7274 {
7275 	struct mlxsw_sp *mlxsw_sp;
7276 	struct mlxsw_sp_rif *rif;
7277 
7278 	mlxsw_sp = mlxsw_sp_lower_get(dev);
7279 	if (!mlxsw_sp)
7280 		return 0;
7281 
7282 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7283 	if (!rif)
7284 		return 0;
7285 
7286 	switch (event) {
7287 	case NETDEV_CHANGEMTU: /* fall through */
7288 	case NETDEV_CHANGEADDR:
7289 		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
7290 	case NETDEV_PRE_CHANGEADDR:
7291 		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
7292 	}
7293 
7294 	return 0;
7295 }
7296 
7297 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
7298 				  struct net_device *l3_dev,
7299 				  struct netlink_ext_ack *extack)
7300 {
7301 	struct mlxsw_sp_rif *rif;
7302 
7303 	/* If the netdev is already associated with a RIF, destroy it and
7304 	 * create a new one bound to the new virtual router ID.
7305 	 */
7306 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7307 	if (rif)
7308 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
7309 					  extack);
7310 
7311 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
7312 }
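
/* Replaying NETDEV_DOWN followed by NETDEV_UP rebuilds the RIF from
 * scratch; the UP handler resolves the virtual router from the netdev's
 * current L3 master, so the new RIF lands in the VRF's table. This path
 * is reached, e.g., when a port netdev is enslaved to a VRF device and
 * NETDEV_CHANGEUPPER fires with info->linking set.
 */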
7313 
7314 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
7315 				    struct net_device *l3_dev)
7316 {
7317 	struct mlxsw_sp_rif *rif;
7318 
7319 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7320 	if (!rif)
7321 		return;
7322 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
7323 }
7324 
7325 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
7326 				 struct netdev_notifier_changeupper_info *info)
7327 {
7328 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
7329 	int err = 0;
7330 
7331 	/* We do not create a RIF for a macvlan, but only use it to
7332 	 * direct more MAC addresses to the router.
7333 	 */
7334 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
7335 		return 0;
7336 
7337 	switch (event) {
7338 	case NETDEV_PRECHANGEUPPER:
7339 		return 0;
7340 	case NETDEV_CHANGEUPPER:
7341 		if (info->linking) {
7342 			struct netlink_ext_ack *extack;
7343 
7344 			extack = netdev_notifier_info_to_extack(&info->info);
7345 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
7346 		} else {
7347 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
7348 		}
7349 		break;
7350 	}
7351 
7352 	return err;
7353 }
7354 
7355 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
7356 {
7357 	struct mlxsw_sp_rif *rif = data;
7358 
7359 	if (!netif_is_macvlan(dev))
7360 		return 0;
7361 
7362 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
7363 				   mlxsw_sp_fid_index(rif->fid), false);
7364 }
7365 
7366 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
7367 {
7368 	if (!netif_is_macvlan_port(rif->dev))
7369 		return 0;
7370 
7371 	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
7372 	return netdev_walk_all_upper_dev_rcu(rif->dev,
7373 					     __mlxsw_sp_rif_macvlan_flush, rif);
7374 }
7375 
7376 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
7377 				       const struct mlxsw_sp_rif_params *params)
7378 {
7379 	struct mlxsw_sp_rif_subport *rif_subport;
7380 
7381 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7382 	refcount_set(&rif_subport->ref_count, 1);
7383 	rif_subport->vid = params->vid;
7384 	rif_subport->lag = params->lag;
7385 	if (params->lag)
7386 		rif_subport->lag_id = params->lag_id;
7387 	else
7388 		rif_subport->system_port = params->system_port;
7389 }
7390 
7391 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
7392 {
7393 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7394 	struct mlxsw_sp_rif_subport *rif_subport;
7395 	char ritr_pl[MLXSW_REG_RITR_LEN];
7396 
7397 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
7398 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
7399 			    rif->rif_index, rif->vr_id, rif->dev->mtu);
7400 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7401 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
7402 				  rif_subport->lag ? rif_subport->lag_id :
7403 						     rif_subport->system_port,
7404 				  rif_subport->vid);
7405 
7406 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7407 }
7408 
7409 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
7410 {
7411 	int err;
7412 
7413 	err = mlxsw_sp_rif_subport_op(rif, true);
7414 	if (err)
7415 		return err;
7416 
7417 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7418 				  mlxsw_sp_fid_index(rif->fid), true);
7419 	if (err)
7420 		goto err_rif_fdb_op;
7421 
7422 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7423 	return 0;
7424 
7425 err_rif_fdb_op:
7426 	mlxsw_sp_rif_subport_op(rif, false);
7427 	return err;
7428 }
7429 
7430 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
7431 {
7432 	struct mlxsw_sp_fid *fid = rif->fid;
7433 
7434 	mlxsw_sp_fid_rif_set(fid, NULL);
7435 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7436 			    mlxsw_sp_fid_index(fid), false);
7437 	mlxsw_sp_rif_macvlan_flush(rif);
7438 	mlxsw_sp_rif_subport_op(rif, false);
7439 }
7440 
7441 static struct mlxsw_sp_fid *
7442 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
7443 			     struct netlink_ext_ack *extack)
7444 {
7445 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
7446 }
7447 
7448 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
7449 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
7450 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
7451 	.setup			= mlxsw_sp_rif_subport_setup,
7452 	.configure		= mlxsw_sp_rif_subport_configure,
7453 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
7454 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
7455 };
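
/* A minimal sketch (not the actual creation path, which lives elsewhere
 * in this file) of how a RIF ops vtable is assumed to be consumed: setup
 * is an optional pre-configure hook, and configure/deconfigure must stay
 * paired, as in the subport ops above.
 */
static int __maybe_unused
mlxsw_sp_rif_ops_sketch(struct mlxsw_sp_rif *rif,
			const struct mlxsw_sp_rif_params *params)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;

	if (ops->setup)
		ops->setup(rif, params);
	return ops->configure(rif);
}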
7456 
7457 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
7458 				    enum mlxsw_reg_ritr_if_type type,
7459 				    u16 vid_fid, bool enable)
7460 {
7461 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7462 	char ritr_pl[MLXSW_REG_RITR_LEN];
7463 
7464 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
7465 			    rif->dev->mtu);
7466 	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7467 	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
7468 
7469 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7470 }
7471 
7472 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
7473 {
7474 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
7475 }
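
/* The "router port" is a virtual local port one past the last physical
 * port of the ASIC; e.g. on a 64-port system it is port 65. It stands in
 * for the router in FID flood tables, which is why the flood-set calls
 * below add and remove it as a flood member.
 */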
7476 
7477 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
7478 {
7479 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7480 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7481 	int err;
7482 
7483 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
7484 	if (err)
7485 		return err;
7486 
7487 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7488 				     mlxsw_sp_router_port(mlxsw_sp), true);
7489 	if (err)
7490 		goto err_fid_mc_flood_set;
7491 
7492 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7493 				     mlxsw_sp_router_port(mlxsw_sp), true);
7494 	if (err)
7495 		goto err_fid_bc_flood_set;
7496 
7497 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7498 				  mlxsw_sp_fid_index(rif->fid), true);
7499 	if (err)
7500 		goto err_rif_fdb_op;
7501 
7502 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7503 	return 0;
7504 
7505 err_rif_fdb_op:
7506 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7507 			       mlxsw_sp_router_port(mlxsw_sp), false);
7508 err_fid_bc_flood_set:
7509 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7510 			       mlxsw_sp_router_port(mlxsw_sp), false);
7511 err_fid_mc_flood_set:
7512 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
7513 	return err;
7514 }
7515 
7516 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
7517 {
7518 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7519 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7520 	struct mlxsw_sp_fid *fid = rif->fid;
7521 
7522 	mlxsw_sp_fid_rif_set(fid, NULL);
7523 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7524 			    mlxsw_sp_fid_index(fid), false);
7525 	mlxsw_sp_rif_macvlan_flush(rif);
7526 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7527 			       mlxsw_sp_router_port(mlxsw_sp), false);
7528 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7529 			       mlxsw_sp_router_port(mlxsw_sp), false);
7530 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
7531 }
7532 
7533 static struct mlxsw_sp_fid *
7534 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
7535 			  struct netlink_ext_ack *extack)
7536 {
7537 	struct net_device *br_dev = rif->dev;
7538 	u16 vid;
7539 	int err;
7540 
7541 	if (is_vlan_dev(rif->dev)) {
7542 		vid = vlan_dev_vlan_id(rif->dev);
7543 		br_dev = vlan_dev_real_dev(rif->dev);
7544 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
7545 			return ERR_PTR(-EINVAL);
7546 	} else {
7547 		err = br_vlan_get_pvid(rif->dev, &vid);
7548 		if (err < 0 || !vid) {
7549 			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
7550 			return ERR_PTR(-EINVAL);
7551 		}
7552 	}
7553 
7554 	return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack);
7555 }
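
/* Two cases are resolved above: for a VLAN upper of a bridge, e.g.
 * br0.10, the VID comes from the VLAN device and the FID from the real
 * device br0; for the bridge device itself, the bridge's PVID is used,
 * so a bridge without a PVID cannot back a VLAN RIF.
 */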
7556 
7557 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7558 {
7559 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7560 	struct switchdev_notifier_fdb_info info;
7561 	struct net_device *br_dev;
7562 	struct net_device *dev;
7563 
7564 	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
7565 	dev = br_fdb_find_port(br_dev, mac, vid);
7566 	if (!dev)
7567 		return;
7568 
7569 	info.addr = mac;
7570 	info.vid = vid;
7571 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7572 				 NULL);
7573 }
7574 
7575 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
7576 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
7577 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7578 	.configure		= mlxsw_sp_rif_vlan_configure,
7579 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
7580 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
7581 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
7582 };
7583 
7584 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
7585 {
7586 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7587 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7588 	int err;
7589 
7590 	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
7591 				       true);
7592 	if (err)
7593 		return err;
7594 
7595 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7596 				     mlxsw_sp_router_port(mlxsw_sp), true);
7597 	if (err)
7598 		goto err_fid_mc_flood_set;
7599 
7600 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7601 				     mlxsw_sp_router_port(mlxsw_sp), true);
7602 	if (err)
7603 		goto err_fid_bc_flood_set;
7604 
7605 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7606 				  mlxsw_sp_fid_index(rif->fid), true);
7607 	if (err)
7608 		goto err_rif_fdb_op;
7609 
7610 	mlxsw_sp_fid_rif_set(rif->fid, rif);
7611 	return 0;
7612 
7613 err_rif_fdb_op:
7614 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7615 			       mlxsw_sp_router_port(mlxsw_sp), false);
7616 err_fid_bc_flood_set:
7617 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7618 			       mlxsw_sp_router_port(mlxsw_sp), false);
7619 err_fid_mc_flood_set:
7620 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7621 	return err;
7622 }
7623 
7624 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
7625 {
7626 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7627 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7628 	struct mlxsw_sp_fid *fid = rif->fid;
7629 
7630 	mlxsw_sp_fid_rif_set(fid, NULL);
7631 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7632 			    mlxsw_sp_fid_index(fid), false);
7633 	mlxsw_sp_rif_macvlan_flush(rif);
7634 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7635 			       mlxsw_sp_router_port(mlxsw_sp), false);
7636 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7637 			       mlxsw_sp_router_port(mlxsw_sp), false);
7638 	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7639 }
7640 
7641 static struct mlxsw_sp_fid *
7642 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
7643 			 struct netlink_ext_ack *extack)
7644 {
7645 	return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack);
7646 }
7647 
7648 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7649 {
7650 	struct switchdev_notifier_fdb_info info;
7651 	struct net_device *dev;
7652 
7653 	dev = br_fdb_find_port(rif->dev, mac, 0);
7654 	if (!dev)
7655 		return;
7656 
7657 	info.addr = mac;
7658 	info.vid = 0;
7659 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7660 				 NULL);
7661 }
7662 
7663 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
7664 	.type			= MLXSW_SP_RIF_TYPE_FID,
7665 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7666 	.configure		= mlxsw_sp_rif_fid_configure,
7667 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
7668 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
7669 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
7670 };
7671 
7672 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
7673 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
7674 	.rif_size		= sizeof(struct mlxsw_sp_rif),
7675 	.configure		= mlxsw_sp_rif_fid_configure,
7676 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
7677 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
7678 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
7679 };
7680 
7681 static struct mlxsw_sp_rif_ipip_lb *
7682 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
7683 {
7684 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
7685 }
7686 
7687 static void
7688 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
7689 			   const struct mlxsw_sp_rif_params *params)
7690 {
7691 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
7692 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
7693 
7694 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
7695 				 common);
7696 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
7697 	rif_lb->lb_config = params_lb->lb_config;
7698 }
7699 
7700 static int
7701 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7702 {
7703 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7704 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7705 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7706 	struct mlxsw_sp_vr *ul_vr;
7707 	int err;
7708 
7709 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
7710 	if (IS_ERR(ul_vr))
7711 		return PTR_ERR(ul_vr);
7712 
7713 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
7714 	if (err)
7715 		goto err_loopback_op;
7716 
7717 	lb_rif->ul_vr_id = ul_vr->id;
7718 	lb_rif->ul_rif_id = 0;
7719 	++ul_vr->rif_count;
7720 	return 0;
7721 
7722 err_loopback_op:
7723 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7724 	return err;
7725 }
7726 
7727 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7728 {
7729 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7730 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7731 	struct mlxsw_sp_vr *ul_vr;
7732 
7733 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
7734 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
7735 
7736 	--ul_vr->rif_count;
7737 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7738 }
7739 
7740 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
7741 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
7742 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
7743 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
7744 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
7745 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
7746 };
7747 
7748 const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
7749 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
7750 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
7751 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
7752 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
7753 };
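
/* Spectrum-1 and Spectrum-2 share all RIF flavours except the IPIP
 * loopback: Spectrum-1 binds the loopback to an underlay virtual router
 * (ul_vr_id), whereas Spectrum-2 binds it to a dedicated underlay RIF
 * (ul_rif_id); see mlxsw_sp2_rif_ops_arr below.
 */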
7754 
7755 static int
7756 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
7757 {
7758 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7759 	char ritr_pl[MLXSW_REG_RITR_LEN];
7760 
7761 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
7762 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
7763 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
7764 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
7765 
7766 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7767 }
7768 
7769 static struct mlxsw_sp_rif *
7770 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
7771 		       struct netlink_ext_ack *extack)
7772 {
7773 	struct mlxsw_sp_rif *ul_rif;
7774 	u16 rif_index;
7775 	int err;
7776 
7777 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
7778 	if (err) {
7779 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
7780 		return ERR_PTR(err);
7781 	}
7782 
7783 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
7784 	if (!ul_rif)
7785 		return ERR_PTR(-ENOMEM);
7786 
7787 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
7788 	ul_rif->mlxsw_sp = mlxsw_sp;
7789 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
7790 	if (err)
7791 		goto ul_rif_op_err;
7792 
7793 	return ul_rif;
7794 
7795 ul_rif_op_err:
7796 	mlxsw_sp->router->rifs[rif_index] = NULL;
7797 	kfree(ul_rif);
7798 	return ERR_PTR(err);
7799 }
7800 
7801 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
7802 {
7803 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7804 
7805 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
7806 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
7807 	kfree(ul_rif);
7808 }
7809 
7810 static struct mlxsw_sp_rif *
7811 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
7812 		    struct netlink_ext_ack *extack)
7813 {
7814 	struct mlxsw_sp_vr *vr;
7815 	int err;
7816 
7817 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
7818 	if (IS_ERR(vr))
7819 		return ERR_CAST(vr);
7820 
7821 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
7822 		return vr->ul_rif;
7823 
7824 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
7825 	if (IS_ERR(vr->ul_rif)) {
7826 		err = PTR_ERR(vr->ul_rif);
7827 		goto err_ul_rif_create;
7828 	}
7829 
7830 	vr->rif_count++;
7831 	refcount_set(&vr->ul_rif_refcnt, 1);
7832 
7833 	return vr->ul_rif;
7834 
7835 err_ul_rif_create:
7836 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7837 	return ERR_PTR(err);
7838 }
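
/* Get-or-create under RTNL: refcount_inc_not_zero() either takes a
 * reference on an existing underlay RIF, or falls through when the count
 * is zero so that a new RIF is created with the count reset to 1. RTNL
 * serializes both paths, so the zero -> one transition cannot race.
 */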
7839 
7840 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
7841 {
7842 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
7843 	struct mlxsw_sp_vr *vr;
7844 
7845 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
7846 
7847 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
7848 		return;
7849 
7850 	vr->rif_count--;
7851 	mlxsw_sp_ul_rif_destroy(ul_rif);
7852 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7853 }
7854 
7855 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
7856 			       u16 *ul_rif_index)
7857 {
7858 	struct mlxsw_sp_rif *ul_rif;
7859 
7860 	ASSERT_RTNL();
7861 
7862 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7863 	if (IS_ERR(ul_rif))
7864 		return PTR_ERR(ul_rif);
7865 	*ul_rif_index = ul_rif->rif_index;
7866 
7867 	return 0;
7868 }
7869 
7870 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
7871 {
7872 	struct mlxsw_sp_rif *ul_rif;
7873 
7874 	ASSERT_RTNL();
7875 
7876 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
7877 	if (WARN_ON(!ul_rif))
7878 		return;
7879 
7880 	mlxsw_sp_ul_rif_put(ul_rif);
7881 }
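
/* A minimal usage sketch of the get/put pair above, assuming a
 * hypothetical tunnel caller that already knows the underlay table ID.
 * Calls must be balanced and made under RTNL.
 */
static int __maybe_unused
mlxsw_sp_ul_rif_usage_sketch(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id)
{
	u16 ul_rif_index;
	int err;

	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, ul_tb_id, &ul_rif_index);
	if (err)
		return err;

	/* ... program tunnel decap against ul_rif_index here ... */

	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
	return 0;
}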
7882 
7883 static int
7884 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7885 {
7886 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7887 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7888 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7889 	struct mlxsw_sp_rif *ul_rif;
7890 	int err;
7891 
7892 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
7893 	if (IS_ERR(ul_rif))
7894 		return PTR_ERR(ul_rif);
7895 
7896 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
7897 	if (err)
7898 		goto err_loopback_op;
7899 
7900 	lb_rif->ul_vr_id = 0;
7901 	lb_rif->ul_rif_id = ul_rif->rif_index;
7902 
7903 	return 0;
7904 
7905 err_loopback_op:
7906 	mlxsw_sp_ul_rif_put(ul_rif);
7907 	return err;
7908 }
7909 
7910 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7911 {
7912 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7913 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7914 	struct mlxsw_sp_rif *ul_rif;
7915 
7916 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
7917 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
7918 	mlxsw_sp_ul_rif_put(ul_rif);
7919 }
7920 
7921 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
7922 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
7923 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
7924 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
7925 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
7926 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
7927 };
7928 
7929 const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
7930 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
7931 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
7932 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
7933 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
7934 };
7935 
7936 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
7937 {
7938 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7939 
7940 	mlxsw_sp->router->rifs = kcalloc(max_rifs,
7941 					 sizeof(struct mlxsw_sp_rif *),
7942 					 GFP_KERNEL);
7943 	if (!mlxsw_sp->router->rifs)
7944 		return -ENOMEM;
7945 
7946 	return 0;
7947 }
7948 
7949 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
7950 {
7951 	int i;
7952 
7953 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7954 		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
7955 
7956 	kfree(mlxsw_sp->router->rifs);
7957 }
7958 
7959 static int
7960 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
7961 {
7962 	char tigcr_pl[MLXSW_REG_TIGCR_LEN];
7963 
7964 	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
7965 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
7966 }
7967 
7968 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
7969 {
7970 	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
7971 	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
7972 	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
7973 }
7974 
7975 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
7976 {
7977 	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
7978 }
7979 
7980 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
7981 {
7982 	struct mlxsw_sp_router *router;
7983 
7984 	/* Flush pending FIB notifications and then flush the device's
7985 	 * table before requesting another dump. The FIB notification
7986 	 * block is unregistered, so no need to take RTNL.
7987 	 */
7988 	mlxsw_core_flush_owq();
7989 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7990 	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
7991 }
7992 
7993 #ifdef CONFIG_IP_ROUTE_MULTIPATH
7994 static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
7995 {
7996 	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
7997 }
7998 
7999 static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
8000 {
8001 	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
8002 }
8003 
8004 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8005 {
8006 	struct net *net = mlxsw_sp_net(mlxsw_sp);
8007 	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
8008 
8009 	mlxsw_sp_mp_hash_header_set(recr2_pl,
8010 				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
8011 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
8012 	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
8013 	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
8014 	if (only_l3)
8015 		return;
8016 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
8017 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
8018 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
8019 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
8020 }
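
/* This mirrors the kernel's own IPv4 multipath hash policy: with
 * net.ipv4.fib_multipath_hash_policy set to 0 only the L3 source and
 * destination addresses feed the hash; with 1 (L4) the IP protocol and
 * TCP/UDP ports are mixed in as well, matching the fields enabled above.
 */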
8021 
8022 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8023 {
8024 	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
8025 
8026 	mlxsw_sp_mp_hash_header_set(recr2_pl,
8027 				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
8028 	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
8029 	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
8030 	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
8031 	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
8032 	if (only_l3) {
8033 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8034 					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
8035 	} else {
8036 		mlxsw_sp_mp_hash_header_set(recr2_pl,
8037 					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
8038 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8039 					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
8040 		mlxsw_sp_mp_hash_field_set(recr2_pl,
8041 					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
8042 	}
8043 }
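
/* The IPv6 variant differs in one respect: in the L3-only case the flow
 * label is hashed as well, mirroring the kernel's
 * net.ipv6.fib_multipath_hash_policy behaviour.
 */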
8044 
8045 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8046 {
8047 	char recr2_pl[MLXSW_REG_RECR2_LEN];
8048 	u32 seed;
8049 
8050 	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
8051 	mlxsw_reg_recr2_pack(recr2_pl, seed);
8052 	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
8053 	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
8054 
8055 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
8056 }
8057 #else
8058 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8059 {
8060 	return 0;
8061 }
8062 #endif
8063 
8064 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
8065 {
8066 	char rdpm_pl[MLXSW_REG_RDPM_LEN];
8067 	unsigned int i;
8068 
8069 	MLXSW_REG_ZERO(rdpm, rdpm_pl);
8070 
8071 	/* The HW determines switch priority from the DSCP bits, while the
8072 	 * kernel still derives it from the full ToS byte. Since the fields
8073 	 * are offset from each other, translate each DSCP value to the ToS
8074 	 * value the kernel would observe by skipping the 2 least-significant
8075 	 * ECN bits.
8076 	 */
8076 	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
8077 		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
8078 
8079 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
8080 }
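
/* Worked example: DSCP 46 (EF) occupies the upper six bits of the ToS
 * byte, so the ToS value the kernel would observe is 46 << 2 = 0xb8, and
 * RDPM entry 46 is programmed with rt_tos2priority(0xb8).
 */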
8081 
8082 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
8083 {
8084 	struct net *net = mlxsw_sp_net(mlxsw_sp);
8085 	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
8086 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
8087 	u64 max_rifs;
8088 	int err;
8089 
8090 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
8091 		return -EIO;
8092 	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8093 
8094 	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
8095 	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
8096 	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
8097 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8101 }
8102 
8103 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8104 {
8105 	char rgcr_pl[MLXSW_REG_RGCR_LEN];
8106 
8107 	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
8108 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8109 }
8110 
8111 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
8112 			 struct netlink_ext_ack *extack)
8113 {
8114 	struct mlxsw_sp_router *router;
8115 	int err;
8116 
8117 	router = kzalloc(sizeof(*router), GFP_KERNEL);
8118 	if (!router)
8119 		return -ENOMEM;
8120 	mlxsw_sp->router = router;
8121 	router->mlxsw_sp = mlxsw_sp;
8122 
8123 	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
8124 	err = register_inetaddr_notifier(&router->inetaddr_nb);
8125 	if (err)
8126 		goto err_register_inetaddr_notifier;
8127 
8128 	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
8129 	err = register_inet6addr_notifier(&router->inet6addr_nb);
8130 	if (err)
8131 		goto err_register_inet6addr_notifier;
8132 
8133 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
8134 	err = __mlxsw_sp_router_init(mlxsw_sp);
8135 	if (err)
8136 		goto err_router_init;
8137 
8138 	err = mlxsw_sp_rifs_init(mlxsw_sp);
8139 	if (err)
8140 		goto err_rifs_init;
8141 
8142 	err = mlxsw_sp_ipips_init(mlxsw_sp);
8143 	if (err)
8144 		goto err_ipips_init;
8145 
8146 	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
8147 			      &mlxsw_sp_nexthop_ht_params);
8148 	if (err)
8149 		goto err_nexthop_ht_init;
8150 
8151 	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
8152 			      &mlxsw_sp_nexthop_group_ht_params);
8153 	if (err)
8154 		goto err_nexthop_group_ht_init;
8155 
8156 	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
8157 	err = mlxsw_sp_lpm_init(mlxsw_sp);
8158 	if (err)
8159 		goto err_lpm_init;
8160 
8161 	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
8162 	if (err)
8163 		goto err_mr_init;
8164 
8165 	err = mlxsw_sp_vrs_init(mlxsw_sp);
8166 	if (err)
8167 		goto err_vrs_init;
8168 
8169 	err = mlxsw_sp_neigh_init(mlxsw_sp);
8170 	if (err)
8171 		goto err_neigh_init;
8172 
8173 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
8174 				  &router->adj_discard_index);
8175 	if (err)
8176 		goto err_adj_discard_index_alloc;
8177 
8178 	mlxsw_sp->router->netevent_nb.notifier_call =
8179 		mlxsw_sp_router_netevent_event;
8180 	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8181 	if (err)
8182 		goto err_register_netevent_notifier;
8183 
8184 	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
8185 	if (err)
8186 		goto err_mp_hash_init;
8187 
8188 	err = mlxsw_sp_dscp_init(mlxsw_sp);
8189 	if (err)
8190 		goto err_dscp_init;
8191 
8192 	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
8193 	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8194 				    &mlxsw_sp->router->fib_nb,
8195 				    mlxsw_sp_router_fib_dump_flush, extack);
8196 	if (err)
8197 		goto err_register_fib_notifier;
8198 
8199 	return 0;
8200 
8201 err_register_fib_notifier:
8202 err_dscp_init:
8203 err_mp_hash_init:
8204 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8205 err_register_netevent_notifier:
8206 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
8207 			   router->adj_discard_index);
8208 err_adj_discard_index_alloc:
8209 	mlxsw_sp_neigh_fini(mlxsw_sp);
8210 err_neigh_init:
8211 	mlxsw_sp_vrs_fini(mlxsw_sp);
8212 err_vrs_init:
8213 	mlxsw_sp_mr_fini(mlxsw_sp);
8214 err_mr_init:
8215 	mlxsw_sp_lpm_fini(mlxsw_sp);
8216 err_lpm_init:
8217 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8218 err_nexthop_group_ht_init:
8219 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8220 err_nexthop_ht_init:
8221 	mlxsw_sp_ipips_fini(mlxsw_sp);
8222 err_ipips_init:
8223 	mlxsw_sp_rifs_fini(mlxsw_sp);
8224 err_rifs_init:
8225 	__mlxsw_sp_router_fini(mlxsw_sp);
8226 err_router_init:
8227 	unregister_inet6addr_notifier(&router->inet6addr_nb);
8228 err_register_inet6addr_notifier:
8229 	unregister_inetaddr_notifier(&router->inetaddr_nb);
8230 err_register_inetaddr_notifier:
8231 	kfree(mlxsw_sp->router);
8232 	return err;
8233 }
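
/* The error labels above unwind in exact reverse order of initialization;
 * mlxsw_sp_router_fini() below repeats the same teardown for the fully
 * initialized case.
 */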
8234 
8235 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8236 {
8237 	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8238 				&mlxsw_sp->router->fib_nb);
8239 	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8240 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
8241 			   mlxsw_sp->router->adj_discard_index);
8242 	mlxsw_sp_neigh_fini(mlxsw_sp);
8243 	mlxsw_sp_vrs_fini(mlxsw_sp);
8244 	mlxsw_sp_mr_fini(mlxsw_sp);
8245 	mlxsw_sp_lpm_fini(mlxsw_sp);
8246 	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8247 	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8248 	mlxsw_sp_ipips_fini(mlxsw_sp);
8249 	mlxsw_sp_rifs_fini(mlxsw_sp);
8250 	__mlxsw_sp_router_fini(mlxsw_sp);
8251 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
8252 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
8253 	kfree(mlxsw_sp->router);
8254 }
8255