/* drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * (revision f2161d5f1aae21a42b0a64d87e10cb31db423f42)
 */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <linux/xarray.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

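/* A CRIF ("candidate RIF") tracks a netdevice for which a RIF may need to
 * be created, whether or not one currently exists. CRIFs live in a
 * rhashtable keyed by the netdevice, and nexthops that resolve through the
 * device are linked to the CRIF rather than to the RIF itself, so they can
 * outlive RIF destruction.
 */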
struct mlxsw_sp_crif_key {
	struct net_device *dev;
};

struct mlxsw_sp_crif {
	struct mlxsw_sp_crif_key key;
	struct rhash_head ht_node;
	bool can_destroy;
	struct list_head nexthop_list;
	struct mlxsw_sp_rif *rif;
};

static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_crif, key),
	.key_len = sizeof_field(struct mlxsw_sp_crif, key),
	.head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
};

struct mlxsw_sp_rif {
	struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
	netdevice_tracker dev_tracker;
	struct list_head neigh_list;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u8 mac_profile_id;
	u8 rif_entries;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	if (!rif->crif)
		return NULL;
	return rif->crif->key.dev;
}

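/* Parameters for RIF creation. The anonymous union carries either a local
 * port or a LAG ID; the 'lag' flag says which member is valid.
 */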
struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
	bool double_entry;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Spectrum-1. */
	u16 ul_rif_id;	/* Spectrum-2+. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 const struct mlxsw_sp_rif_params *params,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_rif_mac_profile {
	unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

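/* Binding or unbinding a counter is a read-modify-write of the RITR
 * register: query the RIF's current record, patch in the counter index
 * and direction, and write it back.
 */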
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
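
/* A minimal usage sketch (hypothetical caller, error handling elided):
 * a counter must first be allocated for the direction of interest and
 * can then be read non-destructively:
 *
 *	u64 packets;
 *
 *	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 *	mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *				       MLXSW_SP_RIF_COUNTER_EGRESS, &packets);
 *
 * The RICNT query above uses the NOP opcode, so reading does not clear
 * the hardware counter; mlxsw_sp_rif_counter_fetch_clear() below does.
 */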

struct mlxsw_sp_rif_counter_set_basic {
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};

static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (!set)
		return 0;

#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME)				\
		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))

	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);

#undef MLXSW_SP_RIF_COUNTER_EXTRACT

	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

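/* Allocating a RIF counter is a three-step sequence: reserve an index
 * from the RIF counter sub-pool, clear the hardware cell it names, and
 * only then bind it to the RIF via RITR. The validity flag is set last,
 * so a counter is never reported valid while only partially set up.
 */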
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

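/* One bin per possible prefix length: IPv6 allows /0 through /128, hence
 * 16 bytes * 8 bits + 1 = 129 bins, which also covers IPv4's /0 to /32.
 */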
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	dscp_t dscp;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

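/* Hardware LPM trees are a shared resource: each tree records which prefix
 * lengths it supports (prefix_usage) and how many routes use each length
 * (prefix_ref_count), and is handed out by reference count to every FIB
 * whose usage pattern matches.
 */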
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	refcount_t ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (refcount_read(&lpm_tree->ref_count) == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

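/* The hardware tree (RALST) is programmed here as a degenerate binary
 * tree: the longest used prefix length becomes the root bin, and each
 * bin's left child is the next shorter used length, terminated with
 * MLXSW_REG_RALST_BIN_NO_CHILD (length zero needs no bin of its own).
 * This ordering means longer, more specific prefixes are matched first.
 */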
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	refcount_set(&lpm_tree->ref_count, 1);
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (refcount_read(&lpm_tree->ref_count) &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	refcount_inc(&lpm_tree->ref_count);
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (!refcount_dec_and_test(&lpm_tree->ref_count))
		return;
	mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

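/* The kernel consults the local, main and default tables as one logical
 * lookup chain, so the driver folds all three into a single virtual
 * router keyed by RT_TABLE_MAIN: a route from the local table and one
 * from the main table land in the same hardware VR.
 */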
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

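/* Rebind every virtual router currently using the per-protocol default
 * tree to 'new_tree'. On failure, the rollback loop walks back over the
 * VRs already converted and rebinds them to the old tree, so the change
 * is all-or-nothing.
 */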
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		/* Re-point 'vr' at each previously converted router before
		 * rolling it back to the old tree.
		 */
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static void
mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
{
	crif->key.dev = dev;
	INIT_LIST_HEAD(&crif->nexthop_list);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_alloc(struct net_device *dev)
{
	struct mlxsw_sp_crif *crif;

	crif = kzalloc(sizeof(*crif), GFP_KERNEL);
	if (!crif)
		return NULL;

	mlxsw_sp_crif_init(crif, dev);
	return crif;
}

static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
{
	if (WARN_ON(crif->rif))
		return;

	WARN_ON(!list_empty(&crif->nexthop_list));
	kfree(crif);
}

static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
				struct mlxsw_sp_crif *crif)
{
	return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
				      mlxsw_sp_crif_ht_params);
}

static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
				 struct mlxsw_sp_crif *crif)
{
	rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
			       mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_crif *
mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
		     const struct net_device *dev)
{
	struct mlxsw_sp_crif_key key = {
		.dev = (struct net_device *)dev,
	};

	return rhashtable_lookup_fast(&router->crif_ht, &key,
				      mlxsw_sp_crif_ht_params);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

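/* An IPIP tunnel's overlay device is backed by a loopback RIF: packets
 * routed through the tunnel are looped back into the router so that a
 * second lookup can be done in the underlay table. The tunnel-type ops
 * provide the loopback configuration (underlay protocol, source address,
 * keys and so on).
 */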
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.common.double_entry = ipip_ops->double_rif_entry,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnel types require the default parsing depth (96 bytes)
	 * to be increased.
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}

static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}

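/* Making a FIB entry a decap entry requires a KVDL adjacency entry whose
 * index (tunnel_index) names the tunnel's decap context, and, for some
 * tunnel types, a deeper packet parsing depth so that the inner headers
 * of received tunnel packets can be inspected.
 */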
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	/* Free the index allocated above; fib_entry->decap has not been
	 * populated yet on this path.
	 */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   tunnel_index);
1294 	return err;
1295 }
1296 
mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fib_entry * fib_entry)1297 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1298 					  struct mlxsw_sp_fib_entry *fib_entry)
1299 {
1300 	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
1301 
1302 	/* Unlink this node from the IPIP entry that it's the decap entry of. */
1303 	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1304 	fib_entry->decap.ipip_entry = NULL;
1305 	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
1306 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1307 			   1, fib_entry->decap.tunnel_index);
1308 }
1309 
1310 static struct mlxsw_sp_fib_node *
1311 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1312 			 size_t addr_len, unsigned char prefix_len);
1313 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1314 				     struct mlxsw_sp_fib_entry *fib_entry);
1315 
1316 static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry)1317 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1318 				 struct mlxsw_sp_ipip_entry *ipip_entry)
1319 {
1320 	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1321 
1322 	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1323 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1324 
1325 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1326 }
1327 
1328 static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry,struct mlxsw_sp_fib_entry * decap_fib_entry)1329 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1330 				  struct mlxsw_sp_ipip_entry *ipip_entry,
1331 				  struct mlxsw_sp_fib_entry *decap_fib_entry)
1332 {
1333 	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1334 					  ipip_entry))
1335 		return;
1336 	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1337 
1338 	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1339 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1340 }
1341 
1342 static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp * mlxsw_sp,u32 tb_id,enum mlxsw_sp_l3proto proto,const union mlxsw_sp_l3addr * addr,enum mlxsw_sp_fib_entry_type type)1343 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1344 				     enum mlxsw_sp_l3proto proto,
1345 				     const union mlxsw_sp_l3addr *addr,
1346 				     enum mlxsw_sp_fib_entry_type type)
1347 {
1348 	struct mlxsw_sp_fib_node *fib_node;
1349 	unsigned char addr_prefix_len;
1350 	struct mlxsw_sp_fib *fib;
1351 	struct mlxsw_sp_vr *vr;
1352 	const void *addrp;
1353 	size_t addr_len;
1354 	u32 addr4;
1355 
1356 	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1357 	if (!vr)
1358 		return NULL;
1359 	fib = mlxsw_sp_vr_fib(vr, proto);
1360 
1361 	switch (proto) {
1362 	case MLXSW_SP_L3_PROTO_IPV4:
1363 		addr4 = be32_to_cpu(addr->addr4);
1364 		addrp = &addr4;
1365 		addr_len = 4;
1366 		addr_prefix_len = 32;
1367 		break;
1368 	case MLXSW_SP_L3_PROTO_IPV6:
1369 		addrp = &addr->addr6;
1370 		addr_len = 16;
1371 		addr_prefix_len = 128;
1372 		break;
1373 	default:
1374 		WARN_ON(1);
1375 		return NULL;
1376 	}
1377 
1378 	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1379 					    addr_prefix_len);
1380 	if (!fib_node || fib_node->fib_entry->type != type)
1381 		return NULL;
1382 
1383 	return fib_node->fib_entry;
1384 }
1385 
1386 /* Given an IPIP entry, find the corresponding decap route. */
1387 static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry)1388 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1389 			       struct mlxsw_sp_ipip_entry *ipip_entry)
1390 {
1391 	static struct mlxsw_sp_fib_node *fib_node;
1392 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1393 	unsigned char saddr_prefix_len;
1394 	union mlxsw_sp_l3addr saddr;
1395 	struct mlxsw_sp_fib *ul_fib;
1396 	struct mlxsw_sp_vr *ul_vr;
1397 	const void *saddrp;
1398 	size_t saddr_len;
1399 	u32 ul_tb_id;
1400 	u32 saddr4;
1401 
1402 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1403 
1404 	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1405 	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1406 	if (!ul_vr)
1407 		return NULL;
1408 
1409 	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1410 	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1411 					   ipip_entry->ol_dev);
1412 
1413 	switch (ipip_ops->ul_proto) {
1414 	case MLXSW_SP_L3_PROTO_IPV4:
1415 		saddr4 = be32_to_cpu(saddr.addr4);
1416 		saddrp = &saddr4;
1417 		saddr_len = 4;
1418 		saddr_prefix_len = 32;
1419 		break;
1420 	case MLXSW_SP_L3_PROTO_IPV6:
1421 		saddrp = &saddr.addr6;
1422 		saddr_len = 16;
1423 		saddr_prefix_len = 128;
1424 		break;
1425 	default:
1426 		WARN_ON(1);
1427 		return NULL;
1428 	}
1429 
1430 	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1431 					    saddr_prefix_len);
1432 	if (!fib_node ||
1433 	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1434 		return NULL;
1435 
1436 	return fib_node->fib_entry;
1437 }
1438 
1439 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp * mlxsw_sp,enum mlxsw_sp_ipip_type ipipt,struct net_device * ol_dev)1440 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1441 			   enum mlxsw_sp_ipip_type ipipt,
1442 			   struct net_device *ol_dev)
1443 {
1444 	struct mlxsw_sp_ipip_entry *ipip_entry;
1445 
1446 	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1447 	if (IS_ERR(ipip_entry))
1448 		return ipip_entry;
1449 
1450 	list_add_tail(&ipip_entry->ipip_list_node,
1451 		      &mlxsw_sp->router->ipip_list);
1452 
1453 	return ipip_entry;
1454 }
1455 
1456 static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry)1457 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1458 			    struct mlxsw_sp_ipip_entry *ipip_entry)
1459 {
1460 	list_del(&ipip_entry->ipip_list_node);
1461 	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
1462 }
1463 
1464 static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp * mlxsw_sp,const struct net_device * ul_dev,enum mlxsw_sp_l3proto ul_proto,union mlxsw_sp_l3addr ul_dip,struct mlxsw_sp_ipip_entry * ipip_entry)1465 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1466 				  const struct net_device *ul_dev,
1467 				  enum mlxsw_sp_l3proto ul_proto,
1468 				  union mlxsw_sp_l3addr ul_dip,
1469 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1470 {
1471 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1472 	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1473 
1474 	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1475 		return false;
1476 
1477 	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1478 						 ul_tb_id, ipip_entry);
1479 }
1480 
1481 /* Given decap parameters, find the corresponding IPIP entry. */
1482 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp * mlxsw_sp,int ul_dev_ifindex,enum mlxsw_sp_l3proto ul_proto,union mlxsw_sp_l3addr ul_dip)1483 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
1484 				  enum mlxsw_sp_l3proto ul_proto,
1485 				  union mlxsw_sp_l3addr ul_dip)
1486 {
1487 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1488 	struct net_device *ul_dev;
1489 
1490 	rcu_read_lock();
1491 
1492 	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
1493 	if (!ul_dev)
1494 		goto out_unlock;
1495 
1496 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1497 			    ipip_list_node)
1498 		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1499 						      ul_proto, ul_dip,
1500 						      ipip_entry))
1501 			goto out_unlock;
1502 
1503 	rcu_read_unlock();
1504 
1505 	return NULL;
1506 
1507 out_unlock:
1508 	rcu_read_unlock();
1509 	return ipip_entry;
1510 }
1511 
mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp * mlxsw_sp,const struct net_device * dev,enum mlxsw_sp_ipip_type * p_type)1512 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1513 				      const struct net_device *dev,
1514 				      enum mlxsw_sp_ipip_type *p_type)
1515 {
1516 	struct mlxsw_sp_router *router = mlxsw_sp->router;
1517 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1518 	enum mlxsw_sp_ipip_type ipipt;
1519 
1520 	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1521 		ipip_ops = router->ipip_ops_arr[ipipt];
1522 		if (dev->type == ipip_ops->dev_type) {
1523 			if (p_type)
1524 				*p_type = ipipt;
1525 			return true;
1526 		}
1527 	}
1528 	return false;
1529 }
1530 
mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)1531 static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1532 				       const struct net_device *dev)
1533 {
1534 	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1535 }
1536 
1537 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp * mlxsw_sp,const struct net_device * ol_dev)1538 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1539 				   const struct net_device *ol_dev)
1540 {
1541 	struct mlxsw_sp_ipip_entry *ipip_entry;
1542 
1543 	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1544 			    ipip_list_node)
1545 		if (ipip_entry->ol_dev == ol_dev)
1546 			return ipip_entry;
1547 
1548 	return NULL;
1549 }
1550 
1551 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp * mlxsw_sp,const struct net_device * ul_dev,struct mlxsw_sp_ipip_entry * start)1552 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1553 				   const struct net_device *ul_dev,
1554 				   struct mlxsw_sp_ipip_entry *start)
1555 {
1556 	struct mlxsw_sp_ipip_entry *ipip_entry;
1557 
1558 	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1559 					ipip_list_node);
1560 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1561 				     ipip_list_node) {
1562 		struct net_device *ol_dev = ipip_entry->ol_dev;
1563 		struct net_device *ipip_ul_dev;
1564 
1565 		rcu_read_lock();
1566 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1567 		rcu_read_unlock();
1568 
1569 		if (ipip_ul_dev == ul_dev)
1570 			return ipip_entry;
1571 	}
1572 
1573 	return NULL;
1574 }
1575 
mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)1576 static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1577 				       const struct net_device *dev)
1578 {
1579 	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1580 }
1581 
mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp * mlxsw_sp,const struct net_device * ol_dev,enum mlxsw_sp_ipip_type ipipt)1582 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1583 						const struct net_device *ol_dev,
1584 						enum mlxsw_sp_ipip_type ipipt)
1585 {
1586 	const struct mlxsw_sp_ipip_ops *ops
1587 		= mlxsw_sp->router->ipip_ops_arr[ipipt];
1588 
1589 	return ops->can_offload(mlxsw_sp, ol_dev);
1590 }
1591 
mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp * mlxsw_sp,struct net_device * ol_dev)1592 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1593 						struct net_device *ol_dev)
1594 {
1595 	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1596 	struct mlxsw_sp_ipip_entry *ipip_entry;
1597 	enum mlxsw_sp_l3proto ul_proto;
1598 	union mlxsw_sp_l3addr saddr;
1599 	u32 ul_tb_id;
1600 
1601 	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1602 	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1603 		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1604 		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1605 		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1606 		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1607 							  saddr, ul_tb_id,
1608 							  NULL)) {
1609 			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1610 								ol_dev);
1611 			if (IS_ERR(ipip_entry))
1612 				return PTR_ERR(ipip_entry);
1613 		}
1614 	}
1615 
1616 	return 0;
1617 }
1618 
1619 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1620 						   struct net_device *ol_dev)
1621 {
1622 	struct mlxsw_sp_ipip_entry *ipip_entry;
1623 
1624 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1625 	if (ipip_entry)
1626 		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1627 }
1628 
1629 static void
1630 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1631 				struct mlxsw_sp_ipip_entry *ipip_entry)
1632 {
1633 	struct mlxsw_sp_fib_entry *decap_fib_entry;
1634 
1635 	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1636 	if (decap_fib_entry)
1637 		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1638 						  decap_fib_entry);
1639 }
1640 
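/* Write the loopback RIF that backs an IPIP tunnel to the device via the
 * RITR register. The underlay is referenced either by VR (ul_vr_id, on
 * Spectrum-1) or by RIF (ul_rif_id, on Spectrum-2 and later); see the
 * matching fields of struct mlxsw_sp_rif_ipip_lb.
 */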
1641 static int
1642 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1643 			u16 ul_rif_id, bool enable)
1644 {
1645 	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1646 	struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
1647 	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
1648 	struct mlxsw_sp_rif *rif = &lb_rif->common;
1649 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1650 	char ritr_pl[MLXSW_REG_RITR_LEN];
1651 	struct in6_addr *saddr6;
1652 	u32 saddr4;
1653 
1654 	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
1655 	switch (lb_cf.ul_protocol) {
1656 	case MLXSW_SP_L3_PROTO_IPV4:
1657 		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1658 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1659 				    rif->rif_index, rif->vr_id, dev->mtu);
1660 		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1661 						   ipip_options, ul_vr_id,
1662 						   ul_rif_id, saddr4,
1663 						   lb_cf.okey);
1664 		break;
1665 
1666 	case MLXSW_SP_L3_PROTO_IPV6:
1667 		saddr6 = &lb_cf.saddr.addr6;
1668 		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1669 				    rif->rif_index, rif->vr_id, dev->mtu);
1670 		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
1671 						   ipip_options, ul_vr_id,
1672 						   ul_rif_id, saddr6,
1673 						   lb_cf.okey);
1674 		break;
1675 	}
1676 
1677 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1678 }
1679 
1680 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1681 						 struct net_device *ol_dev)
1682 {
1683 	struct mlxsw_sp_ipip_entry *ipip_entry;
1684 	struct mlxsw_sp_rif_ipip_lb *lb_rif;
1685 	int err = 0;
1686 
1687 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1688 	if (ipip_entry) {
1689 		lb_rif = ipip_entry->ol_lb;
1690 		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1691 					      lb_rif->ul_rif_id, true);
1692 		if (err)
1693 			goto out;
1694 		lb_rif->common.mtu = ol_dev->mtu;
1695 	}
1696 
1697 out:
1698 	return err;
1699 }
1700 
1701 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1702 						struct net_device *ol_dev)
1703 {
1704 	struct mlxsw_sp_ipip_entry *ipip_entry;
1705 
1706 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1707 	if (ipip_entry)
1708 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1709 }
1710 
1711 static void
1712 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1713 				  struct mlxsw_sp_ipip_entry *ipip_entry)
1714 {
1715 	if (ipip_entry->decap_fib_entry)
1716 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1717 }
1718 
1719 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1720 						  struct net_device *ol_dev)
1721 {
1722 	struct mlxsw_sp_ipip_entry *ipip_entry;
1723 
1724 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1725 	if (ipip_entry)
1726 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1727 }
1728 
1729 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1730 					struct mlxsw_sp_rif *rif);
1731 
1732 static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
1733 					 struct mlxsw_sp_rif *old_rif,
1734 					 struct mlxsw_sp_rif *new_rif,
1735 					 bool migrate_nhs)
1736 {
1737 	struct mlxsw_sp_crif *crif = old_rif->crif;
1738 	struct mlxsw_sp_crif mock_crif = {};
1739 
1740 	if (migrate_nhs)
1741 		mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
1742 
1743 	/* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
1744 	 * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
1745 	 */
1746 	mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
1747 	old_rif->crif = &mock_crif;
1748 	mock_crif.rif = old_rif;
1749 	mlxsw_sp_rif_destroy(old_rif);
1750 }
1751 
1752 static int
1753 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1754 				 struct mlxsw_sp_ipip_entry *ipip_entry,
1755 				 bool keep_encap,
1756 				 struct netlink_ext_ack *extack)
1757 {
1758 	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1759 	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1760 
1761 	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1762 						     ipip_entry->ipipt,
1763 						     ipip_entry->ol_dev,
1764 						     extack);
1765 	if (IS_ERR(new_lb_rif))
1766 		return PTR_ERR(new_lb_rif);
1767 	ipip_entry->ol_lb = new_lb_rif;
1768 
1769 	mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
1770 				     &new_lb_rif->common, keep_encap);
1771 	return 0;
1772 }
1773 
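/* Illustrative summary of how the call sites below combine the flags of
 * __mlxsw_sp_ipip_entry_update_tunnel():
 *
 *   OL netdev moved to a VRF  -> recreate_loopback=true,  keep_encap=false
 *   UL netdev moved to a VRF  -> recreate_loopback=true,  keep_encap=true
 *   UL netdev went up or down -> update_nexthops=true (loopback is kept)
 */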
1774 /**
1775  * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1776  * @mlxsw_sp: mlxsw_sp.
1777  * @ipip_entry: IPIP entry.
1778  * @recreate_loopback: Recreates the associated loopback RIF.
1779  * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1780  *              relevant when recreate_loopback is true.
1781  * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1782  *                   is only relevant when recreate_loopback is false.
1783  * @extack: extack.
1784  *
1785  * Return: Non-zero value on failure.
1786  */
1787 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1788 					struct mlxsw_sp_ipip_entry *ipip_entry,
1789 					bool recreate_loopback,
1790 					bool keep_encap,
1791 					bool update_nexthops,
1792 					struct netlink_ext_ack *extack)
1793 {
1794 	int err;
1795 
1796 	/* RIFs can't be edited, so to update loopback, we need to destroy and
1797 	 * recreate it. That creates a window of opportunity where RALUE and
1798 	 * RATR registers end up referencing a RIF that's already gone. RATRs
1799 	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1800 	 * of RALUE, demote the decap route back.
1801 	 */
1802 	if (ipip_entry->decap_fib_entry)
1803 		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1804 
1805 	if (recreate_loopback) {
1806 		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1807 						       keep_encap, extack);
1808 		if (err)
1809 			return err;
1810 	} else if (update_nexthops) {
1811 		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1812 					    &ipip_entry->ol_lb->common);
1813 	}
1814 
1815 	if (ipip_entry->ol_dev->flags & IFF_UP)
1816 		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1817 
1818 	return 0;
1819 }
1820 
1821 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1822 						struct net_device *ol_dev,
1823 						struct netlink_ext_ack *extack)
1824 {
1825 	struct mlxsw_sp_ipip_entry *ipip_entry =
1826 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1827 
1828 	if (!ipip_entry)
1829 		return 0;
1830 
1831 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1832 						   true, false, false, extack);
1833 }
1834 
1835 static int
1836 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1837 				     struct mlxsw_sp_ipip_entry *ipip_entry,
1838 				     struct net_device *ul_dev,
1839 				     bool *demote_this,
1840 				     struct netlink_ext_ack *extack)
1841 {
1842 	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1843 	enum mlxsw_sp_l3proto ul_proto;
1844 	union mlxsw_sp_l3addr saddr;
1845 
1846 	/* Moving underlay to a different VRF might cause local address
1847 	 * conflict, and the conflicting tunnels need to be demoted.
1848 	 */
1849 	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1850 	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1851 	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1852 						 saddr, ul_tb_id,
1853 						 ipip_entry)) {
1854 		*demote_this = true;
1855 		return 0;
1856 	}
1857 
1858 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1859 						   true, true, false, extack);
1860 }
1861 
1862 static int
1863 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1864 				    struct mlxsw_sp_ipip_entry *ipip_entry,
1865 				    struct net_device *ul_dev)
1866 {
1867 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1868 						   false, false, true, NULL);
1869 }
1870 
1871 static int
1872 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1873 				      struct mlxsw_sp_ipip_entry *ipip_entry,
1874 				      struct net_device *ul_dev)
1875 {
1876 	/* A down underlay device causes encapsulated packets to not be
1877 	 * forwarded, but decap still works. So refresh next hops without
1878 	 * touching anything else.
1879 	 */
1880 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1881 						   false, false, true, NULL);
1882 }
1883 
1884 static int
1885 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1886 					struct net_device *ol_dev,
1887 					struct netlink_ext_ack *extack)
1888 {
1889 	const struct mlxsw_sp_ipip_ops *ipip_ops;
1890 	struct mlxsw_sp_ipip_entry *ipip_entry;
1891 	int err;
1892 
1893 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1894 	if (!ipip_entry)
1895 		/* A change might make a tunnel eligible for offloading, but
1896 		 * that is currently not implemented. What falls to slow path
1897 		 * stays there.
1898 		 */
1899 		return 0;
1900 
1901 	/* A change might make a tunnel not eligible for offloading. */
1902 	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1903 						 ipip_entry->ipipt)) {
1904 		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1905 		return 0;
1906 	}
1907 
1908 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1909 	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1910 	return err;
1911 }
1912 
1913 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1914 				       struct mlxsw_sp_ipip_entry *ipip_entry)
1915 {
1916 	struct net_device *ol_dev = ipip_entry->ol_dev;
1917 
1918 	if (ol_dev->flags & IFF_UP)
1919 		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1920 	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1921 }
1922 
1923 /* The configuration where several tunnels have the same local address in the
1924  * same underlay table needs special treatment in the HW. That is currently not
1925  * implemented in the driver. This function finds and demotes the first tunnel
1926  * with a given source address, except the one passed in the argument
1927  * `except'.
1928  */
1929 bool
1930 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1931 				     enum mlxsw_sp_l3proto ul_proto,
1932 				     union mlxsw_sp_l3addr saddr,
1933 				     u32 ul_tb_id,
1934 				     const struct mlxsw_sp_ipip_entry *except)
1935 {
1936 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1937 
1938 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1939 				 ipip_list_node) {
1940 		if (ipip_entry != except &&
1941 		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1942 						      ul_tb_id, ipip_entry)) {
1943 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1944 			return true;
1945 		}
1946 	}
1947 
1948 	return false;
1949 }
1950 
1951 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1952 						     struct net_device *ul_dev)
1953 {
1954 	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1955 
1956 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1957 				 ipip_list_node) {
1958 		struct net_device *ol_dev = ipip_entry->ol_dev;
1959 		struct net_device *ipip_ul_dev;
1960 
1961 		rcu_read_lock();
1962 		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1963 		rcu_read_unlock();
1964 		if (ipip_ul_dev == ul_dev)
1965 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1966 	}
1967 }
1968 
1969 static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1970 					    struct net_device *ol_dev,
1971 					    unsigned long event,
1972 					    struct netdev_notifier_info *info)
1973 {
1974 	struct netdev_notifier_changeupper_info *chup;
1975 	struct netlink_ext_ack *extack;
1976 	int err = 0;
1977 
1978 	switch (event) {
1979 	case NETDEV_REGISTER:
1980 		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1981 		break;
1982 	case NETDEV_UNREGISTER:
1983 		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1984 		break;
1985 	case NETDEV_UP:
1986 		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1987 		break;
1988 	case NETDEV_DOWN:
1989 		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1990 		break;
1991 	case NETDEV_CHANGEUPPER:
1992 		chup = container_of(info, typeof(*chup), info);
1993 		extack = info->extack;
1994 		if (netif_is_l3_master(chup->upper_dev))
1995 			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1996 								   ol_dev,
1997 								   extack);
1998 		break;
1999 	case NETDEV_CHANGE:
2000 		extack = info->extack;
2001 		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
2002 							      ol_dev, extack);
2003 		break;
2004 	case NETDEV_CHANGEMTU:
2005 		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
2006 		break;
2007 	}
2008 	return err;
2009 }
2010 
2011 static int
2012 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2013 				   struct mlxsw_sp_ipip_entry *ipip_entry,
2014 				   struct net_device *ul_dev,
2015 				   bool *demote_this,
2016 				   unsigned long event,
2017 				   struct netdev_notifier_info *info)
2018 {
2019 	struct netdev_notifier_changeupper_info *chup;
2020 	struct netlink_ext_ack *extack;
2021 
2022 	switch (event) {
2023 	case NETDEV_CHANGEUPPER:
2024 		chup = container_of(info, typeof(*chup), info);
2025 		extack = info->extack;
2026 		if (netif_is_l3_master(chup->upper_dev))
2027 			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
2028 								    ipip_entry,
2029 								    ul_dev,
2030 								    demote_this,
2031 								    extack);
2032 		break;
2033 
2034 	case NETDEV_UP:
2035 		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
2036 							   ul_dev);
2037 	case NETDEV_DOWN:
2038 		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
2039 							     ipip_entry,
2040 							     ul_dev);
2041 	}
2042 	return 0;
2043 }
2044 
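/* Propagate an event on an underlay device to every tunnel that uses it. If
 * a tunnel has to be demoted mid-walk, the iterator is rewound to the
 * previous list entry (or back to the list head), because demotion unlinks
 * the current entry.
 */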
2045 static int
2046 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
2047 				 struct net_device *ul_dev,
2048 				 unsigned long event,
2049 				 struct netdev_notifier_info *info)
2050 {
2051 	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
2052 	int err;
2053 
2054 	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
2055 								ul_dev,
2056 								ipip_entry))) {
2057 		struct mlxsw_sp_ipip_entry *prev;
2058 		bool demote_this = false;
2059 
2060 		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
2061 							 ul_dev, &demote_this,
2062 							 event, info);
2063 		if (err) {
2064 			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
2065 								 ul_dev);
2066 			return err;
2067 		}
2068 
2069 		if (demote_this) {
2070 			if (list_is_first(&ipip_entry->ipip_list_node,
2071 					  &mlxsw_sp->router->ipip_list))
2072 				prev = NULL;
2073 			else
2074 				/* This can't be cached from previous iteration,
2075 				 * because that entry could be gone now.
2076 				 */
2077 				prev = list_prev_entry(ipip_entry,
2078 						       ipip_list_node);
2079 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
2080 			ipip_entry = prev;
2081 		}
2082 	}
2083 
2084 	return 0;
2085 }
2086 
2087 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2088 				      enum mlxsw_sp_l3proto ul_proto,
2089 				      const union mlxsw_sp_l3addr *ul_sip,
2090 				      u32 tunnel_index)
2091 {
2092 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2093 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2094 	struct mlxsw_sp_fib_entry *fib_entry;
2095 	int err = 0;
2096 
2097 	mutex_lock(&mlxsw_sp->router->lock);
2098 
2099 	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
2100 		err = -EINVAL;
2101 		goto out;
2102 	}
2103 
2104 	router->nve_decap_config.ul_tb_id = ul_tb_id;
2105 	router->nve_decap_config.tunnel_index = tunnel_index;
2106 	router->nve_decap_config.ul_proto = ul_proto;
2107 	router->nve_decap_config.ul_sip = *ul_sip;
2108 	router->nve_decap_config.valid = true;
2109 
2110 	/* It is valid to create a tunnel with a local IP and only later
2111 	 * assign this IP address to a local interface.
2112 	 */
2113 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2114 							 ul_proto, ul_sip,
2115 							 type);
2116 	if (!fib_entry)
2117 		goto out;
2118 
2119 	fib_entry->decap.tunnel_index = tunnel_index;
2120 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2121 
2122 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2123 	if (err)
2124 		goto err_fib_entry_update;
2125 
2126 	goto out;
2127 
2128 err_fib_entry_update:
2129 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2130 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2131 out:
2132 	mutex_unlock(&mlxsw_sp->router->lock);
2133 	return err;
2134 }
2135 
2136 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
2137 				      enum mlxsw_sp_l3proto ul_proto,
2138 				      const union mlxsw_sp_l3addr *ul_sip)
2139 {
2140 	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
2141 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2142 	struct mlxsw_sp_fib_entry *fib_entry;
2143 
2144 	mutex_lock(&mlxsw_sp->router->lock);
2145 
2146 	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
2147 		goto out;
2148 
2149 	router->nve_decap_config.valid = false;
2150 
2151 	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2152 							 ul_proto, ul_sip,
2153 							 type);
2154 	if (!fib_entry)
2155 		goto out;
2156 
2157 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2158 	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2159 out:
2160 	mutex_unlock(&mlxsw_sp->router->lock);
2161 }
2162 
2163 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2164 					 u32 ul_tb_id,
2165 					 enum mlxsw_sp_l3proto ul_proto,
2166 					 const union mlxsw_sp_l3addr *ul_sip)
2167 {
2168 	struct mlxsw_sp_router *router = mlxsw_sp->router;
2169 
2170 	return router->nve_decap_config.valid &&
2171 	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
2172 	       router->nve_decap_config.ul_proto == ul_proto &&
2173 	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2174 		       sizeof(*ul_sip));
2175 }
2176 
2177 struct mlxsw_sp_neigh_key {
2178 	struct neighbour *n;
2179 };
2180 
2181 struct mlxsw_sp_neigh_entry {
2182 	struct list_head rif_list_node;
2183 	struct rhash_head ht_node;
2184 	struct mlxsw_sp_neigh_key key;
2185 	u16 rif;
2186 	bool connected;
2187 	unsigned char ha[ETH_ALEN];
2188 	struct list_head nexthop_list; /* list of nexthops using
2189 					* this neigh entry
2190 					*/
2191 	struct list_head nexthop_neighs_list_node;
2192 	unsigned int counter_index;
2193 	bool counter_valid;
2194 };
2195 
2196 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2197 	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2198 	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2199 	.key_len = sizeof(struct mlxsw_sp_neigh_key),
2200 };
2201 
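/* Iterate over the neighbour entries of a RIF: pass NULL to get the first
 * entry and the previous return value to get the next one; returns NULL past
 * the end of the list.
 */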
2202 struct mlxsw_sp_neigh_entry *
2203 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2204 			struct mlxsw_sp_neigh_entry *neigh_entry)
2205 {
2206 	if (!neigh_entry) {
2207 		if (list_empty(&rif->neigh_list))
2208 			return NULL;
2209 		else
2210 			return list_first_entry(&rif->neigh_list,
2211 						typeof(*neigh_entry),
2212 						rif_list_node);
2213 	}
2214 	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2215 		return NULL;
2216 	return list_next_entry(neigh_entry, rif_list_node);
2217 }
2218 
2219 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2220 {
2221 	return neigh_entry->key.n->tbl->family;
2222 }
2223 
2224 unsigned char *
2225 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2226 {
2227 	return neigh_entry->ha;
2228 }
2229 
2230 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2231 {
2232 	struct neighbour *n;
2233 
2234 	n = neigh_entry->key.n;
2235 	return ntohl(*((__be32 *) n->primary_key));
2236 }
2237 
2238 struct in6_addr *
2239 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2240 {
2241 	struct neighbour *n;
2242 
2243 	n = neigh_entry->key.n;
2244 	return (struct in6_addr *) &n->primary_key;
2245 }
2246 
2247 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2248 			       struct mlxsw_sp_neigh_entry *neigh_entry,
2249 			       u64 *p_counter)
2250 {
2251 	if (!neigh_entry->counter_valid)
2252 		return -EINVAL;
2253 
2254 	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2255 					 false, p_counter, NULL);
2256 }
2257 
2258 static struct mlxsw_sp_neigh_entry *
2259 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2260 			   u16 rif)
2261 {
2262 	struct mlxsw_sp_neigh_entry *neigh_entry;
2263 
2264 	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2265 	if (!neigh_entry)
2266 		return NULL;
2267 
2268 	neigh_hold(n);
2269 	neigh_entry->key.n = n;
2270 	neigh_entry->rif = rif;
2271 	INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2272 
2273 	return neigh_entry;
2274 }
2275 
2276 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2277 {
2278 	neigh_release(neigh_entry->key.n);
2279 	kfree(neigh_entry);
2280 }
2281 
2282 static int
2283 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2284 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2285 {
2286 	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2287 				      &neigh_entry->ht_node,
2288 				      mlxsw_sp_neigh_ht_params);
2289 }
2290 
2291 static void
2292 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2293 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2294 {
2295 	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2296 			       &neigh_entry->ht_node,
2297 			       mlxsw_sp_neigh_ht_params);
2298 }
2299 
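/* Neighbour counters feed the devlink dpipe host tables. Only allocate one
 * when counting was enabled for the matching table (HOST4 or HOST6).
 */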
2300 static bool
2301 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2302 				    struct mlxsw_sp_neigh_entry *neigh_entry)
2303 {
2304 	struct devlink *devlink;
2305 	const char *table_name;
2306 
2307 	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2308 	case AF_INET:
2309 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2310 		break;
2311 	case AF_INET6:
2312 		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2313 		break;
2314 	default:
2315 		WARN_ON(1);
2316 		return false;
2317 	}
2318 
2319 	devlink = priv_to_devlink(mlxsw_sp->core);
2320 	return devlink_dpipe_table_counter_enabled(devlink, table_name);
2321 }
2322 
2323 static void
2324 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2325 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2326 {
2327 	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2328 		return;
2329 
2330 	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2331 		return;
2332 
2333 	neigh_entry->counter_valid = true;
2334 }
2335 
2336 static void
2337 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2338 			    struct mlxsw_sp_neigh_entry *neigh_entry)
2339 {
2340 	if (!neigh_entry->counter_valid)
2341 		return;
2342 	mlxsw_sp_flow_counter_free(mlxsw_sp,
2343 				   neigh_entry->counter_index);
2344 	neigh_entry->counter_valid = false;
2345 }
2346 
2347 static struct mlxsw_sp_neigh_entry *
2348 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2349 {
2350 	struct mlxsw_sp_neigh_entry *neigh_entry;
2351 	struct mlxsw_sp_rif *rif;
2352 	int err;
2353 
2354 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2355 	if (!rif)
2356 		return ERR_PTR(-EINVAL);
2357 
2358 	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2359 	if (!neigh_entry)
2360 		return ERR_PTR(-ENOMEM);
2361 
2362 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2363 	if (err)
2364 		goto err_neigh_entry_insert;
2365 
2366 	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2367 	atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
2368 	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2369 
2370 	return neigh_entry;
2371 
2372 err_neigh_entry_insert:
2373 	mlxsw_sp_neigh_entry_free(neigh_entry);
2374 	return ERR_PTR(err);
2375 }
2376 
2377 static void
2378 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2379 			     struct mlxsw_sp_neigh_entry *neigh_entry)
2380 {
2381 	list_del(&neigh_entry->rif_list_node);
2382 	atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
2383 	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2384 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2385 	mlxsw_sp_neigh_entry_free(neigh_entry);
2386 }
2387 
2388 static struct mlxsw_sp_neigh_entry *
2389 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2390 {
2391 	struct mlxsw_sp_neigh_key key;
2392 
2393 	key.n = n;
2394 	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2395 				      &key, mlxsw_sp_neigh_ht_params);
2396 }
2397 
2398 static void
2399 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2400 {
2401 	unsigned long interval;
2402 
2403 #if IS_ENABLED(CONFIG_IPV6)
2404 	interval = min_t(unsigned long,
2405 			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2406 			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2407 #else
2408 	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2409 #endif
2410 	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
2411 }
2412 
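/* The RAUHTD register dumps neighbour entries on which the device saw
 * activity. Each entry is mapped back to a kernel neighbour and poked with
 * neigh_event_send(), so the kernel does not expire neighbours that
 * HW-forwarded traffic is still using.
 */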
2413 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2414 						   char *rauhtd_pl,
2415 						   int ent_index)
2416 {
2417 	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2418 	struct net_device *dev;
2419 	struct neighbour *n;
2420 	__be32 dipn;
2421 	u32 dip;
2422 	u16 rif;
2423 
2424 	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2425 
2426 	if (WARN_ON_ONCE(rif >= max_rifs))
2427 		return;
2428 	if (!mlxsw_sp->router->rifs[rif]) {
2429 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2430 		return;
2431 	}
2432 
2433 	dipn = htonl(dip);
2434 	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2435 	n = neigh_lookup(&arp_tbl, &dipn, dev);
2436 	if (!n)
2437 		return;
2438 
2439 	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2440 	neigh_event_send(n, NULL);
2441 	neigh_release(n);
2442 }
2443 
2444 #if IS_ENABLED(CONFIG_IPV6)
2445 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2446 						   char *rauhtd_pl,
2447 						   int rec_index)
2448 {
2449 	struct net_device *dev;
2450 	struct neighbour *n;
2451 	struct in6_addr dip;
2452 	u16 rif;
2453 
2454 	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2455 					 (char *) &dip);
2456 
2457 	if (!mlxsw_sp->router->rifs[rif]) {
2458 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2459 		return;
2460 	}
2461 
2462 	dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
2463 	n = neigh_lookup(&nd_tbl, &dip, dev);
2464 	if (!n)
2465 		return;
2466 
2467 	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2468 	neigh_event_send(n, NULL);
2469 	neigh_release(n);
2470 }
2471 #else
2472 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2473 						   char *rauhtd_pl,
2474 						   int rec_index)
2475 {
2476 }
2477 #endif
2478 
2479 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2480 						   char *rauhtd_pl,
2481 						   int rec_index)
2482 {
2483 	u8 num_entries;
2484 	int i;
2485 
2486 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2487 								rec_index);
2488 	/* Hardware starts counting at 0, so add 1. */
2489 	num_entries++;
2490 
2491 	/* Each record consists of several neighbour entries. */
2492 	for (i = 0; i < num_entries; i++) {
2493 		int ent_index;
2494 
2495 		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2496 		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2497 						       ent_index);
2498 	}
2499 
2500 }
2501 
2502 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2503 						   char *rauhtd_pl,
2504 						   int rec_index)
2505 {
2506 	/* One record contains one entry. */
2507 	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2508 					       rec_index);
2509 }
2510 
2511 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2512 					      char *rauhtd_pl, int rec_index)
2513 {
2514 	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2515 	case MLXSW_REG_RAUHTD_TYPE_IPV4:
2516 		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2517 						       rec_index);
2518 		break;
2519 	case MLXSW_REG_RAUHTD_TYPE_IPV6:
2520 		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2521 						       rec_index);
2522 		break;
2523 	}
2524 }
2525 
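/* The dump is considered full, and another query is issued, only when all
 * MLXSW_REG_RAUHTD_REC_MAX_NUM records were used and the last record is
 * itself full: IPv6 records always carry a single entry, while an IPv4
 * record is full when it carries MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries.
 */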
2526 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2527 {
2528 	u8 num_rec, last_rec_index, num_entries;
2529 
2530 	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2531 	last_rec_index = num_rec - 1;
2532 
2533 	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2534 		return false;
2535 	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2536 	    MLXSW_REG_RAUHTD_TYPE_IPV6)
2537 		return true;
2538 
2539 	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2540 								last_rec_index);
2541 	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2542 		return true;
2543 	return false;
2544 }
2545 
2546 static int
2547 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2548 				       char *rauhtd_pl,
2549 				       enum mlxsw_reg_rauhtd_type type)
2550 {
2551 	int i, num_rec;
2552 	int err;
2553 
2554 	/* Ensure the RIF we read from the device does not change mid-dump. */
2555 	mutex_lock(&mlxsw_sp->router->lock);
2556 	do {
2557 		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2558 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2559 				      rauhtd_pl);
2560 		if (err) {
2561 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2562 			break;
2563 		}
2564 		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2565 		for (i = 0; i < num_rec; i++)
2566 			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2567 							  i);
2568 	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2569 	mutex_unlock(&mlxsw_sp->router->lock);
2570 
2571 	return err;
2572 }
2573 
2574 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2575 {
2576 	enum mlxsw_reg_rauhtd_type type;
2577 	char *rauhtd_pl;
2578 	int err;
2579 
2580 	if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2581 		return 0;
2582 
2583 	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2584 	if (!rauhtd_pl)
2585 		return -ENOMEM;
2586 
2587 	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2588 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2589 	if (err)
2590 		goto out;
2591 
2592 	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2593 	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2594 out:
2595 	kfree(rauhtd_pl);
2596 	return err;
2597 }
2598 
2599 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2600 {
2601 	struct mlxsw_sp_neigh_entry *neigh_entry;
2602 
2603 	mutex_lock(&mlxsw_sp->router->lock);
2604 	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2605 			    nexthop_neighs_list_node)
2606 		/* If this neigh has nexthops, make the kernel think it is
2607 		 * active regardless of the traffic.
2608 		 */
2609 		neigh_event_send(neigh_entry->key.n, NULL);
2610 	mutex_unlock(&mlxsw_sp->router->lock);
2611 }
2612 
2613 static void
2614 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2615 {
2616 	unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2617 
2618 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2619 			       msecs_to_jiffies(interval));
2620 }
2621 
2622 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2623 {
2624 	struct mlxsw_sp_router *router;
2625 	int err;
2626 
2627 	router = container_of(work, struct mlxsw_sp_router,
2628 			      neighs_update.dw.work);
2629 	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2630 	if (err)
2631 		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2632 
2633 	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2634 
2635 	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2636 }
2637 
2638 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2639 {
2640 	struct mlxsw_sp_neigh_entry *neigh_entry;
2641 	struct mlxsw_sp_router *router;
2642 
2643 	router = container_of(work, struct mlxsw_sp_router,
2644 			      nexthop_probe_dw.work);
2645 	/* Iterate over nexthop neighbours, find those that are unresolved and
2646 	 * send ARP on them. This solves the chicken-and-egg problem where a
2647 	 * nexthop would not get offloaded until the neighbour is resolved, but
2648 	 * the neighbour would never get resolved if traffic is flowing in HW
2649 	 * using a different nexthop.
2650 	 */
2651 	mutex_lock(&router->lock);
2652 	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2653 			    nexthop_neighs_list_node)
2654 		if (!neigh_entry->connected)
2655 			neigh_event_send(neigh_entry->key.n, NULL);
2656 	mutex_unlock(&router->lock);
2657 
2658 	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2659 			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2660 }
2661 
2662 static void
2663 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2664 			      struct mlxsw_sp_neigh_entry *neigh_entry,
2665 			      bool removing, bool dead);
2666 
2667 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2668 {
2669 	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2670 			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2671 }
2672 
2673 static int
2674 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2675 				struct mlxsw_sp_neigh_entry *neigh_entry,
2676 				enum mlxsw_reg_rauht_op op)
2677 {
2678 	struct neighbour *n = neigh_entry->key.n;
2679 	u32 dip = ntohl(*((__be32 *) n->primary_key));
2680 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2681 
2682 	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2683 			      dip);
2684 	if (neigh_entry->counter_valid)
2685 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2686 					     neigh_entry->counter_index);
2687 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2688 }
2689 
2690 static int
2691 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2692 				struct mlxsw_sp_neigh_entry *neigh_entry,
2693 				enum mlxsw_reg_rauht_op op)
2694 {
2695 	struct neighbour *n = neigh_entry->key.n;
2696 	char rauht_pl[MLXSW_REG_RAUHT_LEN];
2697 	const char *dip = n->primary_key;
2698 
2699 	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2700 			      dip);
2701 	if (neigh_entry->counter_valid)
2702 		mlxsw_reg_rauht_pack_counter(rauht_pl,
2703 					     neigh_entry->counter_index);
2704 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2705 }
2706 
2707 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2708 {
2709 	struct neighbour *n = neigh_entry->key.n;
2710 
2711 	/* Packets with a link-local destination address are trapped
2712 	 * after LPM lookup and never reach the neighbour table, so
2713 	 * there is no need to program such neighbours to the device.
2714 	 */
2715 	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2716 	    IPV6_ADDR_LINKLOCAL)
2717 		return true;
2718 	return false;
2719 }
2720 
2721 static void
2722 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2723 			    struct mlxsw_sp_neigh_entry *neigh_entry,
2724 			    bool adding)
2725 {
2726 	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2727 	int err;
2728 
2729 	if (!adding && !neigh_entry->connected)
2730 		return;
2731 	neigh_entry->connected = adding;
2732 	if (neigh_entry->key.n->tbl->family == AF_INET) {
2733 		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2734 						      op);
2735 		if (err)
2736 			return;
2737 	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2738 		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2739 			return;
2740 		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2741 						      op);
2742 		if (err)
2743 			return;
2744 	} else {
2745 		WARN_ON_ONCE(1);
2746 		return;
2747 	}
2748 
2749 	if (adding)
2750 		neigh_entry->key.n->flags |= NTF_OFFLOADED;
2751 	else
2752 		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2753 }
2754 
2755 void
2756 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2757 				    struct mlxsw_sp_neigh_entry *neigh_entry,
2758 				    bool adding)
2759 {
2760 	if (adding)
2761 		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2762 	else
2763 		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
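	/* Re-write the entry so the new counter binding takes effect. */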
2764 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2765 }
2766 
2767 struct mlxsw_sp_netevent_work {
2768 	struct work_struct work;
2769 	struct mlxsw_sp *mlxsw_sp;
2770 	struct neighbour *n;
2771 };
2772 
2773 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2774 {
2775 	struct mlxsw_sp_netevent_work *net_work =
2776 		container_of(work, struct mlxsw_sp_netevent_work, work);
2777 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2778 	struct mlxsw_sp_neigh_entry *neigh_entry;
2779 	struct neighbour *n = net_work->n;
2780 	unsigned char ha[ETH_ALEN];
2781 	bool entry_connected;
2782 	u8 nud_state, dead;
2783 
2784 	/* If these parameters are changed after we release the lock,
2785 	 * then we are guaranteed to receive another event letting us
2786 	 * know about it.
2787 	 */
2788 	read_lock_bh(&n->lock);
2789 	memcpy(ha, n->ha, ETH_ALEN);
2790 	nud_state = n->nud_state;
2791 	dead = n->dead;
2792 	read_unlock_bh(&n->lock);
2793 
2794 	mutex_lock(&mlxsw_sp->router->lock);
2795 	mlxsw_sp_span_respin(mlxsw_sp);
2796 
2797 	entry_connected = nud_state & NUD_VALID && !dead;
2798 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2799 	if (!entry_connected && !neigh_entry)
2800 		goto out;
2801 	if (!neigh_entry) {
2802 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2803 		if (IS_ERR(neigh_entry))
2804 			goto out;
2805 	}
2806 
2807 	if (neigh_entry->connected && entry_connected &&
2808 	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2809 		goto out;
2810 
2811 	memcpy(neigh_entry->ha, ha, ETH_ALEN);
2812 	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2813 	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2814 				      dead);
2815 
2816 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2817 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2818 
2819 out:
2820 	mutex_unlock(&mlxsw_sp->router->lock);
2821 	neigh_release(n);
2822 	kfree(net_work);
2823 }
2824 
2825 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2826 
2827 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2828 {
2829 	struct mlxsw_sp_netevent_work *net_work =
2830 		container_of(work, struct mlxsw_sp_netevent_work, work);
2831 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2832 
2833 	mlxsw_sp_mp_hash_init(mlxsw_sp);
2834 	kfree(net_work);
2835 }
2836 
2837 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2838 
2839 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2840 {
2841 	struct mlxsw_sp_netevent_work *net_work =
2842 		container_of(work, struct mlxsw_sp_netevent_work, work);
2843 	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2844 
2845 	__mlxsw_sp_router_init(mlxsw_sp);
2846 	kfree(net_work);
2847 }
2848 
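/* Netevent handlers run in atomic context, so defer the actual processing to
 * a work item on the driver workqueue. @n may be NULL for work items that do
 * not involve a neighbour (neigh_clone() handles NULL gracefully).
 */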
2849 static int mlxsw_sp_router_schedule_work(struct net *net,
2850 					 struct mlxsw_sp_router *router,
2851 					 struct neighbour *n,
2852 					 void (*cb)(struct work_struct *))
2853 {
2854 	struct mlxsw_sp_netevent_work *net_work;
2855 
2856 	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2857 		return NOTIFY_DONE;
2858 
2859 	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2860 	if (!net_work)
2861 		return NOTIFY_BAD;
2862 
2863 	/* Take a reference to ensure the neighbour won't be destroyed until
2864 	 * we drop the reference in the work item.
2865 	 */
2866 	neigh_clone(n);
2867 
2868 	INIT_WORK(&net_work->work, cb);
2869 	net_work->mlxsw_sp = router->mlxsw_sp;
2870 	net_work->n = n;
2871 	mlxsw_core_schedule_work(&net_work->work);
2872 	return NOTIFY_DONE;
2873 }
2874 
2875 static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
2876 {
2877 	struct mlxsw_sp_port *mlxsw_sp_port;
2878 
2879 	rcu_read_lock();
2880 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2881 	rcu_read_unlock();
2882 	return !!mlxsw_sp_port;
2883 }
2884 
2885 static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
2886 					       struct neighbour *n)
2887 {
2888 	struct net *net;
2889 
2890 	net = neigh_parms_net(n->parms);
2891 	return mlxsw_sp_router_schedule_work(net, router, n,
2892 					     mlxsw_sp_router_neigh_event_work);
2893 }
2894 
2895 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2896 					  unsigned long event, void *ptr)
2897 {
2898 	struct mlxsw_sp_router *router;
2899 	unsigned long interval;
2900 	struct neigh_parms *p;
2901 	struct neighbour *n;
2902 
2903 	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2904 
2905 	switch (event) {
2906 	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2907 		p = ptr;
2908 
2909 		/* We don't care about changes in the default table. */
2910 		if (!p->dev || (p->tbl->family != AF_INET &&
2911 				p->tbl->family != AF_INET6))
2912 			return NOTIFY_DONE;
2913 
2914 		/* We are in atomic context and can't take RTNL mutex,
2915 		 * so use RCU variant to walk the device chain.
2916 		 */
2917 		if (!mlxsw_sp_dev_lower_is_port(p->dev))
2918 			return NOTIFY_DONE;
2919 
2920 		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2921 		router->neighs_update.interval = interval;
2922 		break;
2923 	case NETEVENT_NEIGH_UPDATE:
2924 		n = ptr;
2925 
2926 		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2927 			return NOTIFY_DONE;
2928 
2929 		if (!mlxsw_sp_dev_lower_is_port(n->dev))
2930 			return NOTIFY_DONE;
2931 
2932 		return mlxsw_sp_router_schedule_neigh_work(router, n);
2933 
2934 	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2935 	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2936 		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2937 				mlxsw_sp_router_mp_hash_event_work);
2938 
2939 	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2940 		return mlxsw_sp_router_schedule_work(ptr, router, NULL,
2941 				mlxsw_sp_router_update_priority_work);
2942 	}
2943 
2944 	return NOTIFY_DONE;
2945 }
2946 
2947 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2948 {
2949 	int err;
2950 
2951 	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2952 			      &mlxsw_sp_neigh_ht_params);
2953 	if (err)
2954 		return err;
2955 
2956 	/* Initialize the polling interval according to the default
2957 	 * table.
2958 	 */
2959 	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2960 
2961 	/* Create the delayed works for the activity_update */
2962 	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2963 			  mlxsw_sp_router_neighs_update_work);
2964 	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2965 			  mlxsw_sp_router_probe_unresolved_nexthops);
2966 	atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2967 	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2968 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2969 	return 0;
2970 }
2971 
2972 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2973 {
2974 	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2975 	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2976 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2977 }
2978 
2979 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2980 					 struct mlxsw_sp_rif *rif)
2981 {
2982 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2983 
2984 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2985 				 rif_list_node) {
2986 		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2987 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2988 	}
2989 }
2990 
2991 struct mlxsw_sp_neigh_rif_made_sync {
2992 	struct mlxsw_sp *mlxsw_sp;
2993 	struct mlxsw_sp_rif *rif;
2994 	int err;
2995 };
2996 
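/* When a RIF is created for a netdev that already has neighbours, replay
 * them: schedule the regular neigh work for each kernel neighbour whose
 * device matches the RIF's, as if an update event had just arrived.
 */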
2997 static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data)
2998 {
2999 	struct mlxsw_sp_neigh_rif_made_sync *rms = data;
3000 	int rc;
3001 
3002 	if (rms->err)
3003 		return;
3004 	if (n->dev != mlxsw_sp_rif_dev(rms->rif))
3005 		return;
3006 	rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n);
3007 	if (rc != NOTIFY_DONE)
3008 		rms->err = -ENOMEM;
3009 }
3010 
3011 static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
3012 					struct mlxsw_sp_rif *rif)
3013 {
3014 	struct mlxsw_sp_neigh_rif_made_sync rms = {
3015 		.mlxsw_sp = mlxsw_sp,
3016 		.rif = rif,
3017 	};
3018 
3019 	if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
3020 		return 0;
3021 
3022 	neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3023 	if (rms.err)
3024 		goto err_arp;
3025 
3026 #if IS_ENABLED(CONFIG_IPV6)
3027 	neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
3028 #endif
3029 	if (rms.err)
3030 		goto err_nd;
3031 
3032 	return 0;
3033 
3034 err_nd:
3035 err_arp:
3036 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
3037 	return rms.err;
3038 }
3039 
3040 enum mlxsw_sp_nexthop_type {
3041 	MLXSW_SP_NEXTHOP_TYPE_ETH,
3042 	MLXSW_SP_NEXTHOP_TYPE_IPIP,
3043 };
3044 
3045 enum mlxsw_sp_nexthop_action {
3046 	/* Nexthop forwards packets to an egress RIF */
3047 	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
3048 	/* Nexthop discards packets */
3049 	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
3050 	/* Nexthop traps packets */
3051 	MLXSW_SP_NEXTHOP_ACTION_TRAP,
3052 };
3053 
3054 struct mlxsw_sp_nexthop_key {
3055 	struct fib_nh *fib_nh;
3056 };
3057 
3058 struct mlxsw_sp_nexthop_counter;
3059 
3060 struct mlxsw_sp_nexthop {
3061 	struct list_head neigh_list_node; /* member of neigh entry list */
3062 	struct list_head crif_list_node;
3063 	struct list_head router_list_node;
3064 	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
3065 						   * this nexthop belongs to
3066 						   */
3067 	struct rhash_head ht_node;
3068 	struct neigh_table *neigh_tbl;
3069 	struct mlxsw_sp_nexthop_key key;
3070 	unsigned char gw_addr[sizeof(struct in6_addr)];
3071 	int ifindex;
3072 	int nh_weight;
3073 	int norm_nh_weight;
3074 	int num_adj_entries;
3075 	struct mlxsw_sp_crif *crif;
3076 	u8 should_offload:1, /* set indicates this nexthop should be written
3077 			      * to the adjacency table.
3078 			      */
3079 	   offloaded:1, /* set indicates this nexthop was written to the
3080 			 * adjacency table.
3081 			 */
3082 	   update:1; /* set indicates this nexthop should be updated in the
3083 		      * adjacency table (f.e., its MAC changed).
3084 		      */
3085 	enum mlxsw_sp_nexthop_action action;
3086 	enum mlxsw_sp_nexthop_type type;
3087 	union {
3088 		struct mlxsw_sp_neigh_entry *neigh_entry;
3089 		struct mlxsw_sp_ipip_entry *ipip_entry;
3090 	};
3091 	struct mlxsw_sp_nexthop_counter *counter;
3092 	u32 id;		/* NH ID for members of a NH object group. */
3093 };
3094 
3095 static struct net_device *
3096 mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
3097 {
3098 	if (!nh->crif)
3099 		return NULL;
3100 	return nh->crif->key.dev;
3101 }
3102 
3103 enum mlxsw_sp_nexthop_group_type {
3104 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
3105 	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
3106 	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
3107 };
3108 
3109 struct mlxsw_sp_nexthop_group_info {
3110 	struct mlxsw_sp_nexthop_group *nh_grp;
3111 	u32 adj_index;
3112 	u16 ecmp_size;
3113 	u16 count;
3114 	int sum_norm_weight;
3115 	u8 adj_index_valid:1,
3116 	   gateway:1, /* routes using the group use a gateway */
3117 	   is_resilient:1,
3118 	   hw_stats:1;
3119 	struct list_head list; /* member in nh_res_grp_list */
3120 	struct xarray nexthop_counters;
3121 	struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
3122 };
3123 
3124 static struct mlxsw_sp_rif *
3125 mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
3126 {
3127 	struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;
3128 
3129 	if (!crif)
3130 		return NULL;
3131 	return crif->rif;
3132 }
3133 
3134 struct mlxsw_sp_nexthop_group_vr_key {
3135 	u16 vr_id;
3136 	enum mlxsw_sp_l3proto proto;
3137 };
3138 
3139 struct mlxsw_sp_nexthop_group_vr_entry {
3140 	struct list_head list; /* member in vr_list */
3141 	struct rhash_head ht_node; /* member in vr_ht */
3142 	refcount_t ref_count;
3143 	struct mlxsw_sp_nexthop_group_vr_key key;
3144 };
3145 
3146 struct mlxsw_sp_nexthop_group {
3147 	struct rhash_head ht_node;
3148 	struct list_head fib_list; /* list of fib entries that use this group */
3149 	union {
3150 		struct {
3151 			struct fib_info *fi;
3152 		} ipv4;
3153 		struct {
3154 			u32 id;
3155 		} obj;
3156 	};
3157 	struct mlxsw_sp_nexthop_group_info *nhgi;
3158 	struct list_head vr_list;
3159 	struct rhashtable vr_ht;
3160 	enum mlxsw_sp_nexthop_group_type type;
3161 	bool can_destroy;
3162 };
3163 
3164 struct mlxsw_sp_nexthop_counter {
3165 	unsigned int counter_index;
3166 	refcount_t ref_count;
3167 };
3168 
3169 static struct mlxsw_sp_nexthop_counter *
3170 mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp)
3171 {
3172 	struct mlxsw_sp_nexthop_counter *nhct;
3173 	int err;
3174 
3175 	nhct = kzalloc(sizeof(*nhct), GFP_KERNEL);
3176 	if (!nhct)
3177 		return ERR_PTR(-ENOMEM);
3178 
3179 	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nhct->counter_index);
3180 	if (err)
3181 		goto err_counter_alloc;
3182 
3183 	refcount_set(&nhct->ref_count, 1);
3184 	return nhct;
3185 
3186 err_counter_alloc:
3187 	kfree(nhct);
3188 	return ERR_PTR(err);
3189 }
3190 
3191 static void
3192 mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3193 			      struct mlxsw_sp_nexthop_counter *nhct)
3194 {
3195 	mlxsw_sp_flow_counter_free(mlxsw_sp, nhct->counter_index);
3196 	kfree(nhct);
3197 }
3198 
3199 static struct mlxsw_sp_nexthop_counter *
3200 mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
3201 				struct mlxsw_sp_nexthop *nh)
3202 {
3203 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3204 	struct mlxsw_sp_nexthop_counter *nhct;
3205 	int err;
3206 
3207 	nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
3208 	if (nhct) {
3209 		refcount_inc(&nhct->ref_count);
3210 		return nhct;
3211 	}
3212 
3213 	nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
3214 	if (IS_ERR(nhct))
3215 		return nhct;
3216 
3217 	err = xa_err(xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
3218 			      GFP_KERNEL));
3219 	if (err)
3220 		goto err_store;
3221 
3222 	return nhct;
3223 
3224 err_store:
3225 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
3226 	return ERR_PTR(err);
3227 }
3228 
3229 static void mlxsw_sp_nexthop_sh_counter_put(struct mlxsw_sp *mlxsw_sp,
3230 					    struct mlxsw_sp_nexthop *nh)
3231 {
3232 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3233 	struct mlxsw_sp_nexthop_counter *nhct;
3234 
3235 	nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
3236 	if (WARN_ON(!nhct))
3237 		return;
3238 
3239 	if (!refcount_dec_and_test(&nhct->ref_count))
3240 		return;
3241 
3242 	xa_erase(&nh_grp->nhgi->nexthop_counters, nh->id);
3243 	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
3244 }
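/* Added commentary: nexthops that are members of a nexthop object group
 * (nh->id != 0) share one flow counter per nexthop ID, kept in the group's
 * nexthop_counters xarray. An illustrative lifetime, assuming two users of
 * the same nh->id:
 *
 *   sh_counter_get()  -> miss: alloc counter, ref_count = 1, xa_store()
 *   sh_counter_get()  -> hit:  refcount_inc(), ref_count = 2
 *   sh_counter_put()  -> ref_count = 1, counter kept
 *   sh_counter_put()  -> ref_count = 0, xa_erase() and free
 */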
3245 
3246 int mlxsw_sp_nexthop_counter_enable(struct mlxsw_sp *mlxsw_sp,
3247 				    struct mlxsw_sp_nexthop *nh)
3248 {
3249 	const char *table_adj = MLXSW_SP_DPIPE_TABLE_NAME_ADJ;
3250 	struct mlxsw_sp_nexthop_counter *nhct;
3251 	struct devlink *devlink;
3252 	bool dpipe_stats;
3253 
3254 	if (nh->counter)
3255 		return 0;
3256 
3257 	devlink = priv_to_devlink(mlxsw_sp->core);
3258 	dpipe_stats = devlink_dpipe_table_counter_enabled(devlink, table_adj);
3259 	if (!(nh->nhgi->hw_stats || dpipe_stats))
3260 		return 0;
3261 
3262 	if (nh->id)
3263 		nhct = mlxsw_sp_nexthop_sh_counter_get(mlxsw_sp, nh);
3264 	else
3265 		nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
3266 	if (IS_ERR(nhct))
3267 		return PTR_ERR(nhct);
3268 
3269 	nh->counter = nhct;
3270 	return 0;
3271 }
3272 
3273 void mlxsw_sp_nexthop_counter_disable(struct mlxsw_sp *mlxsw_sp,
3274 				      struct mlxsw_sp_nexthop *nh)
3275 {
3276 	if (!nh->counter)
3277 		return;
3278 
3279 	if (nh->id)
3280 		mlxsw_sp_nexthop_sh_counter_put(mlxsw_sp, nh);
3281 	else
3282 		mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh->counter);
3283 	nh->counter = NULL;
3284 }
3285 
3286 static int mlxsw_sp_nexthop_counter_update(struct mlxsw_sp *mlxsw_sp,
3287 					   struct mlxsw_sp_nexthop *nh)
3288 {
3289 	if (nh->nhgi->hw_stats)
3290 		return mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
3291 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
3292 	return 0;
3293 }
3294 
3295 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3296 				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3297 {
3298 	if (!nh->counter)
3299 		return -EINVAL;
3300 
3301 	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter->counter_index,
3302 					 true, p_counter, NULL);
3303 }
3304 
3305 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3306 					       struct mlxsw_sp_nexthop *nh)
3307 {
3308 	if (!nh) {
3309 		if (list_empty(&router->nexthop_list))
3310 			return NULL;
3311 		else
3312 			return list_first_entry(&router->nexthop_list,
3313 						typeof(*nh), router_list_node);
3314 	}
3315 	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3316 		return NULL;
3317 	return list_next_entry(nh, router_list_node);
3318 }
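/* Illustrative traversal (added commentary): callers start the walk with a
 * NULL cursor and feed the previous nexthop back in until NULL is returned:
 *
 *   struct mlxsw_sp_nexthop *nh;
 *
 *   for (nh = mlxsw_sp_nexthop_next(router, NULL); nh;
 *        nh = mlxsw_sp_nexthop_next(router, nh))
 *           ...use nh...;
 */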
3319 
3320 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3321 {
3322 	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3323 }
3324 
3325 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3326 {
3327 	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3328 	    !mlxsw_sp_nexthop_is_forward(nh))
3329 		return NULL;
3330 	return nh->neigh_entry->ha;
3331 }
3332 
3333 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3334 			     u32 *p_adj_size, u32 *p_adj_hash_index)
3335 {
3336 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3337 	u32 adj_hash_index = 0;
3338 	int i;
3339 
3340 	if (!nh->offloaded || !nhgi->adj_index_valid)
3341 		return -EINVAL;
3342 
3343 	*p_adj_index = nhgi->adj_index;
3344 	*p_adj_size = nhgi->ecmp_size;
3345 
3346 	for (i = 0; i < nhgi->count; i++) {
3347 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3348 
3349 		if (nh_iter == nh)
3350 			break;
3351 		if (nh_iter->offloaded)
3352 			adj_hash_index += nh_iter->num_adj_entries;
3353 	}
3354 
3355 	*p_adj_hash_index = adj_hash_index;
3356 	return 0;
3357 }
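/* Worked example (added commentary): for a group based at adj_index 1000
 * whose offloaded nexthops occupy {2, 3, 1} adjacency entries, querying the
 * third nexthop returns *p_adj_index = 1000, *p_adj_size = ecmp_size and
 * *p_adj_hash_index = 2 + 3 = 5, the offset of its first entry within the
 * group's block.
 */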
3358 
3359 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3360 {
3361 	if (WARN_ON(!nh->crif))
3362 		return NULL;
3363 	return nh->crif->rif;
3364 }
3365 
3366 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3367 {
3368 	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3369 	int i;
3370 
3371 	for (i = 0; i < nhgi->count; i++) {
3372 		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3373 
3374 		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3375 			return true;
3376 	}
3377 	return false;
3378 }
3379 
3380 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3381 	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3382 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3383 	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3384 	.automatic_shrinking = true,
3385 };
3386 
3387 static struct mlxsw_sp_nexthop_group_vr_entry *
3388 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3389 				       const struct mlxsw_sp_fib *fib)
3390 {
3391 	struct mlxsw_sp_nexthop_group_vr_key key;
3392 
3393 	memset(&key, 0, sizeof(key));
3394 	key.vr_id = fib->vr->id;
3395 	key.proto = fib->proto;
3396 	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3397 				      mlxsw_sp_nexthop_group_vr_ht_params);
3398 }
3399 
3400 static int
3401 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3402 				       const struct mlxsw_sp_fib *fib)
3403 {
3404 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3405 	int err;
3406 
3407 	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3408 	if (!vr_entry)
3409 		return -ENOMEM;
3410 
3411 	vr_entry->key.vr_id = fib->vr->id;
3412 	vr_entry->key.proto = fib->proto;
3413 	refcount_set(&vr_entry->ref_count, 1);
3414 
3415 	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3416 				     mlxsw_sp_nexthop_group_vr_ht_params);
3417 	if (err)
3418 		goto err_hashtable_insert;
3419 
3420 	list_add(&vr_entry->list, &nh_grp->vr_list);
3421 
3422 	return 0;
3423 
3424 err_hashtable_insert:
3425 	kfree(vr_entry);
3426 	return err;
3427 }
3428 
3429 static void
3430 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3431 					struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3432 {
3433 	list_del(&vr_entry->list);
3434 	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3435 			       mlxsw_sp_nexthop_group_vr_ht_params);
3436 	kfree(vr_entry);
3437 }
3438 
3439 static int
3440 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3441 			       const struct mlxsw_sp_fib *fib)
3442 {
3443 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3444 
3445 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3446 	if (vr_entry) {
3447 		refcount_inc(&vr_entry->ref_count);
3448 		return 0;
3449 	}
3450 
3451 	return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3452 }
3453 
3454 static void
3455 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3456 				 const struct mlxsw_sp_fib *fib)
3457 {
3458 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3459 
3460 	vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3461 	if (WARN_ON_ONCE(!vr_entry))
3462 		return;
3463 
3464 	if (!refcount_dec_and_test(&vr_entry->ref_count))
3465 		return;
3466 
3467 	mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3468 }
3469 
3470 struct mlxsw_sp_nexthop_group_cmp_arg {
3471 	enum mlxsw_sp_nexthop_group_type type;
3472 	union {
3473 		struct fib_info *fi;
3474 		struct mlxsw_sp_fib6_entry *fib6_entry;
3475 		u32 id;
3476 	};
3477 };
3478 
3479 static bool
3480 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3481 				    const struct in6_addr *gw, int ifindex,
3482 				    int weight)
3483 {
3484 	int i;
3485 
3486 	for (i = 0; i < nh_grp->nhgi->count; i++) {
3487 		const struct mlxsw_sp_nexthop *nh;
3488 
3489 		nh = &nh_grp->nhgi->nexthops[i];
3490 		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3491 		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3492 			return true;
3493 	}
3494 
3495 	return false;
3496 }
3497 
3498 static bool
3499 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3500 			    const struct mlxsw_sp_fib6_entry *fib6_entry)
3501 {
3502 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3503 
3504 	if (nh_grp->nhgi->count != fib6_entry->nrt6)
3505 		return false;
3506 
3507 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3508 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3509 		struct in6_addr *gw;
3510 		int ifindex, weight;
3511 
3512 		ifindex = fib6_nh->fib_nh_dev->ifindex;
3513 		weight = fib6_nh->fib_nh_weight;
3514 		gw = &fib6_nh->fib_nh_gw6;
3515 		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3516 							 weight))
3517 			return false;
3518 	}
3519 
3520 	return true;
3521 }
3522 
3523 static int
3524 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3525 {
3526 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3527 	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3528 
3529 	if (nh_grp->type != cmp_arg->type)
3530 		return 1;
3531 
3532 	switch (cmp_arg->type) {
3533 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3534 		return cmp_arg->fi != nh_grp->ipv4.fi;
3535 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3536 		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3537 						    cmp_arg->fib6_entry);
3538 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3539 		return cmp_arg->id != nh_grp->obj.id;
3540 	default:
3541 		WARN_ON(1);
3542 		return 1;
3543 	}
3544 }
3545 
3546 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3547 {
3548 	const struct mlxsw_sp_nexthop_group *nh_grp = data;
3549 	const struct mlxsw_sp_nexthop *nh;
3550 	struct fib_info *fi;
3551 	unsigned int val;
3552 	int i;
3553 
3554 	switch (nh_grp->type) {
3555 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3556 		fi = nh_grp->ipv4.fi;
3557 		return jhash(&fi, sizeof(fi), seed);
3558 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3559 		val = nh_grp->nhgi->count;
3560 		for (i = 0; i < nh_grp->nhgi->count; i++) {
3561 			nh = &nh_grp->nhgi->nexthops[i];
3562 			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3563 			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3564 		}
3565 		return jhash(&val, sizeof(val), seed);
3566 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3567 		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3568 	default:
3569 		WARN_ON(1);
3570 		return 0;
3571 	}
3572 }
3573 
3574 static u32
3575 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3576 {
3577 	unsigned int val = fib6_entry->nrt6;
3578 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3579 
3580 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3581 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3582 		struct net_device *dev = fib6_nh->fib_nh_dev;
3583 		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3584 
3585 		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3586 		val ^= jhash(gw, sizeof(*gw), seed);
3587 	}
3588 
3589 	return jhash(&val, sizeof(val), seed);
3590 }
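/* Added commentary: the per-nexthop hashes are folded in with XOR, so the
 * result is independent of the order of the rt6 list. This keeps the key
 * hash consistent with mlxsw_sp_nexthop_group_hash_obj() above, which must
 * hash an existing IPv6 group to the same value as the lookup key built
 * from a fib6 entry.
 */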
3591 
3592 static u32
3593 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3594 {
3595 	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3596 
3597 	switch (cmp_arg->type) {
3598 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3599 		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3600 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3601 		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3602 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3603 		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3604 	default:
3605 		WARN_ON(1);
3606 		return 0;
3607 	}
3608 }
3609 
3610 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3611 	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3612 	.hashfn	     = mlxsw_sp_nexthop_group_hash,
3613 	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
3614 	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
3615 };
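/* Added commentary: lookups into this table are keyed by a transient
 * mlxsw_sp_nexthop_group_cmp_arg rather than by a field embedded in the
 * object, hence the split .hashfn / .obj_hashfn pair. Both functions must
 * hash a matching key and object to the same bucket, otherwise .obj_cmpfn
 * would never be invoked for them.
 */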
3616 
3617 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3618 					 struct mlxsw_sp_nexthop_group *nh_grp)
3619 {
3620 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3621 	    !nh_grp->nhgi->gateway)
3622 		return 0;
3623 
3624 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3625 				      &nh_grp->ht_node,
3626 				      mlxsw_sp_nexthop_group_ht_params);
3627 }
3628 
3629 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3630 					  struct mlxsw_sp_nexthop_group *nh_grp)
3631 {
3632 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3633 	    !nh_grp->nhgi->gateway)
3634 		return;
3635 
3636 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3637 			       &nh_grp->ht_node,
3638 			       mlxsw_sp_nexthop_group_ht_params);
3639 }
3640 
3641 static struct mlxsw_sp_nexthop_group *
3642 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3643 			       struct fib_info *fi)
3644 {
3645 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3646 
3647 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3648 	cmp_arg.fi = fi;
3649 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3650 				      &cmp_arg,
3651 				      mlxsw_sp_nexthop_group_ht_params);
3652 }
3653 
3654 static struct mlxsw_sp_nexthop_group *
3655 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3656 			       struct mlxsw_sp_fib6_entry *fib6_entry)
3657 {
3658 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3659 
3660 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3661 	cmp_arg.fib6_entry = fib6_entry;
3662 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3663 				      &cmp_arg,
3664 				      mlxsw_sp_nexthop_group_ht_params);
3665 }
3666 
3667 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3668 	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3669 	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3670 	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
3671 };
3672 
3673 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3674 				   struct mlxsw_sp_nexthop *nh)
3675 {
3676 	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3677 				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3678 }
3679 
3680 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3681 				    struct mlxsw_sp_nexthop *nh)
3682 {
3683 	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3684 			       mlxsw_sp_nexthop_ht_params);
3685 }
3686 
3687 static struct mlxsw_sp_nexthop *
3688 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3689 			struct mlxsw_sp_nexthop_key key)
3690 {
3691 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3692 				      mlxsw_sp_nexthop_ht_params);
3693 }
3694 
3695 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3696 					     enum mlxsw_sp_l3proto proto,
3697 					     u16 vr_id,
3698 					     u32 adj_index, u16 ecmp_size,
3699 					     u32 new_adj_index,
3700 					     u16 new_ecmp_size)
3701 {
3702 	char raleu_pl[MLXSW_REG_RALEU_LEN];
3703 
3704 	mlxsw_reg_raleu_pack(raleu_pl,
3705 			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3706 			     adj_index, ecmp_size, new_adj_index,
3707 			     new_ecmp_size);
3708 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3709 }
3710 
3711 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3712 					  struct mlxsw_sp_nexthop_group *nh_grp,
3713 					  u32 old_adj_index, u16 old_ecmp_size)
3714 {
3715 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3716 	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3717 	int err;
3718 
3719 	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3720 		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3721 							vr_entry->key.proto,
3722 							vr_entry->key.vr_id,
3723 							old_adj_index,
3724 							old_ecmp_size,
3725 							nhgi->adj_index,
3726 							nhgi->ecmp_size);
3727 		if (err)
3728 			goto err_mass_update_vr;
3729 	}
3730 	return 0;
3731 
3732 err_mass_update_vr:
3733 	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3734 		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3735 						  vr_entry->key.vr_id,
3736 						  nhgi->adj_index,
3737 						  nhgi->ecmp_size,
3738 						  old_adj_index, old_ecmp_size);
3739 	return err;
3740 }
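/* Added commentary: on failure the mass update above is rolled back by
 * walking the already-updated VR entries in reverse and re-pointing them at
 * the old adjacency block, so every virtual router keeps referencing a
 * consistent {adj_index, ecmp_size} pair.
 */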
3741 
3742 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3743 					 u32 adj_index,
3744 					 struct mlxsw_sp_nexthop *nh,
3745 					 bool force, char *ratr_pl)
3746 {
3747 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3748 	struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
3749 	enum mlxsw_reg_ratr_op op;
3750 	u16 rif_index;
3751 
3752 	rif_index = rif ? rif->rif_index :
3753 			  mlxsw_sp->router->lb_crif->rif->rif_index;
3754 	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3755 		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3756 	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3757 			    adj_index, rif_index);
3758 	switch (nh->action) {
3759 	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3760 		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3761 		break;
3762 	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3763 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3764 					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3765 		break;
3766 	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3767 		mlxsw_reg_ratr_trap_action_set(ratr_pl,
3768 					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3769 		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3770 		break;
3771 	default:
3772 		WARN_ON_ONCE(1);
3773 		return -EINVAL;
3774 	}
3775 	if (nh->counter)
3776 		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter->counter_index,
3777 					    true);
3778 	else
3779 		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3780 
3781 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3782 }
3783 
3784 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3785 				struct mlxsw_sp_nexthop *nh, bool force,
3786 				char *ratr_pl)
3787 {
3788 	int i;
3789 
3790 	for (i = 0; i < nh->num_adj_entries; i++) {
3791 		int err;
3792 
3793 		err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3794 						    nh, force, ratr_pl);
3795 		if (err)
3796 			return err;
3797 	}
3798 
3799 	return 0;
3800 }
3801 
3802 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3803 					  u32 adj_index,
3804 					  struct mlxsw_sp_nexthop *nh,
3805 					  bool force, char *ratr_pl)
3806 {
3807 	const struct mlxsw_sp_ipip_ops *ipip_ops;
3808 
3809 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3810 	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3811 					force, ratr_pl);
3812 }
3813 
3814 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3815 					u32 adj_index,
3816 					struct mlxsw_sp_nexthop *nh, bool force,
3817 					char *ratr_pl)
3818 {
3819 	int i;
3820 
3821 	for (i = 0; i < nh->num_adj_entries; i++) {
3822 		int err;
3823 
3824 		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3825 						     nh, force, ratr_pl);
3826 		if (err)
3827 			return err;
3828 	}
3829 
3830 	return 0;
3831 }
3832 
3833 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3834 				   struct mlxsw_sp_nexthop *nh, bool force,
3835 				   char *ratr_pl)
3836 {
3837 	/* When action is discard or trap, the nexthop must be
3838 	 * programmed as an Ethernet nexthop.
3839 	 */
3840 	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3841 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3842 	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3843 		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3844 						   force, ratr_pl);
3845 	else
3846 		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3847 						    force, ratr_pl);
3848 }
3849 
3850 static int
3851 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3852 			      struct mlxsw_sp_nexthop_group_info *nhgi,
3853 			      bool reallocate)
3854 {
3855 	char ratr_pl[MLXSW_REG_RATR_LEN];
3856 	u32 adj_index = nhgi->adj_index; /* base */
3857 	struct mlxsw_sp_nexthop *nh;
3858 	int i;
3859 
3860 	for (i = 0; i < nhgi->count; i++) {
3861 		nh = &nhgi->nexthops[i];
3862 
3863 		if (!nh->should_offload) {
3864 			mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
3865 			nh->offloaded = 0;
3866 			continue;
3867 		}
3868 
3869 		if (nh->update || reallocate) {
3870 			int err = 0;
3871 
3872 			err = mlxsw_sp_nexthop_counter_update(mlxsw_sp, nh);
3873 			if (err)
3874 				return err;
3875 
3876 			err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3877 						      true, ratr_pl);
3878 			if (err)
3879 				return err;
3880 			nh->update = 0;
3881 			nh->offloaded = 1;
3882 		}
3883 		adj_index += nh->num_adj_entries;
3884 	}
3885 	return 0;
3886 }
3887 
3888 static int
3889 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3890 				    struct mlxsw_sp_nexthop_group *nh_grp)
3891 {
3892 	struct mlxsw_sp_fib_entry *fib_entry;
3893 	int err;
3894 
3895 	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3896 		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3897 		if (err)
3898 			return err;
3899 	}
3900 	return 0;
3901 }
3902 
3903 struct mlxsw_sp_adj_grp_size_range {
3904 	u16 start; /* Inclusive */
3905 	u16 end; /* Inclusive */
3906 };
3907 
3908 /* Ordered by range start value */
3909 static const struct mlxsw_sp_adj_grp_size_range
3910 mlxsw_sp1_adj_grp_size_ranges[] = {
3911 	{ .start = 1, .end = 64 },
3912 	{ .start = 512, .end = 512 },
3913 	{ .start = 1024, .end = 1024 },
3914 	{ .start = 2048, .end = 2048 },
3915 	{ .start = 4096, .end = 4096 },
3916 };
3917 
3918 /* Ordered by range start value */
3919 static const struct mlxsw_sp_adj_grp_size_range
3920 mlxsw_sp2_adj_grp_size_ranges[] = {
3921 	{ .start = 1, .end = 128 },
3922 	{ .start = 256, .end = 256 },
3923 	{ .start = 512, .end = 512 },
3924 	{ .start = 1024, .end = 1024 },
3925 	{ .start = 2048, .end = 2048 },
3926 	{ .start = 4096, .end = 4096 },
3927 };
3928 
3929 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3930 					   u16 *p_adj_grp_size)
3931 {
3932 	int i;
3933 
3934 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3935 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3936 
3937 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3938 
3939 		if (*p_adj_grp_size >= size_range->start &&
3940 		    *p_adj_grp_size <= size_range->end)
3941 			return;
3942 
3943 		if (*p_adj_grp_size <= size_range->end) {
3944 			*p_adj_grp_size = size_range->end;
3945 			return;
3946 		}
3947 	}
3948 }
3949 
3950 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3951 					     u16 *p_adj_grp_size,
3952 					     unsigned int alloc_size)
3953 {
3954 	int i;
3955 
3956 	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3957 		const struct mlxsw_sp_adj_grp_size_range *size_range;
3958 
3959 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3960 
3961 		if (alloc_size >= size_range->end) {
3962 			*p_adj_grp_size = size_range->end;
3963 			return;
3964 		}
3965 	}
3966 }
3967 
3968 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3969 				     u16 *p_adj_grp_size)
3970 {
3971 	unsigned int alloc_size;
3972 	int err;
3973 
3974 	/* Round up the requested group size to the next size supported
3975 	 * by the device and make sure the request can be satisfied.
3976 	 */
3977 	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3978 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3979 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3980 					      *p_adj_grp_size, &alloc_size);
3981 	if (err)
3982 		return err;
3983 	/* It is possible the allocation results in more allocated
3984 	 * entries than requested. Try to use as many of them as
3985 	 * possible.
3986 	 */
3987 	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3988 
3989 	return 0;
3990 }
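/* Worked example (added commentary, sizes per the Spectrum-2 ranges above):
 * a requested group size of 200 is first rounded up to 256, the next size
 * the device supports. If the KVD linear allocator then reports that the
 * allocation would actually span 512 entries, the size is rounded "down"
 * from that figure to 512, the largest supported size not exceeding the
 * allocation, so the surplus entries are used rather than wasted.
 */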
3991 
3992 static void
3993 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3994 {
3995 	int i, g = 0, sum_norm_weight = 0;
3996 	struct mlxsw_sp_nexthop *nh;
3997 
3998 	for (i = 0; i < nhgi->count; i++) {
3999 		nh = &nhgi->nexthops[i];
4000 
4001 		if (!nh->should_offload)
4002 			continue;
4003 		if (g > 0)
4004 			g = gcd(nh->nh_weight, g);
4005 		else
4006 			g = nh->nh_weight;
4007 	}
4008 
4009 	for (i = 0; i < nhgi->count; i++) {
4010 		nh = &nhgi->nexthops[i];
4011 
4012 		if (!nh->should_offload)
4013 			continue;
4014 		nh->norm_nh_weight = nh->nh_weight / g;
4015 		sum_norm_weight += nh->norm_nh_weight;
4016 	}
4017 
4018 	nhgi->sum_norm_weight = sum_norm_weight;
4019 }
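/* Worked example (added commentary): with offloadable nexthop weights
 * {2, 4, 6}, the running gcd g evaluates to 2, the normalized weights
 * become {1, 2, 3} and nhgi->sum_norm_weight = 6. Nexthops that are not
 * offloadable contribute neither to g nor to the sum.
 */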
4020 
4021 static void
4022 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
4023 {
4024 	int i, weight = 0, lower_bound = 0;
4025 	int total = nhgi->sum_norm_weight;
4026 	u16 ecmp_size = nhgi->ecmp_size;
4027 
4028 	for (i = 0; i < nhgi->count; i++) {
4029 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4030 		int upper_bound;
4031 
4032 		if (!nh->should_offload)
4033 			continue;
4034 		weight += nh->norm_nh_weight;
4035 		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
4036 		nh->num_adj_entries = upper_bound - lower_bound;
4037 		lower_bound = upper_bound;
4038 	}
4039 }
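/* Worked example (added commentary): continuing with normalized weights
 * {1, 2, 3}, total = 6 and ecmp_size = 128, the cumulative upper bounds are
 * DIV_ROUND_CLOSEST(128 * 1, 6) = 21, then 64, then 128, giving the three
 * nexthops 21, 43 and 64 adjacency entries respectively: exactly 128 in
 * total, closely approximating the 1:2:3 weight ratio.
 */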
4040 
4041 static struct mlxsw_sp_nexthop *
4042 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4043 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
4044 
4045 static void
4046 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4047 					struct mlxsw_sp_nexthop_group *nh_grp)
4048 {
4049 	int i;
4050 
4051 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4052 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4053 
4054 		if (nh->offloaded)
4055 			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4056 		else
4057 			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4058 	}
4059 }
4060 
4061 static void
4062 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
4063 					  struct mlxsw_sp_fib6_entry *fib6_entry)
4064 {
4065 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4066 
4067 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4068 		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
4069 		struct mlxsw_sp_nexthop *nh;
4070 
4071 		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
4072 		if (nh && nh->offloaded)
4073 			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
4074 		else
4075 			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
4076 	}
4077 }
4078 
4079 static void
4080 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4081 					struct mlxsw_sp_nexthop_group *nh_grp)
4082 {
4083 	struct mlxsw_sp_fib6_entry *fib6_entry;
4084 
4085 	/* Unfortunately, in IPv6 the route and the nexthop are described by
4086 	 * the same struct, so we need to iterate over all the routes using the
4087 	 * nexthop group and set / clear the offload indication for them.
4088 	 */
4089 	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
4090 			    common.nexthop_group_node)
4091 		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
4092 }
4093 
4094 static void
4095 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4096 					const struct mlxsw_sp_nexthop *nh,
4097 					u16 bucket_index)
4098 {
4099 	struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
4100 	bool offload = false, trap = false;
4101 
4102 	if (nh->offloaded) {
4103 		if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
4104 			trap = true;
4105 		else
4106 			offload = true;
4107 	}
4108 	nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4109 				    bucket_index, offload, trap);
4110 }
4111 
4112 static void
4113 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4114 					   struct mlxsw_sp_nexthop_group *nh_grp)
4115 {
4116 	int i;
4117 
4118 	/* Do not update the flags if the nexthop group is being destroyed
4119 	 * since:
4120 	 * 1. The nexthop object is being deleted, in which case the flags are
4121 	 * irrelevant.
4122 	 * 2. The nexthop group was replaced by a newer group, in which case
4123 	 * the flags of the nexthop object were already updated based on the
4124 	 * new group.
4125 	 */
4126 	if (nh_grp->can_destroy)
4127 		return;
4128 
4129 	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4130 			     nh_grp->nhgi->adj_index_valid, false);
4131 
4132 	/* Update flags of individual nexthop buckets in case of a resilient
4133 	 * nexthop group.
4134 	 */
4135 	if (!nh_grp->nhgi->is_resilient)
4136 		return;
4137 
4138 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4139 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4140 
4141 		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
4142 	}
4143 }
4144 
4145 static void
4146 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
4147 				       struct mlxsw_sp_nexthop_group *nh_grp)
4148 {
4149 	switch (nh_grp->type) {
4150 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
4151 		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
4152 		break;
4153 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
4154 		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
4155 		break;
4156 	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
4157 		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
4158 		break;
4159 	}
4160 }
4161 
4162 static int
4163 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
4164 			       struct mlxsw_sp_nexthop_group *nh_grp)
4165 {
4166 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4167 	u16 ecmp_size, old_ecmp_size;
4168 	struct mlxsw_sp_nexthop *nh;
4169 	bool offload_change = false;
4170 	u32 adj_index;
4171 	bool old_adj_index_valid;
4172 	u32 old_adj_index;
4173 	int i, err2, err;
4174 
4175 	if (!nhgi->gateway)
4176 		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4177 
4178 	for (i = 0; i < nhgi->count; i++) {
4179 		nh = &nhgi->nexthops[i];
4180 
4181 		if (nh->should_offload != nh->offloaded) {
4182 			offload_change = true;
4183 			if (nh->should_offload)
4184 				nh->update = 1;
4185 		}
4186 	}
4187 	if (!offload_change) {
4188 		/* Nothing was added or removed, so no need to reallocate. Just
4189 		 * update MAC on existing adjacency indexes.
4190 		 */
4191 		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
4192 		if (err) {
4193 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4194 			goto set_trap;
4195 		}
4196 		/* Flags of individual nexthop buckets might need to be
4197 		 * updated.
4198 		 */
4199 		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4200 		return 0;
4201 	}
4202 	mlxsw_sp_nexthop_group_normalize(nhgi);
4203 	if (!nhgi->sum_norm_weight) {
4204 		/* No neigh of this group is connected, so we just set
4205 		 * the trap and let everything flow through the kernel.
4206 		 */
4207 		err = 0;
4208 		goto set_trap;
4209 	}
4210 
4211 	ecmp_size = nhgi->sum_norm_weight;
4212 	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4213 	if (err)
4214 		/* No valid allocation size available. */
4215 		goto set_trap;
4216 
4217 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4218 				  ecmp_size, &adj_index);
4219 	if (err) {
4220 		/* We ran out of KVD linear space, just set the
4221 		 * trap and let everything flow through kernel.
4222 		 * trap and let everything flow through the kernel.
4223 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4224 		goto set_trap;
4225 	}
4226 	old_adj_index_valid = nhgi->adj_index_valid;
4227 	old_adj_index = nhgi->adj_index;
4228 	old_ecmp_size = nhgi->ecmp_size;
4229 	nhgi->adj_index_valid = 1;
4230 	nhgi->adj_index = adj_index;
4231 	nhgi->ecmp_size = ecmp_size;
4232 	mlxsw_sp_nexthop_group_rebalance(nhgi);
4233 	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4234 	if (err) {
4235 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4236 		goto set_trap;
4237 	}
4238 
4239 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4240 
4241 	if (!old_adj_index_valid) {
4242 		/* The trap was set for fib entries, so we have to call
4243 		 * fib entry update to unset it and use the adjacency index.
4244 		 */
4245 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4246 		if (err) {
4247 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4248 			goto set_trap;
4249 		}
4250 		return 0;
4251 	}
4252 
4253 	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4254 					     old_adj_index, old_ecmp_size);
4255 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4256 			   old_ecmp_size, old_adj_index);
4257 	if (err) {
4258 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4259 		goto set_trap;
4260 	}
4261 
4262 	return 0;
4263 
4264 set_trap:
4265 	old_adj_index_valid = nhgi->adj_index_valid;
4266 	nhgi->adj_index_valid = 0;
4267 	for (i = 0; i < nhgi->count; i++) {
4268 		nh = &nhgi->nexthops[i];
4269 		nh->offloaded = 0;
4270 	}
4271 	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4272 	if (err2)
4273 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4274 	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4275 	if (old_adj_index_valid)
4276 		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4277 				   nhgi->ecmp_size, nhgi->adj_index);
4278 	return err;
4279 }
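/* Added commentary: whenever the refresh above cannot produce a valid
 * adjacency group (no connected neighbour, no KVD linear space, or a
 * register write failure), it falls back to set_trap: the adjacency index
 * is invalidated and all routes using the group are re-programmed to trap
 * packets to the CPU, so traffic keeps flowing through the kernel instead
 * of being dropped.
 */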
4280 
4281 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4282 					    bool removing)
4283 {
4284 	if (!removing) {
4285 		nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4286 		nh->should_offload = 1;
4287 	} else if (nh->nhgi->is_resilient) {
4288 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4289 		nh->should_offload = 1;
4290 	} else {
4291 		nh->should_offload = 0;
4292 	}
4293 	nh->update = 1;
4294 }
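/* Added commentary: resilient nexthop groups have a fixed number of
 * adjacency entries, so a nexthop whose neighbour went away cannot simply
 * be removed from the group. Its buckets are instead kept offloaded and
 * programmed to trap to the CPU until the neighbour becomes valid again.
 */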
4295 
4296 static int
4297 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4298 				    struct mlxsw_sp_neigh_entry *neigh_entry)
4299 {
4300 	struct neighbour *n, *old_n = neigh_entry->key.n;
4301 	struct mlxsw_sp_nexthop *nh;
4302 	struct net_device *dev;
4303 	bool entry_connected;
4304 	u8 nud_state, dead;
4305 	int err;
4306 
4307 	nh = list_first_entry(&neigh_entry->nexthop_list,
4308 			      struct mlxsw_sp_nexthop, neigh_list_node);
4309 	dev = mlxsw_sp_nexthop_dev(nh);
4310 
4311 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4312 	if (!n) {
4313 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4314 		if (IS_ERR(n))
4315 			return PTR_ERR(n);
4316 		neigh_event_send(n, NULL);
4317 	}
4318 
4319 	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4320 	neigh_entry->key.n = n;
4321 	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4322 	if (err)
4323 		goto err_neigh_entry_insert;
4324 
4325 	neigh_release(old_n);
4326 
4327 	read_lock_bh(&n->lock);
4328 	nud_state = n->nud_state;
4329 	dead = n->dead;
4330 	read_unlock_bh(&n->lock);
4331 	entry_connected = nud_state & NUD_VALID && !dead;
4332 
4333 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4334 			    neigh_list_node) {
4335 		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4336 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4337 	}
4338 
4339 	return 0;
4340 
4341 err_neigh_entry_insert:
4342 	neigh_entry->key.n = old_n;
4343 	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4344 	neigh_release(n);
4345 	return err;
4346 }
4347 
4348 static void
4349 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4350 			      struct mlxsw_sp_neigh_entry *neigh_entry,
4351 			      bool removing, bool dead)
4352 {
4353 	struct mlxsw_sp_nexthop *nh;
4354 
4355 	if (list_empty(&neigh_entry->nexthop_list))
4356 		return;
4357 
4358 	if (dead) {
4359 		int err;
4360 
4361 		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4362 							  neigh_entry);
4363 		if (err)
4364 			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4365 		return;
4366 	}
4367 
4368 	list_for_each_entry(nh, &neigh_entry->nexthop_list,
4369 			    neigh_list_node) {
4370 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4371 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4372 	}
4373 }
4374 
4375 static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
4376 				       struct mlxsw_sp_crif *crif)
4377 {
4378 	if (nh->crif)
4379 		return;
4380 
4381 	nh->crif = crif;
4382 	list_add(&nh->crif_list_node, &crif->nexthop_list);
4383 }
4384 
4385 static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
4386 {
4387 	if (!nh->crif)
4388 		return;
4389 
4390 	list_del(&nh->crif_list_node);
4391 	nh->crif = NULL;
4392 }
4393 
4394 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4395 				       struct mlxsw_sp_nexthop *nh)
4396 {
4397 	struct mlxsw_sp_neigh_entry *neigh_entry;
4398 	struct net_device *dev;
4399 	struct neighbour *n;
4400 	u8 nud_state, dead;
4401 	int err;
4402 
4403 	if (WARN_ON(!nh->crif->rif))
4404 		return 0;
4405 
4406 	if (!nh->nhgi->gateway || nh->neigh_entry)
4407 		return 0;
4408 	dev = mlxsw_sp_nexthop_dev(nh);
4409 
4410 	/* Take a reference on the neighbour here, ensuring that it is
4411 	 * not destroyed before the nexthop entry is finished.
4412 	 * The reference is taken either in neigh_lookup() or
4413 	 * in neigh_create() in case n is not found.
4414 	 */
4415 	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
4416 	if (!n) {
4417 		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
4418 		if (IS_ERR(n))
4419 			return PTR_ERR(n);
4420 		neigh_event_send(n, NULL);
4421 	}
4422 	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4423 	if (!neigh_entry) {
4424 		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4425 		if (IS_ERR(neigh_entry)) {
4426 			err = -EINVAL;
4427 			goto err_neigh_entry_create;
4428 		}
4429 	}
4430 
4431 	/* Release the reference taken by neigh_lookup() / neigh_create() since
4432 	 * neigh_entry already holds one.
4433 	 */
4434 	neigh_release(n);
4435 
4436 	/* If that is the first nexthop connected to that neigh, add to
4437 	 * nexthop_neighs_list
4438 	 */
4439 	if (list_empty(&neigh_entry->nexthop_list))
4440 		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4441 			      &mlxsw_sp->router->nexthop_neighs_list);
4442 
4443 	nh->neigh_entry = neigh_entry;
4444 	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4445 	read_lock_bh(&n->lock);
4446 	nud_state = n->nud_state;
4447 	dead = n->dead;
4448 	read_unlock_bh(&n->lock);
4449 	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4450 
4451 	return 0;
4452 
4453 err_neigh_entry_create:
4454 	neigh_release(n);
4455 	return err;
4456 }
4457 
4458 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4459 					struct mlxsw_sp_nexthop *nh)
4460 {
4461 	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4462 
4463 	if (!neigh_entry)
4464 		return;
4465 
4466 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4467 	list_del(&nh->neigh_list_node);
4468 	nh->neigh_entry = NULL;
4469 
4470 	/* If that is the last nexthop connected to that neigh, remove from
4471 	 * nexthop_neighs_list
4472 	 */
4473 	if (list_empty(&neigh_entry->nexthop_list))
4474 		list_del(&neigh_entry->nexthop_neighs_list_node);
4475 
4476 	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4477 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4478 }
4479 
4480 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4481 {
4482 	struct net_device *ul_dev;
4483 	bool is_up;
4484 
4485 	rcu_read_lock();
4486 	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4487 	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4488 	rcu_read_unlock();
4489 
4490 	return is_up;
4491 }
4492 
4493 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4494 				       struct mlxsw_sp_nexthop *nh,
4495 				       struct mlxsw_sp_ipip_entry *ipip_entry)
4496 {
4497 	struct mlxsw_sp_crif *crif;
4498 	bool removing;
4499 
4500 	if (!nh->nhgi->gateway || nh->ipip_entry)
4501 		return;
4502 
4503 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
4504 	if (WARN_ON(!crif))
4505 		return;
4506 
4507 	nh->ipip_entry = ipip_entry;
4508 	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4509 	__mlxsw_sp_nexthop_neigh_update(nh, removing);
4510 	mlxsw_sp_nexthop_crif_init(nh, crif);
4511 }
4512 
4513 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4514 				       struct mlxsw_sp_nexthop *nh)
4515 {
4516 	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4517 
4518 	if (!ipip_entry)
4519 		return;
4520 
4521 	__mlxsw_sp_nexthop_neigh_update(nh, true);
4522 	nh->ipip_entry = NULL;
4523 }
4524 
4525 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4526 					const struct fib_nh *fib_nh,
4527 					enum mlxsw_sp_ipip_type *p_ipipt)
4528 {
4529 	struct net_device *dev = fib_nh->fib_nh_dev;
4530 
4531 	return dev &&
4532 	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4533 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4534 }
4535 
4536 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4537 				      struct mlxsw_sp_nexthop *nh,
4538 				      const struct net_device *dev)
4539 {
4540 	const struct mlxsw_sp_ipip_ops *ipip_ops;
4541 	struct mlxsw_sp_ipip_entry *ipip_entry;
4542 	struct mlxsw_sp_crif *crif;
4543 	int err;
4544 
4545 	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4546 	if (ipip_entry) {
4547 		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4548 		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4549 			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4550 			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4551 			return 0;
4552 		}
4553 	}
4554 
4555 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4556 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
4557 	if (!crif)
4558 		return 0;
4559 
4560 	mlxsw_sp_nexthop_crif_init(nh, crif);
4561 
4562 	if (!crif->rif)
4563 		return 0;
4564 
4565 	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4566 	if (err)
4567 		goto err_neigh_init;
4568 
4569 	return 0;
4570 
4571 err_neigh_init:
4572 	mlxsw_sp_nexthop_crif_fini(nh);
4573 	return err;
4574 }
4575 
4576 static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp,
4577 					  struct mlxsw_sp_nexthop *nh)
4578 {
4579 	switch (nh->type) {
4580 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4581 		return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4582 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4583 		break;
4584 	}
4585 
4586 	return 0;
4587 }
4588 
4589 static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
4590 					   struct mlxsw_sp_nexthop *nh)
4591 {
4592 	switch (nh->type) {
4593 	case MLXSW_SP_NEXTHOP_TYPE_ETH:
4594 		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4595 		break;
4596 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4597 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4598 		break;
4599 	}
4600 }
4601 
4602 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4603 				       struct mlxsw_sp_nexthop *nh)
4604 {
4605 	mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4606 	mlxsw_sp_nexthop_crif_fini(nh);
4607 }
4608 
4609 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4610 				  struct mlxsw_sp_nexthop_group *nh_grp,
4611 				  struct mlxsw_sp_nexthop *nh,
4612 				  struct fib_nh *fib_nh)
4613 {
4614 	struct net_device *dev = fib_nh->fib_nh_dev;
4615 	struct in_device *in_dev;
4616 	int err;
4617 
4618 	nh->nhgi = nh_grp->nhgi;
4619 	nh->key.fib_nh = fib_nh;
4620 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4621 	nh->nh_weight = fib_nh->fib_nh_weight;
4622 #else
4623 	nh->nh_weight = 1;
4624 #endif
4625 	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4626 	nh->neigh_tbl = &arp_tbl;
4627 	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4628 	if (err)
4629 		return err;
4630 
4631 	err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
4632 	if (err)
4633 		goto err_counter_enable;
4634 
4635 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4636 
4637 	if (!dev)
4638 		return 0;
4639 	nh->ifindex = dev->ifindex;
4640 
4641 	rcu_read_lock();
4642 	in_dev = __in_dev_get_rcu(dev);
4643 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4644 	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4645 		rcu_read_unlock();
4646 		return 0;
4647 	}
4648 	rcu_read_unlock();
4649 
4650 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4651 	if (err)
4652 		goto err_nexthop_neigh_init;
4653 
4654 	return 0;
4655 
4656 err_nexthop_neigh_init:
4657 	list_del(&nh->router_list_node);
4658 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
4659 err_counter_enable:
4660 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4661 	return err;
4662 }
4663 
4664 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4665 				   struct mlxsw_sp_nexthop *nh)
4666 {
4667 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4668 	list_del(&nh->router_list_node);
4669 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
4670 	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4671 }
4672 
4673 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4674 				    unsigned long event, struct fib_nh *fib_nh)
4675 {
4676 	struct mlxsw_sp_nexthop_key key;
4677 	struct mlxsw_sp_nexthop *nh;
4678 
4679 	key.fib_nh = fib_nh;
4680 	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4681 	if (!nh)
4682 		return;
4683 
4684 	switch (event) {
4685 	case FIB_EVENT_NH_ADD:
4686 		mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4687 		break;
4688 	case FIB_EVENT_NH_DEL:
4689 		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4690 		break;
4691 	}
4692 
4693 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4694 }
4695 
4696 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4697 					struct mlxsw_sp_rif *rif)
4698 {
4699 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
4700 	struct mlxsw_sp_nexthop *nh;
4701 	bool removing;
4702 
4703 	list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
4704 		switch (nh->type) {
4705 		case MLXSW_SP_NEXTHOP_TYPE_ETH:
4706 			removing = false;
4707 			break;
4708 		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4709 			removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
4710 			break;
4711 		default:
4712 			WARN_ON(1);
4713 			continue;
4714 		}
4715 
4716 		__mlxsw_sp_nexthop_neigh_update(nh, removing);
4717 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4718 	}
4719 }
4720 
4721 static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
4722 					  struct mlxsw_sp_rif *rif)
4723 {
4724 	struct mlxsw_sp_nexthop *nh, *tmp;
4725 	unsigned int n = 0;
4726 	int err;
4727 
4728 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4729 				 crif_list_node) {
4730 		err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh);
4731 		if (err)
4732 			goto err_nexthop_type_rif;
4733 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4734 		n++;
4735 	}
4736 
4737 	return 0;
4738 
4739 err_nexthop_type_rif:
4740 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4741 				 crif_list_node) {
4742 		if (!n--)
4743 			break;
4744 		mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4745 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4746 	}
4747 	return err;
4748 }
4749 
4750 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4751 					   struct mlxsw_sp_rif *rif)
4752 {
4753 	struct mlxsw_sp_nexthop *nh, *tmp;
4754 
4755 	list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
4756 				 crif_list_node) {
4757 		mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
4758 		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4759 	}
4760 }
4761 
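/* Allocate a single KVDL adjacency entry and program it (via RATR) to trap
 * packets to the CPU through the loopback RIF. Routes whose nexthop group
 * lacks a valid adjacency index are pointed at this entry; see
 * mlxsw_sp_fib_entry_op_remote() below.
 */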
4762 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4763 {
4764 	enum mlxsw_reg_ratr_trap_action trap_action;
4765 	char ratr_pl[MLXSW_REG_RATR_LEN];
4766 	int err;
4767 
4768 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4769 				  &mlxsw_sp->router->adj_trap_index);
4770 	if (err)
4771 		return err;
4772 
4773 	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4774 	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4775 			    MLXSW_REG_RATR_TYPE_ETHERNET,
4776 			    mlxsw_sp->router->adj_trap_index,
4777 			    mlxsw_sp->router->lb_crif->rif->rif_index);
4778 	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4779 	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4780 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4781 	if (err)
4782 		goto err_ratr_write;
4783 
4784 	return 0;
4785 
4786 err_ratr_write:
4787 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4788 			   mlxsw_sp->router->adj_trap_index);
4789 	return err;
4790 }
4791 
4792 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4793 {
4794 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4795 			   mlxsw_sp->router->adj_trap_index);
4796 }
4797 
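/* The trap adjacency entry is only needed while nexthop groups exist.
 * Create it lazily for the first group and free it when the last group is
 * gone, tracked by the num_groups refcount.
 */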
4798 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4799 {
4800 	int err;
4801 
4802 	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4803 		return 0;
4804 
4805 	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4806 	if (err)
4807 		return err;
4808 
4809 	refcount_set(&mlxsw_sp->router->num_groups, 1);
4810 
4811 	return 0;
4812 }
4813 
4814 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4815 {
4816 	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4817 		return;
4818 
4819 	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4820 }
4821 
4822 static void
4823 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4824 			     const struct mlxsw_sp_nexthop_group *nh_grp,
4825 			     unsigned long *activity)
4826 {
4827 	char *ratrad_pl;
4828 	int i, err;
4829 
4830 	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4831 	if (!ratrad_pl)
4832 		return;
4833 
4834 	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4835 			      nh_grp->nhgi->count);
4836 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4837 	if (err)
4838 		goto out;
4839 
4840 	for (i = 0; i < nh_grp->nhgi->count; i++) {
4841 		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4842 			continue;
4843 		bitmap_set(activity, i, 1);
4844 	}
4845 
4846 out:
4847 	kfree(ratrad_pl);
4848 }
4849 
4850 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4851 
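/* For resilient groups, read the activity vector of the group's adjacency
 * entries (RATRAD) and report it to the nexthop core, which uses it to
 * drive the idle timers of the individual buckets.
 */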
4852 static void
4853 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4854 				const struct mlxsw_sp_nexthop_group *nh_grp)
4855 {
4856 	unsigned long *activity;
4857 
4858 	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4859 	if (!activity)
4860 		return;
4861 
4862 	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4863 	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4864 					nh_grp->nhgi->count, activity);
4865 
4866 	bitmap_free(activity);
4867 }
4868 
4869 static void
4870 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4871 {
4872 	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4873 
4874 	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4875 			       msecs_to_jiffies(interval));
4876 }
4877 
4878 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4879 {
4880 	struct mlxsw_sp_nexthop_group_info *nhgi;
4881 	struct mlxsw_sp_router *router;
4882 	bool reschedule = false;
4883 
4884 	router = container_of(work, struct mlxsw_sp_router,
4885 			      nh_grp_activity_dw.work);
4886 
4887 	mutex_lock(&router->lock);
4888 
4889 	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4890 		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4891 		reschedule = true;
4892 	}
4893 
4894 	mutex_unlock(&router->lock);
4895 
4896 	if (!reschedule)
4897 		return;
4898 	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4899 }
4900 
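/* Validation of nexthop objects reported by the nexthop notifier: FDB and
 * encapsulating nexthops are rejected outright, and group entries must
 * either have a gateway, reject packets or egress through an IPIP device.
 */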
4901 static int
4902 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4903 				     const struct nh_notifier_single_info *nh,
4904 				     struct netlink_ext_ack *extack)
4905 {
4906 	int err = -EINVAL;
4907 
4908 	if (nh->is_fdb)
4909 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4910 	else if (nh->has_encap)
4911 		NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4912 	else
4913 		err = 0;
4914 
4915 	return err;
4916 }
4917 
4918 static int
4919 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4920 					  const struct nh_notifier_single_info *nh,
4921 					  struct netlink_ext_ack *extack)
4922 {
4923 	int err;
4924 
4925 	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4926 	if (err)
4927 		return err;
4928 
4929 	/* Device-only nexthops with an IPIP device are programmed as
4930 	 * encapsulating adjacency entries.
4931 	 */
4932 	if (!nh->gw_family && !nh->is_reject &&
4933 	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4934 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4935 		return -EINVAL;
4936 	}
4937 
4938 	return 0;
4939 }
4940 
4941 static int
4942 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4943 				    const struct nh_notifier_grp_info *nh_grp,
4944 				    struct netlink_ext_ack *extack)
4945 {
4946 	int i;
4947 
4948 	if (nh_grp->is_fdb) {
4949 		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4950 		return -EINVAL;
4951 	}
4952 
4953 	for (i = 0; i < nh_grp->num_nh; i++) {
4954 		const struct nh_notifier_single_info *nh;
4955 		int err;
4956 
4957 		nh = &nh_grp->nh_entries[i].nh;
4958 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4959 								extack);
4960 		if (err)
4961 			return err;
4962 	}
4963 
4964 	return 0;
4965 }
4966 
4967 static int
4968 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4969 					     const struct nh_notifier_res_table_info *nh_res_table,
4970 					     struct netlink_ext_ack *extack)
4971 {
4972 	unsigned int alloc_size;
4973 	bool valid_size = false;
4974 	int err, i;
4975 
4976 	if (nh_res_table->num_nh_buckets < 32) {
4977 		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4978 		return -EINVAL;
4979 	}
4980 
4981 	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4982 		const struct mlxsw_sp_adj_grp_size_range *size_range;
4983 
4984 		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4985 
4986 		if (nh_res_table->num_nh_buckets >= size_range->start &&
4987 		    nh_res_table->num_nh_buckets <= size_range->end) {
4988 			valid_size = true;
4989 			break;
4990 		}
4991 	}
4992 
4993 	if (!valid_size) {
4994 		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4995 		return -EINVAL;
4996 	}
4997 
4998 	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4999 					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
5000 					      nh_res_table->num_nh_buckets,
5001 					      &alloc_size);
5002 	if (err || nh_res_table->num_nh_buckets != alloc_size) {
5003 		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
5004 		return -EINVAL;
5005 	}
5006 
5007 	return 0;
5008 }
5009 
5010 static int
5011 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
5012 					const struct nh_notifier_res_table_info *nh_res_table,
5013 					struct netlink_ext_ack *extack)
5014 {
5015 	int err;
5016 	u16 i;
5017 
5018 	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
5019 							   nh_res_table,
5020 							   extack);
5021 	if (err)
5022 		return err;
5023 
5024 	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
5025 		const struct nh_notifier_single_info *nh;
5026 		int err;
5027 
5028 		nh = &nh_res_table->nhs[i];
5029 		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
5030 								extack);
5031 		if (err)
5032 			return err;
5033 	}
5034 
5035 	return 0;
5036 }
5037 
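/* Dispatch validation according to the notifier info type. Only replace,
 * resilient-table pre-replace and bucket replace events are validated.
 */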
5038 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
5039 					 unsigned long event,
5040 					 struct nh_notifier_info *info)
5041 {
5042 	struct nh_notifier_single_info *nh;
5043 
5044 	if (event != NEXTHOP_EVENT_REPLACE &&
5045 	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
5046 	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
5047 		return 0;
5048 
5049 	switch (info->type) {
5050 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
5051 		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
5052 							    info->extack);
5053 	case NH_NOTIFIER_INFO_TYPE_GRP:
5054 		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
5055 							   info->nh_grp,
5056 							   info->extack);
5057 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5058 		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
5059 							       info->nh_res_table,
5060 							       info->extack);
5061 	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
5062 		nh = &info->nh_res_bucket->new_nh;
5063 		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
5064 								 info->extack);
5065 	default:
5066 		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
5067 		return -EOPNOTSUPP;
5068 	}
5069 }
5070 
5071 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
5072 					    const struct nh_notifier_info *info)
5073 {
5074 	const struct net_device *dev;
5075 
5076 	switch (info->type) {
5077 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
5078 		dev = info->nh->dev;
5079 		return info->nh->gw_family || info->nh->is_reject ||
5080 		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
5081 	case NH_NOTIFIER_INFO_TYPE_GRP:
5082 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5083 		/* Already validated earlier. */
5084 		return true;
5085 	default:
5086 		return false;
5087 	}
5088 }
5089 
5090 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
5091 						struct mlxsw_sp_nexthop *nh)
5092 {
5093 	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
5094 	nh->should_offload = 1;
5095 	/* While nexthops that discard packets do not forward traffic
5096 	 * via an egress RIF, they still need to be programmed using a
5097 	 * valid RIF, so use the loopback RIF created during init.
5098 	 */
5099 	nh->crif = mlxsw_sp->router->lb_crif;
5100 }
5101 
5102 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
5103 						struct mlxsw_sp_nexthop *nh)
5104 {
5105 	nh->crif = NULL;
5106 	nh->should_offload = 0;
5107 }
5108 
5109 static int
5110 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
5111 			  struct mlxsw_sp_nexthop_group *nh_grp,
5112 			  struct mlxsw_sp_nexthop *nh,
5113 			  struct nh_notifier_single_info *nh_obj, int weight)
5114 {
5115 	struct net_device *dev = nh_obj->dev;
5116 	int err;
5117 
5118 	nh->nhgi = nh_grp->nhgi;
5119 	nh->nh_weight = weight;
5120 
5121 	switch (nh_obj->gw_family) {
5122 	case AF_INET:
5123 		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
5124 		nh->neigh_tbl = &arp_tbl;
5125 		break;
5126 	case AF_INET6:
5127 		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
5128 #if IS_ENABLED(CONFIG_IPV6)
5129 		nh->neigh_tbl = &nd_tbl;
5130 #endif
5131 		break;
5132 	}
5133 
5134 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5135 	nh->ifindex = dev->ifindex;
5136 	nh->id = nh_obj->id;
5137 
5138 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
5139 	if (err)
5140 		goto err_type_init;
5141 
5142 	if (nh_obj->is_reject)
5143 		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
5144 
5145 	/* In a resilient nexthop group, all the nexthops must be written to
5146 	 * the adjacency table, even if they do not have a valid neighbour or
5147 	 * RIF.
5148 	 */
5149 	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
5150 		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
5151 		nh->should_offload = 1;
5152 	}
5153 
5154 	return 0;
5155 
5156 err_type_init:
5157 	list_del(&nh->router_list_node);
5158 	return err;
5159 }
5160 
5161 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
5162 				      struct mlxsw_sp_nexthop *nh)
5163 {
5164 	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
5165 		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
5166 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5167 	list_del(&nh->router_list_node);
5168 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
5169 	nh->should_offload = 0;
5170 }
5171 
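/* Build the group info for a nexthop object. A single nexthop is modelled
 * as a group of one, and a resilient group allocates one nexthop per
 * bucket, so nhgi->count is then the bucket count rather than the number
 * of distinct nexthops.
 */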
5172 static int
5173 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
5174 				     struct mlxsw_sp_nexthop_group *nh_grp,
5175 				     struct nh_notifier_info *info)
5176 {
5177 	struct mlxsw_sp_nexthop_group_info *nhgi;
5178 	struct mlxsw_sp_nexthop *nh;
5179 	bool is_resilient = false;
5180 	bool hw_stats = false;
5181 	unsigned int nhs;
5182 	int err, i;
5183 
5184 	switch (info->type) {
5185 	case NH_NOTIFIER_INFO_TYPE_SINGLE:
5186 		nhs = 1;
5187 		break;
5188 	case NH_NOTIFIER_INFO_TYPE_GRP:
5189 		nhs = info->nh_grp->num_nh;
5190 		hw_stats = info->nh_grp->hw_stats;
5191 		break;
5192 	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5193 		nhs = info->nh_res_table->num_nh_buckets;
5194 		hw_stats = info->nh_res_table->hw_stats;
5195 		is_resilient = true;
5196 		break;
5197 	default:
5198 		return -EINVAL;
5199 	}
5200 
5201 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5202 	if (!nhgi)
5203 		return -ENOMEM;
5204 	nh_grp->nhgi = nhgi;
5205 	nhgi->nh_grp = nh_grp;
5206 	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
5207 	nhgi->is_resilient = is_resilient;
5208 	nhgi->count = nhs;
5209 	nhgi->hw_stats = hw_stats;
5210 
5211 	xa_init_flags(&nhgi->nexthop_counters, XA_FLAGS_ALLOC1);
5212 
5213 	for (i = 0; i < nhgi->count; i++) {
5214 		struct nh_notifier_single_info *nh_obj;
5215 		int weight;
5216 
5217 		nh = &nhgi->nexthops[i];
5218 		switch (info->type) {
5219 		case NH_NOTIFIER_INFO_TYPE_SINGLE:
5220 			nh_obj = info->nh;
5221 			weight = 1;
5222 			break;
5223 		case NH_NOTIFIER_INFO_TYPE_GRP:
5224 			nh_obj = &info->nh_grp->nh_entries[i].nh;
5225 			weight = info->nh_grp->nh_entries[i].weight;
5226 			break;
5227 		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
5228 			nh_obj = &info->nh_res_table->nhs[i];
5229 			weight = 1;
5230 			break;
5231 		default:
5232 			err = -EINVAL;
5233 			goto err_nexthop_obj_init;
5234 		}
5235 		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
5236 						weight);
5237 		if (err)
5238 			goto err_nexthop_obj_init;
5239 	}
5240 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5241 	if (err)
5242 		goto err_group_inc;
5243 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5244 	if (err) {
5245 		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
5246 		goto err_group_refresh;
5247 	}
5248 
5249 	/* Add resilient nexthop groups to a list so that the activity of their
5250 	 * nexthop buckets will be periodically queried and cleared.
5251 	 */
5252 	if (nhgi->is_resilient) {
5253 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5254 			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
5255 		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
5256 	}
5257 
5258 	return 0;
5259 
5260 err_group_refresh:
5261 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5262 err_group_inc:
5263 	i = nhgi->count;
5264 err_nexthop_obj_init:
5265 	for (i--; i >= 0; i--) {
5266 		nh = &nhgi->nexthops[i];
5267 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5268 	}
5269 	kfree(nhgi);
5270 	return err;
5271 }
5272 
5273 static void
5274 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5275 				     struct mlxsw_sp_nexthop_group *nh_grp)
5276 {
5277 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5278 	struct mlxsw_sp_router *router = mlxsw_sp->router;
5279 	int i;
5280 
5281 	if (nhgi->is_resilient) {
5282 		list_del(&nhgi->list);
5283 		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5284 			cancel_delayed_work(&router->nh_grp_activity_dw);
5285 	}
5286 
5287 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5288 	for (i = nhgi->count - 1; i >= 0; i--) {
5289 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5290 
5291 		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5292 	}
5293 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5294 	WARN_ON_ONCE(nhgi->adj_index_valid);
5295 	WARN_ON(!xa_empty(&nhgi->nexthop_counters));
5296 	xa_destroy(&nhgi->nexthop_counters);
5297 	kfree(nhgi);
5298 }
5299 
5300 static struct mlxsw_sp_nexthop_group *
5301 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5302 				  struct nh_notifier_info *info)
5303 {
5304 	struct mlxsw_sp_nexthop_group *nh_grp;
5305 	int err;
5306 
5307 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5308 	if (!nh_grp)
5309 		return ERR_PTR(-ENOMEM);
5310 	INIT_LIST_HEAD(&nh_grp->vr_list);
5311 	err = rhashtable_init(&nh_grp->vr_ht,
5312 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5313 	if (err)
5314 		goto err_nexthop_group_vr_ht_init;
5315 	INIT_LIST_HEAD(&nh_grp->fib_list);
5316 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5317 	nh_grp->obj.id = info->id;
5318 
5319 	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5320 	if (err)
5321 		goto err_nexthop_group_info_init;
5322 
5323 	nh_grp->can_destroy = false;
5324 
5325 	return nh_grp;
5326 
5327 err_nexthop_group_info_init:
5328 	rhashtable_destroy(&nh_grp->vr_ht);
5329 err_nexthop_group_vr_ht_init:
5330 	kfree(nh_grp);
5331 	return ERR_PTR(err);
5332 }
5333 
5334 static void
5335 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5336 				   struct mlxsw_sp_nexthop_group *nh_grp)
5337 {
5338 	if (!nh_grp->can_destroy)
5339 		return;
5340 	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5341 	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5342 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5343 	rhashtable_destroy(&nh_grp->vr_ht);
5344 	kfree(nh_grp);
5345 }
5346 
5347 static struct mlxsw_sp_nexthop_group *
5348 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5349 {
5350 	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5351 
5352 	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5353 	cmp_arg.id = id;
5354 	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5355 				      &cmp_arg,
5356 				      mlxsw_sp_nexthop_group_ht_params);
5357 }
5358 
5359 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5360 					  struct mlxsw_sp_nexthop_group *nh_grp)
5361 {
5362 	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5363 }
5364 
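/* Replace an existing group with a newly created one by swapping their
 * group info structures and then updating the routes: a mass adjacency
 * update when both indices are valid, or a FIB entries update when the
 * offload state changes. On failure, the swap is reverted and the old
 * group is left intact.
 */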
5365 static int
5366 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5367 				   struct mlxsw_sp_nexthop_group *nh_grp,
5368 				   struct mlxsw_sp_nexthop_group *old_nh_grp,
5369 				   struct netlink_ext_ack *extack)
5370 {
5371 	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5372 	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5373 	int err;
5374 
5375 	old_nh_grp->nhgi = new_nhgi;
5376 	new_nhgi->nh_grp = old_nh_grp;
5377 	nh_grp->nhgi = old_nhgi;
5378 	old_nhgi->nh_grp = nh_grp;
5379 
5380 	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5381 		/* Both the old adjacency index and the new one are valid.
5382 		 * Routes are currently using the old one. Tell the device to
5383 		 * replace the old adjacency index with the new one.
5384 		 */
5385 		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5386 						     old_nhgi->adj_index,
5387 						     old_nhgi->ecmp_size);
5388 		if (err) {
5389 			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5390 			goto err_out;
5391 		}
5392 	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5393 		/* The old adjacency index is valid, while the new one is not.
5394 		 * Iterate over all the routes using the group and change them
5395 		 * to trap packets to the CPU.
5396 		 */
5397 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5398 		if (err) {
5399 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5400 			goto err_out;
5401 		}
5402 	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5403 		/* The old adjacency index is invalid, while the new one is.
5404 		 * Iterate over all the routes using the group and change them
5405 		 * to forward packets using the new valid index.
5406 		 */
5407 		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5408 		if (err) {
5409 			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5410 			goto err_out;
5411 		}
5412 	}
5413 
5414 	/* Make sure the flags are set / cleared based on the new nexthop group
5415 	 * information.
5416 	 */
5417 	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5418 
5419 	/* At this point 'nh_grp' is just a shell that is not used by anyone
5420 	 * and its nexthop group info is the old info that was just replaced
5421 	 * with the new one. Remove it.
5422 	 */
5423 	nh_grp->can_destroy = true;
5424 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5425 
5426 	return 0;
5427 
5428 err_out:
5429 	old_nhgi->nh_grp = old_nh_grp;
5430 	nh_grp->nhgi = new_nhgi;
5431 	new_nhgi->nh_grp = nh_grp;
5432 	old_nh_grp->nhgi = old_nhgi;
5433 	return err;
5434 }
5435 
5436 static int mlxsw_sp_nexthop_obj_res_group_pre(struct mlxsw_sp *mlxsw_sp,
5437 					      struct nh_notifier_info *info)
5438 {
5439 	struct nh_notifier_grp_info *grp_info = info->nh_grp;
5440 	struct mlxsw_sp_nexthop_group_info *nhgi;
5441 	struct mlxsw_sp_nexthop_group *nh_grp;
5442 	int err;
5443 	int i;
5444 
5445 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5446 	if (!nh_grp)
5447 		return 0;
5448 	nhgi = nh_grp->nhgi;
5449 
5450 	if (nhgi->hw_stats == grp_info->hw_stats)
5451 		return 0;
5452 
5453 	nhgi->hw_stats = grp_info->hw_stats;
5454 
5455 	for (i = 0; i < nhgi->count; i++) {
5456 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5457 
5458 		if (nh->offloaded)
5459 			nh->update = 1;
5460 	}
5461 
5462 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5463 	if (err)
5464 		goto err_group_refresh;
5465 
5466 	return 0;
5467 
5468 err_group_refresh:
5469 	nhgi->hw_stats = !grp_info->hw_stats;
5470 	return err;
5471 }
5472 
5473 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5474 				    struct nh_notifier_info *info)
5475 {
5476 	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5477 	struct netlink_ext_ack *extack = info->extack;
5478 	int err;
5479 
5480 	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5481 	if (IS_ERR(nh_grp))
5482 		return PTR_ERR(nh_grp);
5483 
5484 	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5485 	if (!old_nh_grp)
5486 		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5487 	else
5488 		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5489 							 old_nh_grp, extack);
5490 
5491 	if (err) {
5492 		nh_grp->can_destroy = true;
5493 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5494 	}
5495 
5496 	return err;
5497 }
5498 
5499 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5500 				     struct nh_notifier_info *info)
5501 {
5502 	struct mlxsw_sp_nexthop_group *nh_grp;
5503 
5504 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5505 	if (!nh_grp)
5506 		return;
5507 
5508 	nh_grp->can_destroy = true;
5509 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5510 
5511 	/* If the group still has routes using it, then defer the delete
5512 	 * operation until the last route using it is deleted.
5513 	 */
5514 	if (!list_empty(&nh_grp->fib_list))
5515 		return;
5516 	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5517 }
5518 
5519 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5520 					     u32 adj_index, char *ratr_pl)
5521 {
5522 	MLXSW_REG_ZERO(ratr, ratr_pl);
5523 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5524 	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5525 	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5526 
5527 	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5528 }
5529 
5530 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5531 {
5532 	/* Clear the opcode and activity on both the old and new payloads, as
5533 	 * they are irrelevant for the comparison.
5534 	 */
5535 	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5536 	mlxsw_reg_ratr_a_set(ratr_pl, 0);
5537 	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5538 	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5539 
5540 	/* If the contents of the adjacency entry are consistent with the
5541 	 * replacement request, then replacement was successful.
5542 	 */
5543 	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5544 		return 0;
5545 
5546 	return -EINVAL;
5547 }
5548 
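/* Overwrite a single bucket's adjacency entry. Unless the replacement is
 * forced, the entry is only written if it is inactive, and it is then read
 * back and compared with what was written in order to detect a race with
 * traffic hitting the bucket.
 */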
5549 static int
5550 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5551 				       struct mlxsw_sp_nexthop *nh,
5552 				       struct nh_notifier_info *info)
5553 {
5554 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5555 	struct netlink_ext_ack *extack = info->extack;
5556 	bool force = info->nh_res_bucket->force;
5557 	char ratr_pl_new[MLXSW_REG_RATR_LEN];
5558 	char ratr_pl[MLXSW_REG_RATR_LEN];
5559 	u32 adj_index;
5560 	int err;
5561 
5562 	/* No point in trying an atomic replacement if the idle timer interval
5563 	 * is smaller than the interval in which we query and clear activity.
5564 	 */
5565 	if (!force && info->nh_res_bucket->idle_timer_ms <
5566 	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5567 		force = true;
5568 
5569 	adj_index = nh->nhgi->adj_index + bucket_index;
5570 	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5571 	if (err) {
5572 		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5573 		return err;
5574 	}
5575 
5576 	if (!force) {
5577 		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5578 							ratr_pl_new);
5579 		if (err) {
5580 			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5581 			return err;
5582 		}
5583 
5584 		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5585 		if (err) {
5586 			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5587 			return err;
5588 		}
5589 	}
5590 
5591 	nh->update = 0;
5592 	nh->offloaded = 1;
5593 	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5594 
5595 	return 0;
5596 }
5597 
5598 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5599 					       struct nh_notifier_info *info)
5600 {
5601 	u16 bucket_index = info->nh_res_bucket->bucket_index;
5602 	struct netlink_ext_ack *extack = info->extack;
5603 	struct mlxsw_sp_nexthop_group_info *nhgi;
5604 	struct nh_notifier_single_info *nh_obj;
5605 	struct mlxsw_sp_nexthop_group *nh_grp;
5606 	struct mlxsw_sp_nexthop *nh;
5607 	int err;
5608 
5609 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5610 	if (!nh_grp) {
5611 		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5612 		return -EINVAL;
5613 	}
5614 
5615 	nhgi = nh_grp->nhgi;
5616 
5617 	if (bucket_index >= nhgi->count) {
5618 		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5619 		return -EINVAL;
5620 	}
5621 
5622 	nh = &nhgi->nexthops[bucket_index];
5623 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5624 
5625 	nh_obj = &info->nh_res_bucket->new_nh;
5626 	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5627 	if (err) {
5628 		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5629 		goto err_nexthop_obj_init;
5630 	}
5631 
5632 	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5633 	if (err)
5634 		goto err_nexthop_obj_bucket_adj_update;
5635 
5636 	return 0;
5637 
5638 err_nexthop_obj_bucket_adj_update:
5639 	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5640 err_nexthop_obj_init:
5641 	nh_obj = &info->nh_res_bucket->old_nh;
5642 	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5643 	/* The old adjacency entry was not overwritten */
5644 	nh->update = 0;
5645 	nh->offloaded = 1;
5646 	return err;
5647 }
5648 
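/* Report per-nexthop packet deltas for hardware statistics. In a regular
 * multipath group the nexthops map 1:1 to the notifier's stats array,
 * while in a resilient group each bucket must first be matched to its
 * nexthop ID.
 */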
5649 static void
5650 mlxsw_sp_nexthop_obj_mp_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
5651 				     struct mlxsw_sp_nexthop_group_info *nhgi,
5652 				     struct nh_notifier_grp_hw_stats_info *info)
5653 {
5654 	int nhi;
5655 
5656 	for (nhi = 0; nhi < info->num_nh; nhi++) {
5657 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[nhi];
5658 		u64 packets;
5659 		int err;
5660 
5661 		err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
5662 		if (err)
5663 			continue;
5664 
5665 		nh_grp_hw_stats_report_delta(info, nhi, packets);
5666 	}
5667 }
5668 
5669 static void
5670 mlxsw_sp_nexthop_obj_res_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
5671 				      struct mlxsw_sp_nexthop_group_info *nhgi,
5672 				      struct nh_notifier_grp_hw_stats_info *info)
5673 {
5674 	int nhi = -1;
5675 	int bucket;
5676 
5677 	for (bucket = 0; bucket < nhgi->count; bucket++) {
5678 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[bucket];
5679 		u64 packets;
5680 		int err;
5681 
5682 		if (nhi == -1 || info->stats[nhi].id != nh->id) {
5683 			for (nhi = 0; nhi < info->num_nh; nhi++)
5684 				if (info->stats[nhi].id == nh->id)
5685 					break;
5686 			if (WARN_ON_ONCE(nhi == info->num_nh)) {
5687 				nhi = -1;
5688 				continue;
5689 			}
5690 		}
5691 
5692 		err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
5693 		if (err)
5694 			continue;
5695 
5696 		nh_grp_hw_stats_report_delta(info, nhi, packets);
5697 	}
5698 }
5699 
5700 static void mlxsw_sp_nexthop_obj_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
5701 					      struct nh_notifier_info *info)
5702 {
5703 	struct mlxsw_sp_nexthop_group_info *nhgi;
5704 	struct mlxsw_sp_nexthop_group *nh_grp;
5705 
5706 	if (info->type != NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS)
5707 		return;
5708 
5709 	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5710 	if (!nh_grp)
5711 		return;
5712 	nhgi = nh_grp->nhgi;
5713 
5714 	if (nhgi->is_resilient)
5715 		mlxsw_sp_nexthop_obj_res_hw_stats_get(mlxsw_sp, nhgi,
5716 						      info->nh_grp_hw_stats);
5717 	else
5718 		mlxsw_sp_nexthop_obj_mp_hw_stats_get(mlxsw_sp, nhgi,
5719 						     info->nh_grp_hw_stats);
5720 }
5721 
5722 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5723 				      unsigned long event, void *ptr)
5724 {
5725 	struct nh_notifier_info *info = ptr;
5726 	struct mlxsw_sp_router *router;
5727 	int err = 0;
5728 
5729 	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
5730 	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5731 	if (err)
5732 		goto out;
5733 
5734 	mutex_lock(&router->lock);
5735 
5736 	switch (event) {
5737 	case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE:
5738 		err = mlxsw_sp_nexthop_obj_res_group_pre(router->mlxsw_sp,
5739 							 info);
5740 		break;
5741 	case NEXTHOP_EVENT_REPLACE:
5742 		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5743 		break;
5744 	case NEXTHOP_EVENT_DEL:
5745 		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5746 		break;
5747 	case NEXTHOP_EVENT_BUCKET_REPLACE:
5748 		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5749 							  info);
5750 		break;
5751 	case NEXTHOP_EVENT_HW_STATS_REPORT_DELTA:
5752 		mlxsw_sp_nexthop_obj_hw_stats_get(router->mlxsw_sp, info);
5753 		break;
5754 	default:
5755 		break;
5756 	}
5757 
5758 	mutex_unlock(&router->lock);
5759 
5760 out:
5761 	return notifier_from_errno(err);
5762 }
5763 
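/* An IPv4 fib_info is considered a gateway route if its first nexthop has
 * a gateway address or egresses through an IPIP device.
 */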
5764 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5765 				   struct fib_info *fi)
5766 {
5767 	const struct fib_nh *nh = fib_info_nh(fi, 0);
5768 
5769 	return nh->fib_nh_gw_family ||
5770 	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5771 }
5772 
5773 static int
5774 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5775 				  struct mlxsw_sp_nexthop_group *nh_grp)
5776 {
5777 	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5778 	struct mlxsw_sp_nexthop_group_info *nhgi;
5779 	struct mlxsw_sp_nexthop *nh;
5780 	int err, i;
5781 
5782 	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5783 	if (!nhgi)
5784 		return -ENOMEM;
5785 	nh_grp->nhgi = nhgi;
5786 	nhgi->nh_grp = nh_grp;
5787 	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5788 	nhgi->count = nhs;
5789 	for (i = 0; i < nhgi->count; i++) {
5790 		struct fib_nh *fib_nh;
5791 
5792 		nh = &nhgi->nexthops[i];
5793 		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5794 		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5795 		if (err)
5796 			goto err_nexthop4_init;
5797 	}
5798 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5799 	if (err)
5800 		goto err_group_inc;
5801 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5802 	if (err)
5803 		goto err_group_refresh;
5804 
5805 	return 0;
5806 
5807 err_group_refresh:
5808 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5809 err_group_inc:
5810 	i = nhgi->count;
5811 err_nexthop4_init:
5812 	for (i--; i >= 0; i--) {
5813 		nh = &nhgi->nexthops[i];
5814 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5815 	}
5816 	kfree(nhgi);
5817 	return err;
5818 }
5819 
5820 static void
5821 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5822 				  struct mlxsw_sp_nexthop_group *nh_grp)
5823 {
5824 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5825 	int i;
5826 
5827 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5828 	for (i = nhgi->count - 1; i >= 0; i--) {
5829 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5830 
5831 		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5832 	}
5833 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5834 	WARN_ON_ONCE(nhgi->adj_index_valid);
5835 	kfree(nhgi);
5836 }
5837 
5838 static struct mlxsw_sp_nexthop_group *
5839 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5840 {
5841 	struct mlxsw_sp_nexthop_group *nh_grp;
5842 	int err;
5843 
5844 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5845 	if (!nh_grp)
5846 		return ERR_PTR(-ENOMEM);
5847 	INIT_LIST_HEAD(&nh_grp->vr_list);
5848 	err = rhashtable_init(&nh_grp->vr_ht,
5849 			      &mlxsw_sp_nexthop_group_vr_ht_params);
5850 	if (err)
5851 		goto err_nexthop_group_vr_ht_init;
5852 	INIT_LIST_HEAD(&nh_grp->fib_list);
5853 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5854 	nh_grp->ipv4.fi = fi;
5855 	fib_info_hold(fi);
5856 
5857 	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5858 	if (err)
5859 		goto err_nexthop_group_info_init;
5860 
5861 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5862 	if (err)
5863 		goto err_nexthop_group_insert;
5864 
5865 	nh_grp->can_destroy = true;
5866 
5867 	return nh_grp;
5868 
5869 err_nexthop_group_insert:
5870 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5871 err_nexthop_group_info_init:
5872 	fib_info_put(fi);
5873 	rhashtable_destroy(&nh_grp->vr_ht);
5874 err_nexthop_group_vr_ht_init:
5875 	kfree(nh_grp);
5876 	return ERR_PTR(err);
5877 }
5878 
5879 static void
5880 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5881 				struct mlxsw_sp_nexthop_group *nh_grp)
5882 {
5883 	if (!nh_grp->can_destroy)
5884 		return;
5885 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5886 	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5887 	fib_info_put(nh_grp->ipv4.fi);
5888 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5889 	rhashtable_destroy(&nh_grp->vr_ht);
5890 	kfree(nh_grp);
5891 }
5892 
5893 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5894 				       struct mlxsw_sp_fib_entry *fib_entry,
5895 				       struct fib_info *fi)
5896 {
5897 	struct mlxsw_sp_nexthop_group *nh_grp;
5898 
5899 	if (fi->nh) {
5900 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5901 							   fi->nh->id);
5902 		if (WARN_ON_ONCE(!nh_grp))
5903 			return -EINVAL;
5904 		goto out;
5905 	}
5906 
5907 	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5908 	if (!nh_grp) {
5909 		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5910 		if (IS_ERR(nh_grp))
5911 			return PTR_ERR(nh_grp);
5912 	}
5913 out:
5914 	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5915 	fib_entry->nh_group = nh_grp;
5916 	return 0;
5917 }
5918 
5919 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5920 					struct mlxsw_sp_fib_entry *fib_entry)
5921 {
5922 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5923 
5924 	list_del(&fib_entry->nexthop_group_node);
5925 	if (!list_empty(&nh_grp->fib_list))
5926 		return;
5927 
5928 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5929 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5930 		return;
5931 	}
5932 
5933 	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5934 }
5935 
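/* IPv4 entries with a non-zero DSCP are never offloaded, as the device
 * does not take the DSCP value into account during the LPM lookup.
 */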
5936 static bool
5937 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5938 {
5939 	struct mlxsw_sp_fib4_entry *fib4_entry;
5940 
5941 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5942 				  common);
5943 	return !fib4_entry->dscp;
5944 }
5945 
5946 static bool
5947 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5948 {
5949 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5950 
5951 	switch (fib_entry->fib_node->fib->proto) {
5952 	case MLXSW_SP_L3_PROTO_IPV4:
5953 		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5954 			return false;
5955 		break;
5956 	case MLXSW_SP_L3_PROTO_IPV6:
5957 		break;
5958 	}
5959 
5960 	switch (fib_entry->type) {
5961 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5962 		return !!nh_group->nhgi->adj_index_valid;
5963 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5964 		return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
5965 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5966 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5967 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5968 		return true;
5969 	default:
5970 		return false;
5971 	}
5972 }
5973 
5974 static struct mlxsw_sp_nexthop *
5975 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5976 		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5977 {
5978 	int i;
5979 
5980 	for (i = 0; i < nh_grp->nhgi->count; i++) {
5981 		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5982 		struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
5983 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
5984 
5985 		if (dev && dev == rt->fib6_nh->fib_nh_dev &&
5986 		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5987 				    &rt->fib6_nh->fib_nh_gw6))
5988 			return nh;
5989 	}
5990 
5991 	return NULL;
5992 }
5993 
5994 static void
5995 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5996 				      struct fib_entry_notifier_info *fen_info)
5997 {
5998 	u32 *p_dst = (u32 *) &fen_info->dst;
5999 	struct fib_rt_info fri;
6000 
6001 	fri.fi = fen_info->fi;
6002 	fri.tb_id = fen_info->tb_id;
6003 	fri.dst = cpu_to_be32(*p_dst);
6004 	fri.dst_len = fen_info->dst_len;
6005 	fri.dscp = fen_info->dscp;
6006 	fri.type = fen_info->type;
6007 	fri.offload = false;
6008 	fri.trap = false;
6009 	fri.offload_failed = true;
6010 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
6011 }
6012 
6013 static void
6014 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
6015 				 struct mlxsw_sp_fib_entry *fib_entry)
6016 {
6017 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
6018 	int dst_len = fib_entry->fib_node->key.prefix_len;
6019 	struct mlxsw_sp_fib4_entry *fib4_entry;
6020 	struct fib_rt_info fri;
6021 	bool should_offload;
6022 
6023 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
6024 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
6025 				  common);
6026 	fri.fi = fib4_entry->fi;
6027 	fri.tb_id = fib4_entry->tb_id;
6028 	fri.dst = cpu_to_be32(*p_dst);
6029 	fri.dst_len = dst_len;
6030 	fri.dscp = fib4_entry->dscp;
6031 	fri.type = fib4_entry->type;
6032 	fri.offload = should_offload;
6033 	fri.trap = !should_offload;
6034 	fri.offload_failed = false;
6035 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
6036 }
6037 
6038 static void
6039 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
6040 				   struct mlxsw_sp_fib_entry *fib_entry)
6041 {
6042 	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
6043 	int dst_len = fib_entry->fib_node->key.prefix_len;
6044 	struct mlxsw_sp_fib4_entry *fib4_entry;
6045 	struct fib_rt_info fri;
6046 
6047 	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
6048 				  common);
6049 	fri.fi = fib4_entry->fi;
6050 	fri.tb_id = fib4_entry->tb_id;
6051 	fri.dst = cpu_to_be32(*p_dst);
6052 	fri.dst_len = dst_len;
6053 	fri.dscp = fib4_entry->dscp;
6054 	fri.type = fib4_entry->type;
6055 	fri.offload = false;
6056 	fri.trap = false;
6057 	fri.offload_failed = false;
6058 	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
6059 }
6060 
6061 #if IS_ENABLED(CONFIG_IPV6)
6062 static void
6063 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
6064 				      struct fib6_info **rt_arr,
6065 				      unsigned int nrt6)
6066 {
6067 	int i;
6068 
6069 	/* In IPv6 a multipath route is represented using multiple routes, so
6070 	 * we need to set the flags on all of them.
6071 	 */
6072 	for (i = 0; i < nrt6; i++)
6073 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
6074 				       false, false, true);
6075 }
6076 #else
6077 static void
6078 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
6079 				      struct fib6_info **rt_arr,
6080 				      unsigned int nrt6)
6081 {
6082 }
6083 #endif
6084 
6085 #if IS_ENABLED(CONFIG_IPV6)
6086 static void
6087 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
6088 				 struct mlxsw_sp_fib_entry *fib_entry)
6089 {
6090 	struct mlxsw_sp_fib6_entry *fib6_entry;
6091 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6092 	bool should_offload;
6093 
6094 	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
6095 
6096 	/* In IPv6 a multipath route is represented using multiple routes, so
6097 	 * we need to set the flags on all of them.
6098 	 */
6099 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
6100 				  common);
6101 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
6102 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
6103 				       should_offload, !should_offload, false);
6104 }
6105 #else
6106 static void
6107 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
6108 				 struct mlxsw_sp_fib_entry *fib_entry)
6109 {
6110 }
6111 #endif
6112 
6113 #if IS_ENABLED(CONFIG_IPV6)
6114 static void
6115 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
6116 				   struct mlxsw_sp_fib_entry *fib_entry)
6117 {
6118 	struct mlxsw_sp_fib6_entry *fib6_entry;
6119 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6120 
6121 	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
6122 				  common);
6123 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
6124 		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
6125 				       false, false, false);
6126 }
6127 #else
6128 static void
6129 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
6130 				   struct mlxsw_sp_fib_entry *fib_entry)
6131 {
6132 }
6133 #endif
6134 
6135 static void
6136 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
6137 				struct mlxsw_sp_fib_entry *fib_entry)
6138 {
6139 	switch (fib_entry->fib_node->fib->proto) {
6140 	case MLXSW_SP_L3_PROTO_IPV4:
6141 		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
6142 		break;
6143 	case MLXSW_SP_L3_PROTO_IPV6:
6144 		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
6145 		break;
6146 	}
6147 }
6148 
6149 static void
6150 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
6151 				  struct mlxsw_sp_fib_entry *fib_entry)
6152 {
6153 	switch (fib_entry->fib_node->fib->proto) {
6154 	case MLXSW_SP_L3_PROTO_IPV4:
6155 		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
6156 		break;
6157 	case MLXSW_SP_L3_PROTO_IPV6:
6158 		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
6159 		break;
6160 	}
6161 }
6162 
6163 static void
6164 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
6165 				    struct mlxsw_sp_fib_entry *fib_entry,
6166 				    enum mlxsw_reg_ralue_op op)
6167 {
6168 	switch (op) {
6169 	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
6170 		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
6171 		break;
6172 	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
6173 		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
6174 		break;
6175 	default:
6176 		break;
6177 	}
6178 }
6179 
6180 static void
6181 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
6182 			      const struct mlxsw_sp_fib_entry *fib_entry,
6183 			      enum mlxsw_reg_ralue_op op)
6184 {
6185 	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
6186 	enum mlxsw_reg_ralxx_protocol proto;
6187 	u32 *p_dip;
6188 
6189 	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
6190 
6191 	switch (fib->proto) {
6192 	case MLXSW_SP_L3_PROTO_IPV4:
6193 		p_dip = (u32 *) fib_entry->fib_node->key.addr;
6194 		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
6195 				      fib_entry->fib_node->key.prefix_len,
6196 				      *p_dip);
6197 		break;
6198 	case MLXSW_SP_L3_PROTO_IPV6:
6199 		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
6200 				      fib_entry->fib_node->key.prefix_len,
6201 				      fib_entry->fib_node->key.addr);
6202 		break;
6203 	}
6204 }
6205 
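/* The mlxsw_sp_fib_entry_op_*() helpers below translate each FIB entry
 * type into a RALUE action: remote entries point at an adjacency range
 * (possibly ECMP), local entries at a RIF, trap / blackhole / unreachable
 * entries at a trap action, and decap entries at a tunnel index.
 * __mlxsw_sp_fib_entry_op() dispatches between them.
 */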
6206 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
6207 					struct mlxsw_sp_fib_entry *fib_entry,
6208 					enum mlxsw_reg_ralue_op op)
6209 {
6210 	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
6211 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
6212 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6213 	enum mlxsw_reg_ralue_trap_action trap_action;
6214 	u16 trap_id = 0;
6215 	u32 adjacency_index = 0;
6216 	u16 ecmp_size = 0;
6217 
6218 	/* If the nexthop group adjacency index is valid, use it with the
6219 	 * provided ECMP size. Otherwise, set up a trap and pass traffic to
6220 	 * the kernel.
6221 	 */
6222 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
6223 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
6224 		adjacency_index = nhgi->adj_index;
6225 		ecmp_size = nhgi->ecmp_size;
6226 	} else if (!nhgi->adj_index_valid && nhgi->count &&
6227 		   mlxsw_sp_nhgi_rif(nhgi)) {
6228 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
6229 		adjacency_index = mlxsw_sp->router->adj_trap_index;
6230 		ecmp_size = 1;
6231 	} else {
6232 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6233 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
6234 	}
6235 
6236 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6237 	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
6238 					adjacency_index, ecmp_size);
6239 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6240 }
6241 
6242 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
6243 				       struct mlxsw_sp_fib_entry *fib_entry,
6244 				       enum mlxsw_reg_ralue_op op)
6245 {
6246 	struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
6247 	enum mlxsw_reg_ralue_trap_action trap_action;
6248 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6249 	u16 trap_id = 0;
6250 	u16 rif_index = 0;
6251 
6252 	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
6253 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
6254 		rif_index = rif->rif_index;
6255 	} else {
6256 		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6257 		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
6258 	}
6259 
6260 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6261 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
6262 				       rif_index);
6263 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6264 }
6265 
6266 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
6267 				      struct mlxsw_sp_fib_entry *fib_entry,
6268 				      enum mlxsw_reg_ralue_op op)
6269 {
6270 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6271 
6272 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6273 	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
6274 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6275 }
6276 
6277 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
6278 					   struct mlxsw_sp_fib_entry *fib_entry,
6279 					   enum mlxsw_reg_ralue_op op)
6280 {
6281 	enum mlxsw_reg_ralue_trap_action trap_action;
6282 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6283 
6284 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6285 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6286 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
6287 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6288 }
6289 
6290 static int
6291 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6292 				  struct mlxsw_sp_fib_entry *fib_entry,
6293 				  enum mlxsw_reg_ralue_op op)
6294 {
6295 	enum mlxsw_reg_ralue_trap_action trap_action;
6296 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6297 	u16 trap_id;
6298 
6299 	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6300 	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6301 
6302 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6303 	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
6304 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6305 }
6306 
6307 static int
6308 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6309 				 struct mlxsw_sp_fib_entry *fib_entry,
6310 				 enum mlxsw_reg_ralue_op op)
6311 {
6312 	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6313 	const struct mlxsw_sp_ipip_ops *ipip_ops;
6314 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6315 	int err;
6316 
6317 	if (WARN_ON(!ipip_entry))
6318 		return -EINVAL;
6319 
6320 	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6321 	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6322 				     fib_entry->decap.tunnel_index);
6323 	if (err)
6324 		return err;
6325 
6326 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6327 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6328 					   fib_entry->decap.tunnel_index);
6329 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6330 }
6331 
6332 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6333 					   struct mlxsw_sp_fib_entry *fib_entry,
6334 					   enum mlxsw_reg_ralue_op op)
6335 {
6336 	char ralue_pl[MLXSW_REG_RALUE_LEN];
6337 
6338 	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
6339 	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
6340 					   fib_entry->decap.tunnel_index);
6341 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
6342 }
6343 
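/* Dispatch the RALUE operation according to the FIB entry type. */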
6344 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6345 				   struct mlxsw_sp_fib_entry *fib_entry,
6346 				   enum mlxsw_reg_ralue_op op)
6347 {
6348 	switch (fib_entry->type) {
6349 	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6350 		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
6351 	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6352 		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
6353 	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6354 		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
6355 	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6356 		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
6357 	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6358 		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
6359 							 op);
6360 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6361 		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
6362 							fib_entry, op);
6363 	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6364 		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
6365 	}
6366 	return -EINVAL;
6367 }
6368 
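/* Write the entry to the device and, on success, reflect the result in
 * the route's kernel offload/trap flags.
 */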
6369 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6370 				 struct mlxsw_sp_fib_entry *fib_entry,
6371 				 enum mlxsw_reg_ralue_op op)
6372 {
6373 	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
6374 
6375 	if (err)
6376 		return err;
6377 
6378 	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6379 
6380 	return err;
6381 }
6382 
6383 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6384 				     struct mlxsw_sp_fib_entry *fib_entry)
6385 {
6386 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6387 				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
6388 }
6389 
6390 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6391 				  struct mlxsw_sp_fib_entry *fib_entry)
6392 {
6393 	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
6394 				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
6395 }
6396 
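/* Map the kernel route type onto a device FIB entry type. RTN_LOCAL
 * routes may instead terminate an IP-in-IP or NVE tunnel, in which case
 * a decap entry type is used.
 */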
6397 static int
6398 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6399 			     const struct fib_entry_notifier_info *fen_info,
6400 			     struct mlxsw_sp_fib_entry *fib_entry)
6401 {
6402 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6403 	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6404 	struct mlxsw_sp_router *router = mlxsw_sp->router;
6405 	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6406 	int ifindex = nhgi->nexthops[0].ifindex;
6407 	struct mlxsw_sp_ipip_entry *ipip_entry;
6408 
6409 	switch (fen_info->type) {
6410 	case RTN_LOCAL:
6411 		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6412 							       MLXSW_SP_L3_PROTO_IPV4, dip);
6413 		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6414 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6415 			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6416 							     fib_entry,
6417 							     ipip_entry);
6418 		}
6419 		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6420 						 MLXSW_SP_L3_PROTO_IPV4,
6421 						 &dip)) {
6422 			u32 tunnel_index;
6423 
6424 			tunnel_index = router->nve_decap_config.tunnel_index;
6425 			fib_entry->decap.tunnel_index = tunnel_index;
6426 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6427 			return 0;
6428 		}
6429 		fallthrough;
6430 	case RTN_BROADCAST:
6431 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6432 		return 0;
6433 	case RTN_BLACKHOLE:
6434 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6435 		return 0;
6436 	case RTN_UNREACHABLE:
6437 	case RTN_PROHIBIT:
6438 		/* Packets hitting these routes need to be trapped, but can
6439 		 * be trapped with a lower priority than packets directed at
6440 		 * the host, so use action type local instead of trap.
6441 		 */
6442 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6443 		return 0;
6444 	case RTN_UNICAST:
6445 		if (nhgi->gateway)
6446 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6447 		else
6448 			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6449 		return 0;
6450 	default:
6451 		return -EINVAL;
6452 	}
6453 }
6454 
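/* Undo side effects of the type_set() functions above; currently only
 * IP-in-IP decap entries require cleanup.
 */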
6455 static void
6456 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6457 			      struct mlxsw_sp_fib_entry *fib_entry)
6458 {
6459 	switch (fib_entry->type) {
6460 	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6461 		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6462 		break;
6463 	default:
6464 		break;
6465 	}
6466 }
6467 
6468 static void
6469 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6470 			       struct mlxsw_sp_fib4_entry *fib4_entry)
6471 {
6472 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6473 }
6474 
6475 static struct mlxsw_sp_fib4_entry *
6476 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6477 			   struct mlxsw_sp_fib_node *fib_node,
6478 			   const struct fib_entry_notifier_info *fen_info)
6479 {
6480 	struct mlxsw_sp_fib4_entry *fib4_entry;
6481 	struct mlxsw_sp_fib_entry *fib_entry;
6482 	int err;
6483 
6484 	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6485 	if (!fib4_entry)
6486 		return ERR_PTR(-ENOMEM);
6487 	fib_entry = &fib4_entry->common;
6488 
6489 	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6490 	if (err)
6491 		goto err_nexthop4_group_get;
6492 
6493 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6494 					     fib_node->fib);
6495 	if (err)
6496 		goto err_nexthop_group_vr_link;
6497 
6498 	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6499 	if (err)
6500 		goto err_fib4_entry_type_set;
6501 
6502 	fib4_entry->fi = fen_info->fi;
6503 	fib_info_hold(fib4_entry->fi);
6504 	fib4_entry->tb_id = fen_info->tb_id;
6505 	fib4_entry->type = fen_info->type;
6506 	fib4_entry->dscp = fen_info->dscp;
6507 
6508 	fib_entry->fib_node = fib_node;
6509 
6510 	return fib4_entry;
6511 
6512 err_fib4_entry_type_set:
6513 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6514 err_nexthop_group_vr_link:
6515 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6516 err_nexthop4_group_get:
6517 	kfree(fib4_entry);
6518 	return ERR_PTR(err);
6519 }
6520 
6521 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6522 					struct mlxsw_sp_fib4_entry *fib4_entry)
6523 {
6524 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6525 
6526 	fib_info_put(fib4_entry->fi);
6527 	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6528 	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6529 					 fib_node->fib);
6530 	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6531 	kfree(fib4_entry);
6532 }
6533 
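/* Find the driver FIB entry corresponding to a notified kernel route by
 * matching on table ID, DSCP, route type and fib_info.
 */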
6534 static struct mlxsw_sp_fib4_entry *
6535 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6536 			   const struct fib_entry_notifier_info *fen_info)
6537 {
6538 	struct mlxsw_sp_fib4_entry *fib4_entry;
6539 	struct mlxsw_sp_fib_node *fib_node;
6540 	struct mlxsw_sp_fib *fib;
6541 	struct mlxsw_sp_vr *vr;
6542 
6543 	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6544 	if (!vr)
6545 		return NULL;
6546 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6547 
6548 	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6549 					    sizeof(fen_info->dst),
6550 					    fen_info->dst_len);
6551 	if (!fib_node)
6552 		return NULL;
6553 
6554 	fib4_entry = container_of(fib_node->fib_entry,
6555 				  struct mlxsw_sp_fib4_entry, common);
6556 	if (fib4_entry->tb_id == fen_info->tb_id &&
6557 	    fib4_entry->dscp == fen_info->dscp &&
6558 	    fib4_entry->type == fen_info->type &&
6559 	    fib4_entry->fi == fen_info->fi)
6560 		return fib4_entry;
6561 
6562 	return NULL;
6563 }
6564 
6565 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6566 	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6567 	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6568 	.key_len = sizeof(struct mlxsw_sp_fib_key),
6569 	.automatic_shrinking = true,
6570 };
6571 
6572 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6573 				    struct mlxsw_sp_fib_node *fib_node)
6574 {
6575 	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6576 				      mlxsw_sp_fib_ht_params);
6577 }
6578 
6579 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6580 				     struct mlxsw_sp_fib_node *fib_node)
6581 {
6582 	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6583 			       mlxsw_sp_fib_ht_params);
6584 }
6585 
6586 static struct mlxsw_sp_fib_node *
6587 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6588 			 size_t addr_len, unsigned char prefix_len)
6589 {
6590 	struct mlxsw_sp_fib_key key;
6591 
6592 	memset(&key, 0, sizeof(key));
6593 	memcpy(key.addr, addr, addr_len);
6594 	key.prefix_len = prefix_len;
6595 	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6596 }
6597 
6598 static struct mlxsw_sp_fib_node *
6599 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6600 			 size_t addr_len, unsigned char prefix_len)
6601 {
6602 	struct mlxsw_sp_fib_node *fib_node;
6603 
6604 	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6605 	if (!fib_node)
6606 		return NULL;
6607 
6608 	list_add(&fib_node->list, &fib->node_list);
6609 	memcpy(fib_node->key.addr, addr, addr_len);
6610 	fib_node->key.prefix_len = prefix_len;
6611 
6612 	return fib_node;
6613 }
6614 
6615 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6616 {
6617 	list_del(&fib_node->list);
6618 	kfree(fib_node);
6619 }
6620 
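/* Account for the node's prefix length in the LPM tree bound to this
 * FIB. If the prefix length is not yet covered, replace the tree with
 * one that also covers it.
 */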
6621 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6622 				      struct mlxsw_sp_fib_node *fib_node)
6623 {
6624 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6625 	struct mlxsw_sp_fib *fib = fib_node->fib;
6626 	struct mlxsw_sp_lpm_tree *lpm_tree;
6627 	int err;
6628 
6629 	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
6630 	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6631 		goto out;
6632 
6633 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6634 	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6635 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6636 					 fib->proto);
6637 	if (IS_ERR(lpm_tree))
6638 		return PTR_ERR(lpm_tree);
6639 
6640 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6641 	if (err)
6642 		goto err_lpm_tree_replace;
6643 
6644 out:
6645 	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6646 	return 0;
6647 
6648 err_lpm_tree_replace:
6649 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6650 	return err;
6651 }
6652 
6653 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6654 					 struct mlxsw_sp_fib_node *fib_node)
6655 {
6656 	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6657 	struct mlxsw_sp_prefix_usage req_prefix_usage;
6658 	struct mlxsw_sp_fib *fib = fib_node->fib;
6659 	int err;
6660 
6661 	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6662 		return;
6663 	/* Try to construct a new LPM tree from the current prefix usage
6664 	 * minus the unused prefix length. If we fail, keep the old tree.
6665 	 */
6666 	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6667 	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6668 				    fib_node->key.prefix_len);
6669 	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6670 					 fib->proto);
6671 	if (IS_ERR(lpm_tree))
6672 		return;
6673 
6674 	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6675 	if (err)
6676 		goto err_lpm_tree_replace;
6677 
6678 	return;
6679 
6680 err_lpm_tree_replace:
6681 	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6682 }
6683 
6684 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6685 				  struct mlxsw_sp_fib_node *fib_node,
6686 				  struct mlxsw_sp_fib *fib)
6687 {
6688 	int err;
6689 
6690 	err = mlxsw_sp_fib_node_insert(fib, fib_node);
6691 	if (err)
6692 		return err;
6693 	fib_node->fib = fib;
6694 
6695 	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6696 	if (err)
6697 		goto err_fib_lpm_tree_link;
6698 
6699 	return 0;
6700 
6701 err_fib_lpm_tree_link:
6702 	fib_node->fib = NULL;
6703 	mlxsw_sp_fib_node_remove(fib, fib_node);
6704 	return err;
6705 }
6706 
6707 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6708 				   struct mlxsw_sp_fib_node *fib_node)
6709 {
6710 	struct mlxsw_sp_fib *fib = fib_node->fib;
6711 
6712 	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6713 	fib_node->fib = NULL;
6714 	mlxsw_sp_fib_node_remove(fib, fib_node);
6715 }
6716 
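/* Look up a FIB node by prefix, creating it (and taking a reference on
 * the virtual router) if it does not exist yet.
 */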
6717 static struct mlxsw_sp_fib_node *
6718 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
6719 		      size_t addr_len, unsigned char prefix_len,
6720 		      enum mlxsw_sp_l3proto proto)
6721 {
6722 	struct mlxsw_sp_fib_node *fib_node;
6723 	struct mlxsw_sp_fib *fib;
6724 	struct mlxsw_sp_vr *vr;
6725 	int err;
6726 
6727 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
6728 	if (IS_ERR(vr))
6729 		return ERR_CAST(vr);
6730 	fib = mlxsw_sp_vr_fib(vr, proto);
6731 
6732 	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
6733 	if (fib_node)
6734 		return fib_node;
6735 
6736 	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
6737 	if (!fib_node) {
6738 		err = -ENOMEM;
6739 		goto err_fib_node_create;
6740 	}
6741 
6742 	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
6743 	if (err)
6744 		goto err_fib_node_init;
6745 
6746 	return fib_node;
6747 
6748 err_fib_node_init:
6749 	mlxsw_sp_fib_node_destroy(fib_node);
6750 err_fib_node_create:
6751 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6752 	return ERR_PTR(err);
6753 }
6754 
6755 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
6756 				  struct mlxsw_sp_fib_node *fib_node)
6757 {
6758 	struct mlxsw_sp_vr *vr = fib_node->fib->vr;
6759 
6760 	if (fib_node->fib_entry)
6761 		return;
6762 	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
6763 	mlxsw_sp_fib_node_destroy(fib_node);
6764 	mlxsw_sp_vr_put(mlxsw_sp, vr);
6765 }
6766 
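/* Bind the entry to its FIB node and program it to the device. */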
6767 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6768 					struct mlxsw_sp_fib_entry *fib_entry)
6769 {
6770 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6771 	int err;
6772 
6773 	fib_node->fib_entry = fib_entry;
6774 
6775 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
6776 	if (err)
6777 		goto err_fib_entry_update;
6778 
6779 	return 0;
6780 
6781 err_fib_entry_update:
6782 	fib_node->fib_entry = NULL;
6783 	return err;
6784 }
6785 
6786 static void
6787 mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6788 			       struct mlxsw_sp_fib_entry *fib_entry)
6789 {
6790 	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6791 
6792 	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
6793 	fib_node->fib_entry = NULL;
6794 }
6795 
6796 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6797 {
6798 	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6799 	struct mlxsw_sp_fib4_entry *fib4_replaced;
6800 
6801 	if (!fib_node->fib_entry)
6802 		return true;
6803 
6804 	fib4_replaced = container_of(fib_node->fib_entry,
6805 				     struct mlxsw_sp_fib4_entry, common);
6806 	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6807 	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
6808 		return false;
6809 
6810 	return true;
6811 }
6812 
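/* Handle FIB_EVENT_ENTRY_REPLACE: create the new entry, link it to the
 * FIB node and destroy the entry it replaced, if any. On failure, the
 * previously linked entry is restored.
 */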
6813 static int
6814 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
6815 			     const struct fib_entry_notifier_info *fen_info)
6816 {
6817 	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
6818 	struct mlxsw_sp_fib_entry *replaced;
6819 	struct mlxsw_sp_fib_node *fib_node;
6820 	int err;
6821 
6822 	if (fen_info->fi->nh &&
6823 	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
6824 		return 0;
6825 
6826 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
6827 					 &fen_info->dst, sizeof(fen_info->dst),
6828 					 fen_info->dst_len,
6829 					 MLXSW_SP_L3_PROTO_IPV4);
6830 	if (IS_ERR(fib_node)) {
6831 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
6832 		return PTR_ERR(fib_node);
6833 	}
6834 
6835 	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
6836 	if (IS_ERR(fib4_entry)) {
6837 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
6838 		err = PTR_ERR(fib4_entry);
6839 		goto err_fib4_entry_create;
6840 	}
6841 
6842 	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
6843 		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6844 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6845 		return 0;
6846 	}
6847 
6848 	replaced = fib_node->fib_entry;
6849 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
6850 	if (err) {
6851 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
6852 		goto err_fib_node_entry_link;
6853 	}
6854 
6855 	/* Nothing to replace */
6856 	if (!replaced)
6857 		return 0;
6858 
6859 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
6860 	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
6861 				     common);
6862 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
6863 
6864 	return 0;
6865 
6866 err_fib_node_entry_link:
6867 	fib_node->fib_entry = replaced;
6868 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6869 err_fib4_entry_create:
6870 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6871 	return err;
6872 }
6873 
6874 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
6875 				     struct fib_entry_notifier_info *fen_info)
6876 {
6877 	struct mlxsw_sp_fib4_entry *fib4_entry;
6878 	struct mlxsw_sp_fib_node *fib_node;
6879 
6880 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
6881 	if (!fib4_entry)
6882 		return;
6883 	fib_node = fib4_entry->common.fib_node;
6884 
6885 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
6886 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6887 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6888 }
6889 
6890 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
6891 {
6892 	/* Multicast routes aren't supported, so ignore them. Neighbour
6893 	 * Discovery packets are specifically trapped.
6894 	 */
6895 	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
6896 		return true;
6897 
6898 	/* Cloned routes are irrelevant in the forwarding path. */
6899 	if (rt->fib6_flags & RTF_CACHE)
6900 		return true;
6901 
6902 	return false;
6903 }
6904 
6905 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
6906 {
6907 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6908 
6909 	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
6910 	if (!mlxsw_sp_rt6)
6911 		return ERR_PTR(-ENOMEM);
6912 
6913 	/* In case of route replace, the replaced route is deleted with
6914 	 * no notification. Take a reference to prevent accessing freed
6915 	 * memory.
6916 	 */
6917 	mlxsw_sp_rt6->rt = rt;
6918 	fib6_info_hold(rt);
6919 
6920 	return mlxsw_sp_rt6;
6921 }
6922 
6923 #if IS_ENABLED(CONFIG_IPV6)
6924 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6925 {
6926 	fib6_info_release(rt);
6927 }
6928 #else
6929 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
6930 {
6931 }
6932 #endif
6933 
6934 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
6935 {
6936 	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
6937 
6938 	if (!mlxsw_sp_rt6->rt->nh)
6939 		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
6940 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
6941 	kfree(mlxsw_sp_rt6);
6942 }
6943 
6944 static struct fib6_info *
6945 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
6946 {
6947 	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
6948 				list)->rt;
6949 }
6950 
6951 static struct mlxsw_sp_rt6 *
6952 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6953 			    const struct fib6_info *rt)
6954 {
6955 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6956 
6957 	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6958 		if (mlxsw_sp_rt6->rt == rt)
6959 			return mlxsw_sp_rt6;
6960 	}
6961 
6962 	return NULL;
6963 }
6964 
6965 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6966 					const struct fib6_info *rt,
6967 					enum mlxsw_sp_ipip_type *ret)
6968 {
6969 	return rt->fib6_nh->fib_nh_dev &&
6970 	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6971 }
6972 
6973 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
6974 				  struct mlxsw_sp_nexthop_group *nh_grp,
6975 				  struct mlxsw_sp_nexthop *nh,
6976 				  const struct fib6_info *rt)
6977 {
6978 	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
6979 	int err;
6980 
6981 	nh->nhgi = nh_grp->nhgi;
6982 	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
6983 	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
6984 #if IS_ENABLED(CONFIG_IPV6)
6985 	nh->neigh_tbl = &nd_tbl;
6986 #endif
6987 
6988 	err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
6989 	if (err)
6990 		return err;
6991 
6992 	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
6993 
6994 	if (!dev)
6995 		return 0;
6996 	nh->ifindex = dev->ifindex;
6997 
6998 	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
6999 	if (err)
7000 		goto err_nexthop_type_init;
7001 
7002 	return 0;
7003 
7004 err_nexthop_type_init:
7005 	list_del(&nh->router_list_node);
7006 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
7007 	return err;
7008 }
7009 
7010 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
7011 				   struct mlxsw_sp_nexthop *nh)
7012 {
7013 	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
7014 	list_del(&nh->router_list_node);
7015 	mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
7016 }
7017 
7018 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
7019 				    const struct fib6_info *rt)
7020 {
7021 	return rt->fib6_nh->fib_nh_gw_family ||
7022 	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
7023 }
7024 
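/* Allocate the nexthop group info for an IPv6 entry, with one nexthop
 * per sibling route, and program the group into the adjacency table.
 */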
7025 static int
7026 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
7027 				  struct mlxsw_sp_nexthop_group *nh_grp,
7028 				  struct mlxsw_sp_fib6_entry *fib6_entry)
7029 {
7030 	struct mlxsw_sp_nexthop_group_info *nhgi;
7031 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7032 	struct mlxsw_sp_nexthop *nh;
7033 	int err, i;
7034 
7035 	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
7036 		       GFP_KERNEL);
7037 	if (!nhgi)
7038 		return -ENOMEM;
7039 	nh_grp->nhgi = nhgi;
7040 	nhgi->nh_grp = nh_grp;
7041 	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
7042 					struct mlxsw_sp_rt6, list);
7043 	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
7044 	nhgi->count = fib6_entry->nrt6;
7045 	for (i = 0; i < nhgi->count; i++) {
7046 		struct fib6_info *rt = mlxsw_sp_rt6->rt;
7047 
7048 		nh = &nhgi->nexthops[i];
7049 		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
7050 		if (err)
7051 			goto err_nexthop6_init;
7052 		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
7053 	}
7054 	nh_grp->nhgi = nhgi;
7055 	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
7056 	if (err)
7057 		goto err_group_inc;
7058 	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
7059 	if (err)
7060 		goto err_group_refresh;
7061 
7062 	return 0;
7063 
7064 err_group_refresh:
7065 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
7066 err_group_inc:
7067 	i = nhgi->count;
7068 err_nexthop6_init:
7069 	for (i--; i >= 0; i--) {
7070 		nh = &nhgi->nexthops[i];
7071 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
7072 	}
7073 	kfree(nhgi);
7074 	return err;
7075 }
7076 
7077 static void
7078 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
7079 				  struct mlxsw_sp_nexthop_group *nh_grp)
7080 {
7081 	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
7082 	int i;
7083 
7084 	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
7085 	for (i = nhgi->count - 1; i >= 0; i--) {
7086 		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
7087 
7088 		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
7089 	}
7090 	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
7091 	WARN_ON_ONCE(nhgi->adj_index_valid);
7092 	kfree(nhgi);
7093 }
7094 
7095 static struct mlxsw_sp_nexthop_group *
7096 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
7097 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7098 {
7099 	struct mlxsw_sp_nexthop_group *nh_grp;
7100 	int err;
7101 
7102 	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
7103 	if (!nh_grp)
7104 		return ERR_PTR(-ENOMEM);
7105 	INIT_LIST_HEAD(&nh_grp->vr_list);
7106 	err = rhashtable_init(&nh_grp->vr_ht,
7107 			      &mlxsw_sp_nexthop_group_vr_ht_params);
7108 	if (err)
7109 		goto err_nexthop_group_vr_ht_init;
7110 	INIT_LIST_HEAD(&nh_grp->fib_list);
7111 	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
7112 
7113 	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
7114 	if (err)
7115 		goto err_nexthop_group_info_init;
7116 
7117 	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
7118 	if (err)
7119 		goto err_nexthop_group_insert;
7120 
7121 	nh_grp->can_destroy = true;
7122 
7123 	return nh_grp;
7124 
7125 err_nexthop_group_insert:
7126 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
7127 err_nexthop_group_info_init:
7128 	rhashtable_destroy(&nh_grp->vr_ht);
7129 err_nexthop_group_vr_ht_init:
7130 	kfree(nh_grp);
7131 	return ERR_PTR(err);
7132 }
7133 
7134 static void
7135 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
7136 				struct mlxsw_sp_nexthop_group *nh_grp)
7137 {
7138 	if (!nh_grp->can_destroy)
7139 		return;
7140 	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
7141 	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
7142 	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
7143 	rhashtable_destroy(&nh_grp->vr_ht);
7144 	kfree(nh_grp);
7145 }
7146 
7147 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
7148 				       struct mlxsw_sp_fib6_entry *fib6_entry)
7149 {
7150 	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7151 	struct mlxsw_sp_nexthop_group *nh_grp;
7152 
7153 	if (rt->nh) {
7154 		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
7155 							   rt->nh->id);
7156 		if (WARN_ON_ONCE(!nh_grp))
7157 			return -EINVAL;
7158 		goto out;
7159 	}
7160 
7161 	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
7162 	if (!nh_grp) {
7163 		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
7164 		if (IS_ERR(nh_grp))
7165 			return PTR_ERR(nh_grp);
7166 	}
7167 
7168 	/* The route and the nexthop are described by the same struct, so we
7169 	 * need to update the nexthop offload indication for the new route.
7170 	 */
7171 	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
7172 
7173 out:
7174 	list_add_tail(&fib6_entry->common.nexthop_group_node,
7175 		      &nh_grp->fib_list);
7176 	fib6_entry->common.nh_group = nh_grp;
7177 
7178 	return 0;
7179 }
7180 
7181 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
7182 					struct mlxsw_sp_fib_entry *fib_entry)
7183 {
7184 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
7185 
7186 	list_del(&fib_entry->nexthop_group_node);
7187 	if (!list_empty(&nh_grp->fib_list))
7188 		return;
7189 
7190 	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
7191 		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
7192 		return;
7193 	}
7194 
7195 	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
7196 }
7197 
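/* Rebind the entry to a possibly different nexthop group after its set
 * of routes changed, and update the adjacency index used by the device.
 * On failure, roll back to the old group.
 */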
7198 static int
7199 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
7200 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7201 {
7202 	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
7203 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7204 	int err;
7205 
7206 	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
7207 	fib6_entry->common.nh_group = NULL;
7208 	list_del(&fib6_entry->common.nexthop_group_node);
7209 
7210 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7211 	if (err)
7212 		goto err_nexthop6_group_get;
7213 
7214 	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
7215 					     fib_node->fib);
7216 	if (err)
7217 		goto err_nexthop_group_vr_link;
7218 
7219 	/* If this entry is offloaded, then the adjacency index
7220 	 * currently associated with it in the device's table is that
7221 	 * of the old group. Start using the new one instead.
7222 	 */
7223 	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
7224 	if (err)
7225 		goto err_fib_entry_update;
7226 
7227 	if (list_empty(&old_nh_grp->fib_list))
7228 		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
7229 
7230 	return 0;
7231 
7232 err_fib_entry_update:
7233 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7234 					 fib_node->fib);
7235 err_nexthop_group_vr_link:
7236 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7237 err_nexthop6_group_get:
7238 	list_add_tail(&fib6_entry->common.nexthop_group_node,
7239 		      &old_nh_grp->fib_list);
7240 	fib6_entry->common.nh_group = old_nh_grp;
7241 	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
7242 	return err;
7243 }
7244 
7245 static int
7246 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
7247 				struct mlxsw_sp_fib6_entry *fib6_entry,
7248 				struct fib6_info **rt_arr, unsigned int nrt6)
7249 {
7250 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7251 	int err, i;
7252 
7253 	for (i = 0; i < nrt6; i++) {
7254 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7255 		if (IS_ERR(mlxsw_sp_rt6)) {
7256 			err = PTR_ERR(mlxsw_sp_rt6);
7257 			goto err_rt6_unwind;
7258 		}
7259 
7260 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7261 		fib6_entry->nrt6++;
7262 	}
7263 
7264 	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7265 	if (err)
7266 		goto err_rt6_unwind;
7267 
7268 	return 0;
7269 
7270 err_rt6_unwind:
7271 	for (; i > 0; i--) {
7272 		fib6_entry->nrt6--;
7273 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7274 					       struct mlxsw_sp_rt6, list);
7275 		list_del(&mlxsw_sp_rt6->list);
7276 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7277 	}
7278 	return err;
7279 }
7280 
7281 static void
7282 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
7283 				struct mlxsw_sp_fib6_entry *fib6_entry,
7284 				struct fib6_info **rt_arr, unsigned int nrt6)
7285 {
7286 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7287 	int i;
7288 
7289 	for (i = 0; i < nrt6; i++) {
7290 		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
7291 							   rt_arr[i]);
7292 		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
7293 			continue;
7294 
7295 		fib6_entry->nrt6--;
7296 		list_del(&mlxsw_sp_rt6->list);
7297 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7298 	}
7299 
7300 	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
7301 }
7302 
7303 static int
7304 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
7305 				   struct mlxsw_sp_fib_entry *fib_entry,
7306 				   const struct fib6_info *rt)
7307 {
7308 	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
7309 	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
7310 	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
7311 	struct mlxsw_sp_router *router = mlxsw_sp->router;
7312 	int ifindex = nhgi->nexthops[0].ifindex;
7313 	struct mlxsw_sp_ipip_entry *ipip_entry;
7314 
7315 	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7316 	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
7317 						       MLXSW_SP_L3_PROTO_IPV6,
7318 						       dip);
7319 
7320 	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
7321 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
7322 		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
7323 						     ipip_entry);
7324 	}
7325 	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
7326 					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
7327 		u32 tunnel_index;
7328 
7329 		tunnel_index = router->nve_decap_config.tunnel_index;
7330 		fib_entry->decap.tunnel_index = tunnel_index;
7331 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
7332 	}
7333 
7334 	return 0;
7335 }
7336 
7337 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7338 					struct mlxsw_sp_fib_entry *fib_entry,
7339 					const struct fib6_info *rt)
7340 {
7341 	if (rt->fib6_flags & RTF_LOCAL)
7342 		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7343 							  rt);
7344 	if (rt->fib6_flags & RTF_ANYCAST)
7345 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7346 	else if (rt->fib6_type == RTN_BLACKHOLE)
7347 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7348 	else if (rt->fib6_flags & RTF_REJECT)
7349 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7350 	else if (fib_entry->nh_group->nhgi->gateway)
7351 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7352 	else
7353 		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7354 
7355 	return 0;
7356 }
7357 
7358 static void
7359 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7360 {
7361 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7362 
7363 	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7364 				 list) {
7365 		fib6_entry->nrt6--;
7366 		list_del(&mlxsw_sp_rt6->list);
7367 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7368 	}
7369 }
7370 
7371 static struct mlxsw_sp_fib6_entry *
7372 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
7373 			   struct mlxsw_sp_fib_node *fib_node,
7374 			   struct fib6_info **rt_arr, unsigned int nrt6)
7375 {
7376 	struct mlxsw_sp_fib6_entry *fib6_entry;
7377 	struct mlxsw_sp_fib_entry *fib_entry;
7378 	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
7379 	int err, i;
7380 
7381 	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
7382 	if (!fib6_entry)
7383 		return ERR_PTR(-ENOMEM);
7384 	fib_entry = &fib6_entry->common;
7385 
7386 	INIT_LIST_HEAD(&fib6_entry->rt6_list);
7387 
7388 	for (i = 0; i < nrt6; i++) {
7389 		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
7390 		if (IS_ERR(mlxsw_sp_rt6)) {
7391 			err = PTR_ERR(mlxsw_sp_rt6);
7392 			goto err_rt6_unwind;
7393 		}
7394 		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
7395 		fib6_entry->nrt6++;
7396 	}
7397 
7398 	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
7399 	if (err)
7400 		goto err_rt6_unwind;
7401 
7402 	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
7403 					     fib_node->fib);
7404 	if (err)
7405 		goto err_nexthop_group_vr_link;
7406 
7407 	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
7408 	if (err)
7409 		goto err_fib6_entry_type_set;
7410 
7411 	fib_entry->fib_node = fib_node;
7412 
7413 	return fib6_entry;
7414 
7415 err_fib6_entry_type_set:
7416 	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
7417 err_nexthop_group_vr_link:
7418 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
7419 err_rt6_unwind:
7420 	for (; i > 0; i--) {
7421 		fib6_entry->nrt6--;
7422 		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
7423 					       struct mlxsw_sp_rt6, list);
7424 		list_del(&mlxsw_sp_rt6->list);
7425 		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7426 	}
7427 	kfree(fib6_entry);
7428 	return ERR_PTR(err);
7429 }
7430 
7431 static void
7432 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
7433 			       struct mlxsw_sp_fib6_entry *fib6_entry)
7434 {
7435 	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
7436 }
7437 
7438 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
7439 					struct mlxsw_sp_fib6_entry *fib6_entry)
7440 {
7441 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7442 
7443 	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
7444 	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
7445 					 fib_node->fib);
7446 	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
7447 	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
7448 	WARN_ON(fib6_entry->nrt6);
7449 	kfree(fib6_entry);
7450 }
7451 
7452 static struct mlxsw_sp_fib6_entry *
7453 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
7454 			   const struct fib6_info *rt)
7455 {
7456 	struct mlxsw_sp_fib6_entry *fib6_entry;
7457 	struct mlxsw_sp_fib_node *fib_node;
7458 	struct mlxsw_sp_fib *fib;
7459 	struct fib6_info *cmp_rt;
7460 	struct mlxsw_sp_vr *vr;
7461 
7462 	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
7463 	if (!vr)
7464 		return NULL;
7465 	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
7466 
7467 	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
7468 					    sizeof(rt->fib6_dst.addr),
7469 					    rt->fib6_dst.plen);
7470 	if (!fib_node)
7471 		return NULL;
7472 
7473 	fib6_entry = container_of(fib_node->fib_entry,
7474 				  struct mlxsw_sp_fib6_entry, common);
7475 	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7476 	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7477 	    rt->fib6_metric == cmp_rt->fib6_metric &&
7478 	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7479 		return fib6_entry;
7480 
7481 	return NULL;
7482 }
7483 
7484 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7485 {
7486 	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7487 	struct mlxsw_sp_fib6_entry *fib6_replaced;
7488 	struct fib6_info *rt, *rt_replaced;
7489 
7490 	if (!fib_node->fib_entry)
7491 		return true;
7492 
7493 	fib6_replaced = container_of(fib_node->fib_entry,
7494 				     struct mlxsw_sp_fib6_entry,
7495 				     common);
7496 	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7497 	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7498 	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7499 	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7500 		return false;
7501 
7502 	return true;
7503 }
7504 
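/* IPv6 counterpart of mlxsw_sp_router_fib4_replace(). Source-specific
 * routes are not offloaded and are rejected with -EINVAL.
 */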
7505 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7506 					struct fib6_info **rt_arr,
7507 					unsigned int nrt6)
7508 {
7509 	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7510 	struct mlxsw_sp_fib_entry *replaced;
7511 	struct mlxsw_sp_fib_node *fib_node;
7512 	struct fib6_info *rt = rt_arr[0];
7513 	int err;
7514 
7515 	if (rt->fib6_src.plen)
7516 		return -EINVAL;
7517 
7518 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7519 		return 0;
7520 
7521 	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7522 		return 0;
7523 
7524 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7525 					 &rt->fib6_dst.addr,
7526 					 sizeof(rt->fib6_dst.addr),
7527 					 rt->fib6_dst.plen,
7528 					 MLXSW_SP_L3_PROTO_IPV6);
7529 	if (IS_ERR(fib_node))
7530 		return PTR_ERR(fib_node);
7531 
7532 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7533 						nrt6);
7534 	if (IS_ERR(fib6_entry)) {
7535 		err = PTR_ERR(fib6_entry);
7536 		goto err_fib6_entry_create;
7537 	}
7538 
7539 	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7540 		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7541 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7542 		return 0;
7543 	}
7544 
7545 	replaced = fib_node->fib_entry;
7546 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
7547 	if (err)
7548 		goto err_fib_node_entry_link;
7549 
7550 	/* Nothing to replace */
7551 	if (!replaced)
7552 		return 0;
7553 
7554 	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7555 	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7556 				     common);
7557 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7558 
7559 	return 0;
7560 
7561 err_fib_node_entry_link:
7562 	fib_node->fib_entry = replaced;
7563 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7564 err_fib6_entry_create:
7565 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7566 	return err;
7567 }
7568 
7569 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7570 				       struct fib6_info **rt_arr,
7571 				       unsigned int nrt6)
7572 {
7573 	struct mlxsw_sp_fib6_entry *fib6_entry;
7574 	struct mlxsw_sp_fib_node *fib_node;
7575 	struct fib6_info *rt = rt_arr[0];
7576 	int err;
7577 
7578 	if (rt->fib6_src.plen)
7579 		return -EINVAL;
7580 
7581 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7582 		return 0;
7583 
7584 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7585 					 &rt->fib6_dst.addr,
7586 					 sizeof(rt->fib6_dst.addr),
7587 					 rt->fib6_dst.plen,
7588 					 MLXSW_SP_L3_PROTO_IPV6);
7589 	if (IS_ERR(fib_node))
7590 		return PTR_ERR(fib_node);
7591 
7592 	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7593 		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7594 		return -EINVAL;
7595 	}
7596 
7597 	fib6_entry = container_of(fib_node->fib_entry,
7598 				  struct mlxsw_sp_fib6_entry, common);
7599 	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
7600 					      nrt6);
7601 	if (err)
7602 		goto err_fib6_entry_nexthop_add;
7603 
7604 	return 0;
7605 
7606 err_fib6_entry_nexthop_add:
7607 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7608 	return err;
7609 }
7610 
7611 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7612 				     struct fib6_info **rt_arr,
7613 				     unsigned int nrt6)
7614 {
7615 	struct mlxsw_sp_fib6_entry *fib6_entry;
7616 	struct mlxsw_sp_fib_node *fib_node;
7617 	struct fib6_info *rt = rt_arr[0];
7618 
7619 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
7620 		return;
7621 
7622 	/* Multipath routes are first added to the FIB trie and only then
7623 	 * notified. If we vetoed the addition, we will get a delete
7624 	 * notification for a route we do not have. Therefore, do not warn if
7625 	 * the route was not found.
7626 	 */
7627 	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7628 	if (!fib6_entry)
7629 		return;
7630 
7631 	/* If not all the nexthops are deleted, then only reduce the nexthop
7632 	 * group.
7633 	 */
7634 	if (nrt6 != fib6_entry->nrt6) {
7635 		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
7636 						nrt6);
7637 		return;
7638 	}
7639 
7640 	fib_node = fib6_entry->common.fib_node;
7641 
7642 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
7643 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7644 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7645 }
7646 
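/* Select the IPv4 or IPv6 multicast routing table of the virtual
 * router, based on the notifier's address family.
 */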
7647 static struct mlxsw_sp_mr_table *
7648 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7649 {
7650 	if (family == RTNL_FAMILY_IPMR)
7651 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7652 	else
7653 		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7654 }
7655 
7656 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7657 				     struct mfc_entry_notifier_info *men_info,
7658 				     bool replace)
7659 {
7660 	struct mlxsw_sp_mr_table *mrt;
7661 	struct mlxsw_sp_vr *vr;
7662 
7663 	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7664 	if (IS_ERR(vr))
7665 		return PTR_ERR(vr);
7666 
7667 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7668 	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7669 }
7670 
7671 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7672 				      struct mfc_entry_notifier_info *men_info)
7673 {
7674 	struct mlxsw_sp_mr_table *mrt;
7675 	struct mlxsw_sp_vr *vr;
7676 
7677 	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7678 	if (WARN_ON(!vr))
7679 		return;
7680 
7681 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7682 	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7683 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7684 }
7685 
7686 static int
7687 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7688 			      struct vif_entry_notifier_info *ven_info)
7689 {
7690 	struct mlxsw_sp_mr_table *mrt;
7691 	struct mlxsw_sp_rif *rif;
7692 	struct mlxsw_sp_vr *vr;
7693 
7694 	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7695 	if (IS_ERR(vr))
7696 		return PTR_ERR(vr);
7697 
7698 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7699 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7700 	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7701 				   ven_info->vif_index,
7702 				   ven_info->vif_flags, rif);
7703 }
7704 
7705 static void
7706 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7707 			      struct vif_entry_notifier_info *ven_info)
7708 {
7709 	struct mlxsw_sp_mr_table *mrt;
7710 	struct mlxsw_sp_vr *vr;
7711 
7712 	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7713 	if (WARN_ON(!vr))
7714 		return;
7715 
7716 	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7717 	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7718 	mlxsw_sp_vr_put(mlxsw_sp, vr);
7719 }
7720 
7721 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7722 				     struct mlxsw_sp_fib_node *fib_node)
7723 {
7724 	struct mlxsw_sp_fib4_entry *fib4_entry;
7725 
7726 	fib4_entry = container_of(fib_node->fib_entry,
7727 				  struct mlxsw_sp_fib4_entry, common);
7728 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7729 	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7730 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7731 }
7732 
7733 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7734 				     struct mlxsw_sp_fib_node *fib_node)
7735 {
7736 	struct mlxsw_sp_fib6_entry *fib6_entry;
7737 
7738 	fib6_entry = container_of(fib_node->fib_entry,
7739 				  struct mlxsw_sp_fib6_entry, common);
7740 	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7741 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7742 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7743 }
7744 
7745 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7746 				    struct mlxsw_sp_fib_node *fib_node)
7747 {
7748 	switch (fib_node->fib->proto) {
7749 	case MLXSW_SP_L3_PROTO_IPV4:
7750 		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7751 		break;
7752 	case MLXSW_SP_L3_PROTO_IPV6:
7753 		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7754 		break;
7755 	}
7756 }
7757 
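/* Flush all FIB nodes of the given protocol in a virtual router.
 * Flushing the last node may release the virtual router itself, so the
 * loop checks for the list end before the node is freed.
 */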
7758 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7759 				  struct mlxsw_sp_vr *vr,
7760 				  enum mlxsw_sp_l3proto proto)
7761 {
7762 	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7763 	struct mlxsw_sp_fib_node *fib_node, *tmp;
7764 
7765 	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7766 		bool do_break = &tmp->list == &fib->node_list;
7767 
7768 		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7769 		if (do_break)
7770 			break;
7771 	}
7772 }
7773 
7774 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7775 {
7776 	int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
7777 	int i, j;
7778 
7779 	for (i = 0; i < max_vrs; i++) {
7780 		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7781 
7782 		if (!mlxsw_sp_vr_is_used(vr))
7783 			continue;
7784 
7785 		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7786 			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7787 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7788 
7789 		/* If the virtual router was only used for IPv4, then it's no
7790 		 * longer used.
7791 		 */
7792 		if (!mlxsw_sp_vr_is_used(vr))
7793 			continue;
7794 		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
7795 	}
7796 }
7797 
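/* FIB notifications are delivered in atomic context, so the notifier
 * info is copied into a work item (with references held) and processed
 * later in process context under the router lock.
 */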
7798 struct mlxsw_sp_fib6_event_work {
7799 	struct fib6_info **rt_arr;
7800 	unsigned int nrt6;
7801 };
7802 
7803 struct mlxsw_sp_fib_event_work {
7804 	struct work_struct work;
7805 	netdevice_tracker dev_tracker;
7806 	union {
7807 		struct mlxsw_sp_fib6_event_work fib6_work;
7808 		struct fib_entry_notifier_info fen_info;
7809 		struct fib_rule_notifier_info fr_info;
7810 		struct fib_nh_notifier_info fnh_info;
7811 		struct mfc_entry_notifier_info men_info;
7812 		struct vif_entry_notifier_info ven_info;
7813 	};
7814 	struct mlxsw_sp *mlxsw_sp;
7815 	unsigned long event;
7816 };
7817 
7818 static int
7819 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
7820 			       struct fib6_entry_notifier_info *fen6_info)
7821 {
7822 	struct fib6_info *rt = fen6_info->rt;
7823 	struct fib6_info **rt_arr;
7824 	struct fib6_info *iter;
7825 	unsigned int nrt6;
7826 	int i = 0;
7827 
7828 	nrt6 = fen6_info->nsiblings + 1;
7829 
7830 	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7831 	if (!rt_arr)
7832 		return -ENOMEM;
7833 
7834 	fib6_work->rt_arr = rt_arr;
7835 	fib6_work->nrt6 = nrt6;
7836 
7837 	rt_arr[0] = rt;
7838 	fib6_info_hold(rt);
7839 
7840 	if (!fen6_info->nsiblings)
7841 		return 0;
7842 
7843 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7844 		if (i == fen6_info->nsiblings)
7845 			break;
7846 
7847 		rt_arr[i + 1] = iter;
7848 		fib6_info_hold(iter);
7849 		i++;
7850 	}
7851 	WARN_ON_ONCE(i != fen6_info->nsiblings);
7852 
7853 	return 0;
7854 }
7855 
7856 static void
7857 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
7858 {
7859 	int i;
7860 
7861 	for (i = 0; i < fib6_work->nrt6; i++)
7862 		mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
7863 	kfree(fib6_work->rt_arr);
7864 }
7865 
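/* Deferred handler for IPv4 FIB events. Runs in process context, where it
 * is safe to take the router lock and write to device registers.
 */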
7866 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
7867 {
7868 	struct mlxsw_sp_fib_event_work *fib_work =
7869 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7870 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7871 	int err;
7872 
7873 	mutex_lock(&mlxsw_sp->router->lock);
7874 	mlxsw_sp_span_respin(mlxsw_sp);
7875 
7876 	switch (fib_work->event) {
7877 	case FIB_EVENT_ENTRY_REPLACE:
7878 		err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
7879 						   &fib_work->fen_info);
7880 		if (err) {
7881 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7882 			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7883 							      &fib_work->fen_info);
7884 		}
7885 		fib_info_put(fib_work->fen_info.fi);
7886 		break;
7887 	case FIB_EVENT_ENTRY_DEL:
7888 		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
7889 		fib_info_put(fib_work->fen_info.fi);
7890 		break;
7891 	case FIB_EVENT_NH_ADD:
7892 	case FIB_EVENT_NH_DEL:
7893 		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
7894 					fib_work->fnh_info.fib_nh);
7895 		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
7896 		break;
7897 	}
7898 	mutex_unlock(&mlxsw_sp->router->lock);
7899 	kfree(fib_work);
7900 }
7901 
7902 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
7903 {
7904 	struct mlxsw_sp_fib_event_work *fib_work =
7905 		    container_of(work, struct mlxsw_sp_fib_event_work, work);
7906 	struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
7907 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7908 	int err;
7909 
7910 	mutex_lock(&mlxsw_sp->router->lock);
7911 	mlxsw_sp_span_respin(mlxsw_sp);
7912 
7913 	switch (fib_work->event) {
7914 	case FIB_EVENT_ENTRY_REPLACE:
7915 		err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
7916 						   fib6_work->rt_arr,
7917 						   fib6_work->nrt6);
7918 		if (err) {
7919 			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7920 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7921 							      fib6_work->rt_arr,
7922 							      fib6_work->nrt6);
7923 		}
7924 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7925 		break;
7926 	case FIB_EVENT_ENTRY_APPEND:
7927 		err = mlxsw_sp_router_fib6_append(mlxsw_sp,
7928 						  fib6_work->rt_arr,
7929 						  fib6_work->nrt6);
7930 		if (err) {
7931 			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7932 			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7933 							      fib6_work->rt_arr,
7934 							      fib6_work->nrt6);
7935 		}
7936 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7937 		break;
7938 	case FIB_EVENT_ENTRY_DEL:
7939 		mlxsw_sp_router_fib6_del(mlxsw_sp,
7940 					 fib6_work->rt_arr,
7941 					 fib6_work->nrt6);
7942 		mlxsw_sp_router_fib6_work_fini(fib6_work);
7943 		break;
7944 	}
7945 	mutex_unlock(&mlxsw_sp->router->lock);
7946 	kfree(fib_work);
7947 }
7948 
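/* Deferred handler for multicast route and VIF events. Unlike the unicast
 * handlers above, this one runs under RTNL in addition to the router lock.
 */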
7949 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
7950 {
7951 	struct mlxsw_sp_fib_event_work *fib_work =
7952 		container_of(work, struct mlxsw_sp_fib_event_work, work);
7953 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7954 	bool replace;
7955 	int err;
7956 
7957 	rtnl_lock();
7958 	mutex_lock(&mlxsw_sp->router->lock);
7959 	switch (fib_work->event) {
7960 	case FIB_EVENT_ENTRY_REPLACE:
7961 	case FIB_EVENT_ENTRY_ADD:
7962 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
7963 
7964 		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
7965 						replace);
7966 		if (err)
7967 			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7968 		mr_cache_put(fib_work->men_info.mfc);
7969 		break;
7970 	case FIB_EVENT_ENTRY_DEL:
7971 		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
7972 		mr_cache_put(fib_work->men_info.mfc);
7973 		break;
7974 	case FIB_EVENT_VIF_ADD:
7975 		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7976 						    &fib_work->ven_info);
7977 		if (err)
7978 			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7979 		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7980 		break;
7981 	case FIB_EVENT_VIF_DEL:
7982 		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
7983 					      &fib_work->ven_info);
7984 		netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
7985 		break;
7986 	}
7987 	mutex_unlock(&mlxsw_sp->router->lock);
7988 	rtnl_unlock();
7989 	kfree(fib_work);
7990 }
7991 
7992 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
7993 				       struct fib_notifier_info *info)
7994 {
7995 	struct fib_entry_notifier_info *fen_info;
7996 	struct fib_nh_notifier_info *fnh_info;
7997 
7998 	switch (fib_work->event) {
7999 	case FIB_EVENT_ENTRY_REPLACE:
8000 	case FIB_EVENT_ENTRY_DEL:
8001 		fen_info = container_of(info, struct fib_entry_notifier_info,
8002 					info);
8003 		fib_work->fen_info = *fen_info;
8004 		/* Take a reference on the fib_info to prevent it from being
8005 		 * freed while the work is queued. Release it afterwards.
8006 		 */
8007 		fib_info_hold(fib_work->fen_info.fi);
8008 		break;
8009 	case FIB_EVENT_NH_ADD:
8010 	case FIB_EVENT_NH_DEL:
8011 		fnh_info = container_of(info, struct fib_nh_notifier_info,
8012 					info);
8013 		fib_work->fnh_info = *fnh_info;
8014 		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
8015 		break;
8016 	}
8017 }
8018 
8019 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
8020 				      struct fib_notifier_info *info)
8021 {
8022 	struct fib6_entry_notifier_info *fen6_info;
8023 	int err;
8024 
8025 	switch (fib_work->event) {
8026 	case FIB_EVENT_ENTRY_REPLACE:
8027 	case FIB_EVENT_ENTRY_APPEND:
8028 	case FIB_EVENT_ENTRY_DEL:
8029 		fen6_info = container_of(info, struct fib6_entry_notifier_info,
8030 					 info);
8031 		err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
8032 						     fen6_info);
8033 		if (err)
8034 			return err;
8035 		break;
8036 	}
8037 
8038 	return 0;
8039 }
8040 
8041 static void
8042 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
8043 			    struct fib_notifier_info *info)
8044 {
8045 	switch (fib_work->event) {
8046 	case FIB_EVENT_ENTRY_REPLACE:
8047 	case FIB_EVENT_ENTRY_ADD:
8048 	case FIB_EVENT_ENTRY_DEL:
8049 		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
8050 		mr_cache_hold(fib_work->men_info.mfc);
8051 		break;
8052 	case FIB_EVENT_VIF_ADD:
8053 	case FIB_EVENT_VIF_DEL:
8054 		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
8055 		netdev_hold(fib_work->ven_info.dev, &fib_work->dev_tracker,
8056 			    GFP_ATOMIC);
8057 		break;
8058 	}
8059 }
8060 
8061 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
8062 					  struct fib_notifier_info *info,
8063 					  struct mlxsw_sp *mlxsw_sp)
8064 {
8065 	struct netlink_ext_ack *extack = info->extack;
8066 	struct fib_rule_notifier_info *fr_info;
8067 	struct fib_rule *rule;
8068 	int err = 0;
8069 
8070 	/* nothing to do at the moment */
8071 	if (event == FIB_EVENT_RULE_DEL)
8072 		return 0;
8073 
8074 	fr_info = container_of(info, struct fib_rule_notifier_info, info);
8075 	rule = fr_info->rule;
8076 
8077 	/* The rule only affects locally generated traffic */
8078 	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
8079 		return 0;
8080 
8081 	switch (info->family) {
8082 	case AF_INET:
8083 		if (!fib4_rule_default(rule) && !rule->l3mdev)
8084 			err = -EOPNOTSUPP;
8085 		break;
8086 	case AF_INET6:
8087 		if (!fib6_rule_default(rule) && !rule->l3mdev)
8088 			err = -EOPNOTSUPP;
8089 		break;
8090 	case RTNL_FAMILY_IPMR:
8091 		if (!ipmr_rule_default(rule) && !rule->l3mdev)
8092 			err = -EOPNOTSUPP;
8093 		break;
8094 	case RTNL_FAMILY_IP6MR:
8095 		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
8096 			err = -EOPNOTSUPP;
8097 		break;
8098 	}
8099 
8100 	if (err < 0)
8101 		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
8102 
8103 	return err;
8104 }
8105 
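/* Notifier entry point for all FIB events. Events that can be vetoed are
 * checked synchronously; everything else is copied into a work struct and
 * handled asynchronously by one of the work functions above.
 */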
8106 /* Called with rcu_read_lock() */
8107 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
8108 				     unsigned long event, void *ptr)
8109 {
8110 	struct mlxsw_sp_fib_event_work *fib_work;
8111 	struct fib_notifier_info *info = ptr;
8112 	struct mlxsw_sp_router *router;
8113 	int err;
8114 
8115 	if ((info->family != AF_INET && info->family != AF_INET6 &&
8116 	     info->family != RTNL_FAMILY_IPMR &&
8117 	     info->family != RTNL_FAMILY_IP6MR))
8118 		return NOTIFY_DONE;
8119 
8120 	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
8121 
8122 	switch (event) {
8123 	case FIB_EVENT_RULE_ADD:
8124 	case FIB_EVENT_RULE_DEL:
8125 		err = mlxsw_sp_router_fib_rule_event(event, info,
8126 						     router->mlxsw_sp);
8127 		return notifier_from_errno(err);
8128 	case FIB_EVENT_ENTRY_ADD:
8129 	case FIB_EVENT_ENTRY_REPLACE:
8130 	case FIB_EVENT_ENTRY_APPEND:
8131 		if (info->family == AF_INET) {
8132 			struct fib_entry_notifier_info *fen_info = ptr;
8133 
8134 			if (fen_info->fi->fib_nh_is_v6) {
8135 				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
8136 				return notifier_from_errno(-EINVAL);
8137 			}
8138 		}
8139 		break;
8140 	}
8141 
8142 	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
8143 	if (!fib_work)
8144 		return NOTIFY_BAD;
8145 
8146 	fib_work->mlxsw_sp = router->mlxsw_sp;
8147 	fib_work->event = event;
8148 
8149 	switch (info->family) {
8150 	case AF_INET:
8151 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
8152 		mlxsw_sp_router_fib4_event(fib_work, info);
8153 		break;
8154 	case AF_INET6:
8155 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
8156 		err = mlxsw_sp_router_fib6_event(fib_work, info);
8157 		if (err)
8158 			goto err_fib_event;
8159 		break;
8160 	case RTNL_FAMILY_IP6MR:
8161 	case RTNL_FAMILY_IPMR:
8162 		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
8163 		mlxsw_sp_router_fibmr_event(fib_work, info);
8164 		break;
8165 	}
8166 
8167 	mlxsw_core_schedule_work(&fib_work->work);
8168 
8169 	return NOTIFY_DONE;
8170 
8171 err_fib_event:
8172 	kfree(fib_work);
8173 	return NOTIFY_BAD;
8174 }
8175 
8176 static struct mlxsw_sp_rif *
8177 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
8178 			 const struct net_device *dev)
8179 {
8180 	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8181 	int i;
8182 
8183 	for (i = 0; i < max_rifs; i++)
8184 		if (mlxsw_sp->router->rifs[i] &&
8185 		    mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
8186 			return mlxsw_sp->router->rifs[i];
8187 
8188 	return NULL;
8189 }
8190 
8191 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
8192 {
8193 	char ritr_pl[MLXSW_REG_RITR_LEN];
8194 	int err;
8195 
8196 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
8197 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8198 	if (err)
8199 		return err;
8200 
8201 	mlxsw_reg_ritr_enable_set(ritr_pl, false);
8202 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8203 }
8204 
8205 static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
8206 					 struct mlxsw_sp_rif *rif)
8207 {
8208 	int err;
8209 
8210 	err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif);
8211 	if (err)
8212 		return err;
8213 
8214 	err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif);
8215 	if (err)
8216 		goto err_nexthop;
8217 
8218 	return 0;
8219 
8220 err_nexthop:
8221 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8222 	return err;
8223 }
8224 
8225 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
8226 					  struct mlxsw_sp_rif *rif)
8227 {
8228 	/* Signal to nexthop cleanup that the RIF is going away. */
8229 	rif->crif->rif = NULL;
8230 
8231 	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
8232 	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
8233 	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8234 }
8235 
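/* Check whether the netdev still has any IPv4 or IPv6 addresses. Must be
 * called with the RCU read lock held.
 */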
8236 static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8237 {
8238 	struct inet6_dev *inet6_dev;
8239 	struct in_device *idev;
8240 
8241 	idev = __in_dev_get_rcu(dev);
8242 	if (idev && idev->ifa_list)
8243 		return false;
8244 
8245 	inet6_dev = __in6_dev_get(dev);
8246 	if (inet6_dev && !list_empty(&inet6_dev->addr_list))
8247 		return false;
8248 
8249 	return true;
8250 }
8251 
8252 static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
8253 {
8254 	bool addr_list_empty;
8255 
8256 	rcu_read_lock();
8257 	addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
8258 	rcu_read_unlock();
8259 
8260 	return addr_list_empty;
8261 }
8262 
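/* Decide whether an address event should change the RIF configuration:
 * NETDEV_UP creates a RIF only if none exists yet, while NETDEV_DOWN only
 * destroys one once the netdev has no addresses left.
 */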
8263 static bool
8264 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
8265 			   unsigned long event)
8266 {
8267 	bool addr_list_empty;
8268 
8269 	switch (event) {
8270 	case NETDEV_UP:
8271 		return rif == NULL;
8272 	case NETDEV_DOWN:
8273 		addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);
8274 
8275 		/* macvlans do not have a RIF, but rather piggyback on the
8276 		 * RIF of their lower device.
8277 		 */
8278 		if (netif_is_macvlan(dev) && addr_list_empty)
8279 			return true;
8280 
8281 		if (rif && addr_list_empty &&
8282 		    !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
8283 			return true;
8284 		/* It is possible we already removed the RIF ourselves
8285 		 * if it was assigned to a netdev that is now a bridge
8286 		 * or LAG slave.
8287 		 */
8288 		return false;
8289 	}
8290 
8291 	return false;
8292 }
8293 
8294 static enum mlxsw_sp_rif_type
8295 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8296 		      const struct net_device *dev)
8297 {
8298 	enum mlxsw_sp_fid_type type;
8299 
8300 	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8301 		return MLXSW_SP_RIF_TYPE_IPIP_LB;
8302 
8303 	/* Otherwise, the RIF type is derived from the type of the underlying FID. */
8304 	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8305 		type = MLXSW_SP_FID_TYPE_8021Q;
8306 	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8307 		type = MLXSW_SP_FID_TYPE_8021Q;
8308 	else if (netif_is_bridge_master(dev))
8309 		type = MLXSW_SP_FID_TYPE_8021D;
8310 	else
8311 		type = MLXSW_SP_FID_TYPE_RFID;
8312 
8313 	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8314 }
8315 
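/* RIF indexes are managed by a gen_pool. gen_pool_alloc() returns 0 on
 * failure, so the pool is based at MLXSW_SP_ROUTER_GENALLOC_OFFSET and the
 * offset is subtracted here, which allows index 0 to be allocated as well.
 */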
8316 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
8317 				    u8 rif_entries)
8318 {
8319 	*p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
8320 				      rif_entries);
8321 	if (*p_rif_index == 0)
8322 		return -ENOBUFS;
8323 	*p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
8324 
8325 	/* RIF indexes must be aligned to the allocation size. */
8326 	WARN_ON_ONCE(*p_rif_index % rif_entries);
8327 
8328 	return 0;
8329 }
8330 
8331 static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8332 				    u8 rif_entries)
8333 {
8334 	gen_pool_free(mlxsw_sp->router->rifs_table,
8335 		      MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
8336 }
8337 
8338 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8339 					       u16 vr_id,
8340 					       struct mlxsw_sp_crif *crif)
8341 {
8342 	struct net_device *l3_dev = crif ? crif->key.dev : NULL;
8343 	struct mlxsw_sp_rif *rif;
8344 
8345 	rif = kzalloc(rif_size, GFP_KERNEL);
8346 	if (!rif)
8347 		return NULL;
8348 
8349 	INIT_LIST_HEAD(&rif->neigh_list);
8350 	if (l3_dev) {
8351 		ether_addr_copy(rif->addr, l3_dev->dev_addr);
8352 		rif->mtu = l3_dev->mtu;
8353 	}
8354 	rif->vr_id = vr_id;
8355 	rif->rif_index = rif_index;
8356 	if (crif) {
8357 		rif->crif = crif;
8358 		crif->rif = rif;
8359 	}
8360 
8361 	return rif;
8362 }
8363 
8364 static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
8365 {
8366 	WARN_ON(!list_empty(&rif->neigh_list));
8367 
8368 	if (rif->crif)
8369 		rif->crif->rif = NULL;
8370 	kfree(rif);
8371 }
8372 
8373 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8374 					   u16 rif_index)
8375 {
8376 	return mlxsw_sp->router->rifs[rif_index];
8377 }
8378 
8379 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8380 {
8381 	return rif->rif_index;
8382 }
8383 
8384 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8385 {
8386 	return lb_rif->common.rif_index;
8387 }
8388 
8389 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8390 {
8391 	return lb_rif->ul_rif_id;
8392 }
8393 
8394 static bool
8395 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
8396 {
8397 	return mlxsw_sp_rif_counter_valid_get(rif,
8398 					      MLXSW_SP_RIF_COUNTER_EGRESS) &&
8399 	       mlxsw_sp_rif_counter_valid_get(rif,
8400 					      MLXSW_SP_RIF_COUNTER_INGRESS);
8401 }
8402 
8403 static int
8404 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8405 {
8406 	int err;
8407 
8408 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8409 	if (err)
8410 		return err;
8411 
8412 	/* Clear stale data. */
8413 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8414 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8415 					       NULL);
8416 	if (err)
8417 		goto err_clear_ingress;
8418 
8419 	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8420 	if (err)
8421 		goto err_alloc_egress;
8422 
8423 	/* Clear stale data. */
8424 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8425 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8426 					       NULL);
8427 	if (err)
8428 		goto err_clear_egress;
8429 
8430 	return 0;
8431 
8432 err_clear_egress:
8433 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8434 err_alloc_egress:
8435 err_clear_ingress:
8436 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8437 	return err;
8438 }
8439 
8440 static void
8441 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8442 {
8443 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8444 	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8445 }
8446 
8447 static void
8448 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8449 					  struct netdev_notifier_offload_xstats_info *info)
8450 {
8451 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8452 		return;
8453 	netdev_offload_xstats_report_used(info->report_used);
8454 }
8455 
8456 static int
8457 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
8458 				    struct rtnl_hw_stats64 *p_stats)
8459 {
8460 	struct mlxsw_sp_rif_counter_set_basic ingress;
8461 	struct mlxsw_sp_rif_counter_set_basic egress;
8462 	int err;
8463 
8464 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8465 					       MLXSW_SP_RIF_COUNTER_INGRESS,
8466 					       &ingress);
8467 	if (err)
8468 		return err;
8469 
8470 	err = mlxsw_sp_rif_counter_fetch_clear(rif,
8471 					       MLXSW_SP_RIF_COUNTER_EGRESS,
8472 					       &egress);
8473 	if (err)
8474 		return err;
8475 
8476 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX)		\
8477 		((SET.good_unicast_ ## SFX) +		\
8478 		 (SET.good_multicast_ ## SFX) +		\
8479 		 (SET.good_broadcast_ ## SFX))
8480 
8481 	p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8482 	p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8483 	p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8484 	p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8485 	p_stats->rx_errors = ingress.error_packets;
8486 	p_stats->tx_errors = egress.error_packets;
8487 	p_stats->rx_dropped = ingress.discard_packets;
8488 	p_stats->tx_dropped = egress.discard_packets;
8489 	p_stats->multicast = ingress.good_multicast_packets +
8490 			     ingress.good_broadcast_packets;
8491 
8492 #undef MLXSW_SP_ROUTER_ALL_GOOD
8493 
8494 	return 0;
8495 }
8496 
8497 static int
8498 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8499 					   struct netdev_notifier_offload_xstats_info *info)
8500 {
8501 	struct rtnl_hw_stats64 stats = {};
8502 	int err;
8503 
8504 	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8505 		return 0;
8506 
8507 	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8508 	if (err)
8509 		return err;
8510 
8511 	netdev_offload_xstats_report_delta(info->report_delta, &stats);
8512 	return 0;
8513 }
8514 
8515 struct mlxsw_sp_router_hwstats_notify_work {
8516 	struct work_struct work;
8517 	struct net_device *dev;
8518 	netdevice_tracker dev_tracker;
8519 };
8520 
8521 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8522 {
8523 	struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8524 		container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8525 			     work);
8526 
8527 	rtnl_lock();
8528 	rtnl_offload_xstats_notify(hws_work->dev);
8529 	rtnl_unlock();
8530 	netdev_put(hws_work->dev, &hws_work->dev_tracker);
8531 	kfree(hws_work);
8532 }
8533 
8534 static void
8535 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8536 {
8537 	struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8538 
8539 	/* To collect notification payload, the core ends up sending another
8540 	/* To collect the notification payload, the core ends up sending another
8541 	 * acquire the router lock again. Just postpone the notification until
8542 	 * later.
8543 	 */
8544 
8545 	hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8546 	if (!hws_work)
8547 		return;
8548 
8549 	INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8550 	netdev_hold(dev, &hws_work->dev_tracker, GFP_KERNEL);
8551 	hws_work->dev = dev;
8552 	mlxsw_core_schedule_work(&hws_work->work);
8553 }
8554 
8555 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8556 {
8557 	return mlxsw_sp_rif_dev(rif)->ifindex;
8558 }
8559 
8560 bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
8561 {
8562 	return !!mlxsw_sp_rif_dev(rif);
8563 }
8564 
8565 bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
8566 			 const struct net_device *dev)
8567 {
8568 	return mlxsw_sp_rif_dev(rif) == dev;
8569 }
8570 
8571 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8572 {
8573 	struct rtnl_hw_stats64 stats = {};
8574 
8575 	if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8576 		netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
8577 						 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8578 						 &stats);
8579 }
8580 
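/* Create a RIF for a netdev: bind it to a virtual router, allocate an
 * index and a type-specific structure, set up the FID, program the
 * hardware and replay the neighbours and nexthops that use the netdev.
 */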
8581 static struct mlxsw_sp_rif *
8582 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8583 		    const struct mlxsw_sp_rif_params *params,
8584 		    struct netlink_ext_ack *extack)
8585 {
8586 	u8 rif_entries = params->double_entry ? 2 : 1;
8587 	u32 tb_id = l3mdev_fib_table(params->dev);
8588 	const struct mlxsw_sp_rif_ops *ops;
8589 	struct mlxsw_sp_fid *fid = NULL;
8590 	enum mlxsw_sp_rif_type type;
8591 	struct mlxsw_sp_crif *crif;
8592 	struct mlxsw_sp_rif *rif;
8593 	struct mlxsw_sp_vr *vr;
8594 	u16 rif_index;
8595 	int i, err;
8596 
8597 	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8598 	ops = mlxsw_sp->router->rif_ops_arr[type];
8599 
8600 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8601 	if (IS_ERR(vr))
8602 		return ERR_CAST(vr);
8603 	vr->rif_count++;
8604 
8605 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
8606 	if (err) {
8607 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8608 		goto err_rif_index_alloc;
8609 	}
8610 
8611 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
8612 	if (WARN_ON(!crif)) {
8613 		err = -ENOENT;
8614 		goto err_crif_lookup;
8615 	}
8616 
8617 	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
8618 	if (!rif) {
8619 		err = -ENOMEM;
8620 		goto err_rif_alloc;
8621 	}
8622 	netdev_hold(params->dev, &rif->dev_tracker, GFP_KERNEL);
8623 	mlxsw_sp->router->rifs[rif_index] = rif;
8624 	rif->mlxsw_sp = mlxsw_sp;
8625 	rif->ops = ops;
8626 	rif->rif_entries = rif_entries;
8627 
8628 	if (ops->setup)
8629 		ops->setup(rif, params);
8630 
8631 	if (ops->fid_get) {
8632 		fid = ops->fid_get(rif, params, extack);
8633 		if (IS_ERR(fid)) {
8634 			err = PTR_ERR(fid);
8635 			goto err_fid_get;
8636 		}
8637 		rif->fid = fid;
8638 	}
8639 
8640 	err = ops->configure(rif, extack);
8641 	if (err)
8642 		goto err_configure;
8643 
8644 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8645 		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8646 		if (err)
8647 			goto err_mr_rif_add;
8648 	}
8649 
8650 	err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif);
8651 	if (err)
8652 		goto err_rif_made_sync;
8653 
8654 	if (netdev_offload_xstats_enabled(params->dev,
8655 					  NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8656 		err = mlxsw_sp_router_port_l3_stats_enable(rif);
8657 		if (err)
8658 			goto err_stats_enable;
8659 		mlxsw_sp_router_hwstats_notify_schedule(params->dev);
8660 	} else {
8661 		mlxsw_sp_rif_counters_alloc(rif);
8662 	}
8663 
8664 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
8665 	return rif;
8666 
8667 err_stats_enable:
8668 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8669 err_rif_made_sync:
8670 err_mr_rif_add:
8671 	for (i--; i >= 0; i--)
8672 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8673 	ops->deconfigure(rif);
8674 err_configure:
8675 	if (fid)
8676 		mlxsw_sp_fid_put(fid);
8677 err_fid_get:
8678 	mlxsw_sp->router->rifs[rif_index] = NULL;
8679 	netdev_put(params->dev, &rif->dev_tracker);
8680 	mlxsw_sp_rif_free(rif);
8681 err_rif_alloc:
8682 err_crif_lookup:
8683 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8684 err_rif_index_alloc:
8685 	vr->rif_count--;
8686 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8687 	return ERR_PTR(err);
8688 }
8689 
8690 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8691 {
8692 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
8693 	const struct mlxsw_sp_rif_ops *ops = rif->ops;
8694 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8695 	struct mlxsw_sp_crif *crif = rif->crif;
8696 	struct mlxsw_sp_fid *fid = rif->fid;
8697 	u8 rif_entries = rif->rif_entries;
8698 	u16 rif_index = rif->rif_index;
8699 	struct mlxsw_sp_vr *vr;
8700 	int i;
8701 
8702 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
8703 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8704 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
8705 
8706 	if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8707 		mlxsw_sp_rif_push_l3_stats(rif);
8708 		mlxsw_sp_router_port_l3_stats_disable(rif);
8709 		mlxsw_sp_router_hwstats_notify_schedule(dev);
8710 	} else {
8711 		mlxsw_sp_rif_counters_free(rif);
8712 	}
8713 
8714 	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8715 		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8716 	ops->deconfigure(rif);
8717 	if (fid)
8718 		/* Loopback RIFs are not associated with a FID. */
8719 		mlxsw_sp_fid_put(fid);
8720 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8721 	netdev_put(dev, &rif->dev_tracker);
8722 	mlxsw_sp_rif_free(rif);
8723 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8724 	vr->rif_count--;
8725 	mlxsw_sp_vr_put(mlxsw_sp, vr);
8726 
8727 	if (crif->can_destroy)
8728 		mlxsw_sp_crif_free(crif);
8729 }
8730 
8731 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8732 				 struct net_device *dev)
8733 {
8734 	struct mlxsw_sp_rif *rif;
8735 
8736 	mutex_lock(&mlxsw_sp->router->lock);
8737 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8738 	if (!rif)
8739 		goto out;
8740 	mlxsw_sp_rif_destroy(rif);
8741 out:
8742 	mutex_unlock(&mlxsw_sp->router->lock);
8743 }
8744 
8745 static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
8746 					    struct net_device *br_dev,
8747 					    u16 vid)
8748 {
8749 	struct net_device *upper_dev;
8750 	struct mlxsw_sp_crif *crif;
8751 
8752 	rcu_read_lock();
8753 	upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
8754 	rcu_read_unlock();
8755 
8756 	if (!upper_dev)
8757 		return;
8758 
8759 	crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
8760 	if (!crif || !crif->rif)
8761 		return;
8762 
8763 	mlxsw_sp_rif_destroy(crif->rif);
8764 }
8765 
8766 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8767 					  struct net_device *l3_dev,
8768 					  int lower_pvid,
8769 					  unsigned long event,
8770 					  struct netlink_ext_ack *extack);
8771 
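/* Handle a PVID change on a VLAN-aware bridge: create a RIF for the new
 * PVID (migrating from the old PVID's RIF if there was one), destroy the
 * RIF when the PVID is removed, and let a VLAN upper holding the old PVID
 * take over RIF ownership for that VID.
 */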
8772 int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
8773 				    struct net_device *br_dev,
8774 				    u16 new_vid, bool is_pvid,
8775 				    struct netlink_ext_ack *extack)
8776 {
8777 	struct mlxsw_sp_rif *old_rif;
8778 	struct mlxsw_sp_rif *new_rif;
8779 	struct net_device *upper_dev;
8780 	u16 old_pvid = 0;
8781 	u16 new_pvid;
8782 	int err = 0;
8783 
8784 	mutex_lock(&mlxsw_sp->router->lock);
8785 	old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
8786 	if (old_rif) {
8787 		/* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
8788 		 * gotten a PVID notification.
8789 		 */
8790 		if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
8791 			old_rif = NULL;
8792 		else
8793 			old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
8794 	}
8795 
8796 	if (is_pvid)
8797 		new_pvid = new_vid;
8798 	else if (old_pvid == new_vid)
8799 		new_pvid = 0;
8800 	else
8801 		goto out;
8802 
8803 	if (old_pvid == new_pvid)
8804 		goto out;
8805 
8806 	if (new_pvid) {
8807 		struct mlxsw_sp_rif_params params = {
8808 			.dev = br_dev,
8809 			.vid = new_pvid,
8810 		};
8811 
8812 		/* If there is a VLAN upper with the same VID as the new PVID,
8813 		 * kill its RIF, if there is one.
8814 		 */
8815 		mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);
8816 
8817 		if (mlxsw_sp_dev_addr_list_empty(br_dev))
8818 			goto out;
8819 		new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8820 		if (IS_ERR(new_rif)) {
8821 			err = PTR_ERR(new_rif);
8822 			goto out;
8823 		}
8824 
8825 		if (old_pvid)
8826 			mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
8827 						     true);
8828 	} else {
8829 		mlxsw_sp_rif_destroy(old_rif);
8830 	}
8831 
8832 	if (old_pvid) {
8833 		rcu_read_lock();
8834 		upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
8835 						     old_pvid);
8836 		rcu_read_unlock();
8837 		if (upper_dev)
8838 			err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
8839 							     upper_dev,
8840 							     new_pvid,
8841 							     NETDEV_UP, extack);
8842 	}
8843 
8844 out:
8845 	mutex_unlock(&mlxsw_sp->router->lock);
8846 	return err;
8847 }
8848 
8849 static void
8850 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8851 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8852 {
8853 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8854 
8855 	params->vid = mlxsw_sp_port_vlan->vid;
8856 	params->lag = mlxsw_sp_port->lagged;
8857 	if (params->lag)
8858 		params->lag_id = mlxsw_sp_port->lag_id;
8859 	else
8860 		params->system_port = mlxsw_sp_port->local_port;
8861 }
8862 
8863 static struct mlxsw_sp_rif_subport *
8864 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8865 {
8866 	return container_of(rif, struct mlxsw_sp_rif_subport, common);
8867 }
8868 
8869 int mlxsw_sp_rif_subport_port(const struct mlxsw_sp_rif *rif,
8870 			      u16 *port, bool *is_lag)
8871 {
8872 	struct mlxsw_sp_rif_subport *rif_subport;
8873 
8874 	if (WARN_ON(rif->ops->type != MLXSW_SP_RIF_TYPE_SUBPORT))
8875 		return -EINVAL;
8876 
8877 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8878 	*is_lag = rif_subport->lag;
8879 	*port = *is_lag ? rif_subport->lag_id : rif_subport->system_port;
8880 	return 0;
8881 }
8882 
8883 static struct mlxsw_sp_rif *
8884 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8885 			 const struct mlxsw_sp_rif_params *params,
8886 			 struct netlink_ext_ack *extack)
8887 {
8888 	struct mlxsw_sp_rif_subport *rif_subport;
8889 	struct mlxsw_sp_rif *rif;
8890 
8891 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8892 	if (!rif)
8893 		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8894 
8895 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8896 	refcount_inc(&rif_subport->ref_count);
8897 	return rif;
8898 }
8899 
8900 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8901 {
8902 	struct mlxsw_sp_rif_subport *rif_subport;
8903 
8904 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
8905 	if (!refcount_dec_and_test(&rif_subport->ref_count))
8906 		return;
8907 
8908 	mlxsw_sp_rif_destroy(rif);
8909 }
8910 
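/* The device supports a limited number of distinct RIF MAC prefixes, so
 * they are tracked as reference-counted profiles that are shared between
 * all RIFs whose MAC addresses agree in the prefix bits.
 */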
8911 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8912 						struct mlxsw_sp_rif_mac_profile *profile,
8913 						struct netlink_ext_ack *extack)
8914 {
8915 	u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8916 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8917 	int id;
8918 
8919 	id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8920 		       max_rif_mac_profiles, GFP_KERNEL);
8921 
8922 	if (id >= 0) {
8923 		profile->id = id;
8924 		return 0;
8925 	}
8926 
8927 	if (id == -ENOSPC)
8928 		NL_SET_ERR_MSG_MOD(extack,
8929 				   "Exceeded number of supported router interface MAC profiles");
8930 
8931 	return id;
8932 }
8933 
8934 static struct mlxsw_sp_rif_mac_profile *
8935 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8936 {
8937 	struct mlxsw_sp_rif_mac_profile *profile;
8938 
8939 	profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8940 			     mac_profile);
8941 	WARN_ON(!profile);
8942 	return profile;
8943 }
8944 
8945 static struct mlxsw_sp_rif_mac_profile *
8946 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8947 {
8948 	struct mlxsw_sp_rif_mac_profile *profile;
8949 
8950 	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8951 	if (!profile)
8952 		return NULL;
8953 
8954 	ether_addr_copy(profile->mac_prefix, mac);
8955 	refcount_set(&profile->ref_count, 1);
8956 	return profile;
8957 }
8958 
8959 static struct mlxsw_sp_rif_mac_profile *
8960 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8961 {
8962 	struct mlxsw_sp_router *router = mlxsw_sp->router;
8963 	struct mlxsw_sp_rif_mac_profile *profile;
8964 	int id;
8965 
8966 	idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8967 		if (ether_addr_equal_masked(profile->mac_prefix, mac,
8968 					    mlxsw_sp->mac_mask))
8969 			return profile;
8970 	}
8971 
8972 	return NULL;
8973 }
8974 
8975 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8976 {
8977 	const struct mlxsw_sp *mlxsw_sp = priv;
8978 
8979 	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8980 }
8981 
8982 static u64 mlxsw_sp_rifs_occ_get(void *priv)
8983 {
8984 	const struct mlxsw_sp *mlxsw_sp = priv;
8985 
8986 	return atomic_read(&mlxsw_sp->router->rifs_count);
8987 }
8988 
8989 static struct mlxsw_sp_rif_mac_profile *
8990 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8991 				struct netlink_ext_ack *extack)
8992 {
8993 	struct mlxsw_sp_rif_mac_profile *profile;
8994 	int err;
8995 
8996 	profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8997 	if (!profile)
8998 		return ERR_PTR(-ENOMEM);
8999 
9000 	err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
9001 	if (err)
9002 		goto profile_index_alloc_err;
9003 
9004 	atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
9005 	return profile;
9006 
9007 profile_index_alloc_err:
9008 	kfree(profile);
9009 	return ERR_PTR(err);
9010 }
9011 
9012 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
9013 					     u8 mac_profile)
9014 {
9015 	struct mlxsw_sp_rif_mac_profile *profile;
9016 
9017 	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
9018 	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
9019 	kfree(profile);
9020 }
9021 
9022 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
9023 					const char *mac, u8 *p_mac_profile,
9024 					struct netlink_ext_ack *extack)
9025 {
9026 	struct mlxsw_sp_rif_mac_profile *profile;
9027 
9028 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
9029 	if (profile) {
9030 		refcount_inc(&profile->ref_count);
9031 		goto out;
9032 	}
9033 
9034 	profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
9035 	if (IS_ERR(profile))
9036 		return PTR_ERR(profile);
9037 
9038 out:
9039 	*p_mac_profile = profile->id;
9040 	return 0;
9041 }
9042 
9043 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
9044 					 u8 mac_profile)
9045 {
9046 	struct mlxsw_sp_rif_mac_profile *profile;
9047 
9048 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
9049 			   mac_profile);
9050 	if (WARN_ON(!profile))
9051 		return;
9052 
9053 	if (!refcount_dec_and_test(&profile->ref_count))
9054 		return;
9055 
9056 	mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
9057 }
9058 
9059 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
9060 {
9061 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9062 	struct mlxsw_sp_rif_mac_profile *profile;
9063 
9064 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
9065 			   rif->mac_profile_id);
9066 	if (WARN_ON(!profile))
9067 		return false;
9068 
9069 	return refcount_read(&profile->ref_count) > 1;
9070 }
9071 
9072 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
9073 					 const char *new_mac)
9074 {
9075 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9076 	struct mlxsw_sp_rif_mac_profile *profile;
9077 
9078 	profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
9079 			   rif->mac_profile_id);
9080 	if (WARN_ON(!profile))
9081 		return -EINVAL;
9082 
9083 	ether_addr_copy(profile->mac_prefix, new_mac);
9084 	return 0;
9085 }
9086 
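/* Switch a RIF to a MAC profile matching its new address. If the current
 * profile is not shared and no other profile already matches, it can
 * simply be edited in place; otherwise a matching (possibly new) profile
 * is taken and the old one is released.
 */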
9087 static int
9088 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
9089 				 struct mlxsw_sp_rif *rif,
9090 				 const char *new_mac,
9091 				 struct netlink_ext_ack *extack)
9092 {
9093 	u8 mac_profile;
9094 	int err;
9095 
9096 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
9097 	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
9098 		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
9099 
9100 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
9101 					   &mac_profile, extack);
9102 	if (err)
9103 		return err;
9104 
9105 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
9106 	rif->mac_profile_id = mac_profile;
9107 	return 0;
9108 }
9109 
9110 static int
9111 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
9112 				 struct net_device *l3_dev,
9113 				 struct netlink_ext_ack *extack)
9114 {
9115 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
9116 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
9117 	struct mlxsw_sp_rif_params params;
9118 	u16 vid = mlxsw_sp_port_vlan->vid;
9119 	struct mlxsw_sp_rif *rif;
9120 	struct mlxsw_sp_fid *fid;
9121 	int err;
9122 
9123 	params = (struct mlxsw_sp_rif_params) {
9124 		.dev = l3_dev,
9125 		.vid = vid,
9126 	};
9127 
9128 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
9129 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
9130 	if (IS_ERR(rif))
9131 		return PTR_ERR(rif);
9132 
9133 	/* The FID was already created, so just take a reference. */
9134 	fid = rif->ops->fid_get(rif, &params, extack);
9135 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
9136 	if (err)
9137 		goto err_fid_port_vid_map;
9138 
9139 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
9140 	if (err)
9141 		goto err_port_vid_learning_set;
9142 
9143 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
9144 					BR_STATE_FORWARDING);
9145 	if (err)
9146 		goto err_port_vid_stp_set;
9147 
9148 	mlxsw_sp_port_vlan->fid = fid;
9149 
9150 	return 0;
9151 
9152 err_port_vid_stp_set:
9153 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
9154 err_port_vid_learning_set:
9155 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
9156 err_fid_port_vid_map:
9157 	mlxsw_sp_fid_put(fid);
9158 	mlxsw_sp_rif_subport_put(rif);
9159 	return err;
9160 }
9161 
9162 static void
9163 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
9164 {
9165 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
9166 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
9167 	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
9168 	u16 vid = mlxsw_sp_port_vlan->vid;
9169 
9170 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
9171 		return;
9172 
9173 	mlxsw_sp_port_vlan->fid = NULL;
9174 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
9175 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
9176 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
9177 	mlxsw_sp_fid_put(fid);
9178 	mlxsw_sp_rif_subport_put(rif);
9179 }
9180 
9181 static int
9182 mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
9183 					struct net_device *l3_dev,
9184 					struct netlink_ext_ack *extack)
9185 {
9186 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
9187 
9188 	lockdep_assert_held(&mlxsw_sp->router->lock);
9189 
9190 	if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
9191 		return 0;
9192 
9193 	return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
9194 						extack);
9195 }
9196 
9197 void
9198 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
9199 {
9200 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
9201 
9202 	mutex_lock(&mlxsw_sp->router->lock);
9203 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9204 	mutex_unlock(&mlxsw_sp->router->lock);
9205 }
9206 
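/* Join or leave the router for a {port, VID} pair in response to an
 * address being added to or removed from the corresponding netdev.
 */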
9207 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
9208 					     struct net_device *port_dev,
9209 					     unsigned long event, u16 vid,
9210 					     struct netlink_ext_ack *extack)
9211 {
9212 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
9213 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
9214 
9215 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
9216 	if (WARN_ON(!mlxsw_sp_port_vlan))
9217 		return -EINVAL;
9218 
9219 	switch (event) {
9220 	case NETDEV_UP:
9221 		return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
9222 							l3_dev, extack);
9223 	case NETDEV_DOWN:
9224 		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
9225 		break;
9226 	}
9227 
9228 	return 0;
9229 }
9230 
9231 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
9232 					unsigned long event, bool nomaster,
9233 					struct netlink_ext_ack *extack)
9234 {
9235 	if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
9236 			  netif_is_lag_port(port_dev)))
9237 		return 0;
9238 
9239 	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
9240 						 MLXSW_SP_DEFAULT_VID, extack);
9241 }
9242 
9243 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
9244 					 struct net_device *lag_dev,
9245 					 unsigned long event, u16 vid,
9246 					 struct netlink_ext_ack *extack)
9247 {
9248 	struct net_device *port_dev;
9249 	struct list_head *iter;
9250 	int err;
9251 
9252 	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
9253 		if (mlxsw_sp_port_dev_check(port_dev)) {
9254 			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
9255 								port_dev,
9256 								event, vid,
9257 								extack);
9258 			if (err)
9259 				return err;
9260 		}
9261 	}
9262 
9263 	return 0;
9264 }
9265 
9266 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
9267 				       unsigned long event, bool nomaster,
9268 				       struct netlink_ext_ack *extack)
9269 {
9270 	if (!nomaster && netif_is_bridge_port(lag_dev))
9271 		return 0;
9272 
9273 	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
9274 					     MLXSW_SP_DEFAULT_VID, extack);
9275 }
9276 
9277 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
9278 					  struct net_device *l3_dev,
9279 					  int lower_pvid,
9280 					  unsigned long event,
9281 					  struct netlink_ext_ack *extack)
9282 {
9283 	struct mlxsw_sp_rif_params params = {
9284 		.dev = l3_dev,
9285 	};
9286 	struct mlxsw_sp_rif *rif;
9287 	int err;
9288 
9289 	switch (event) {
9290 	case NETDEV_UP:
9291 		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
9292 			u16 proto;
9293 
9294 			br_vlan_get_proto(l3_dev, &proto);
9295 			if (proto == ETH_P_8021AD) {
9296 				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
9297 				return -EOPNOTSUPP;
9298 			}
9299 			err = br_vlan_get_pvid(l3_dev, &params.vid);
9300 			if (err)
9301 				return err;
9302 			if (!params.vid)
9303 				return 0;
9304 		} else if (is_vlan_dev(l3_dev)) {
9305 			params.vid = vlan_dev_vlan_id(l3_dev);
9306 
9307 			/* If the VID matches the PVID of the bridge below, the
9308 			 * bridge owns the RIF for this VLAN. Don't do anything.
9309 			 */
9310 			if ((int)params.vid == lower_pvid)
9311 				return 0;
9312 		}
9313 
9314 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
9315 		if (IS_ERR(rif))
9316 			return PTR_ERR(rif);
9317 		break;
9318 	case NETDEV_DOWN:
9319 		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9320 		mlxsw_sp_rif_destroy(rif);
9321 		break;
9322 	}
9323 
9324 	return 0;
9325 }
9326 
9327 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
9328 					struct net_device *vlan_dev,
9329 					unsigned long event, bool nomaster,
9330 					struct netlink_ext_ack *extack)
9331 {
9332 	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
9333 	u16 vid = vlan_dev_vlan_id(vlan_dev);
9334 	u16 lower_pvid;
9335 	int err;
9336 
9337 	if (!nomaster && netif_is_bridge_port(vlan_dev))
9338 		return 0;
9339 
9340 	if (mlxsw_sp_port_dev_check(real_dev)) {
9341 		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
9342 							 event, vid, extack);
9343 	} else if (netif_is_lag_master(real_dev)) {
9344 		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
9345 						     vid, extack);
9346 	} else if (netif_is_bridge_master(real_dev) &&
9347 		   br_vlan_enabled(real_dev)) {
9348 		err = br_vlan_get_pvid(real_dev, &lower_pvid);
9349 		if (err)
9350 			return err;
9351 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
9352 						      lower_pvid, event,
9353 						      extack);
9354 	}
9355 
9356 	return 0;
9357 }
9358 
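/* Per RFC 5798, VRRP virtual MAC addresses use a fixed prefix:
 * 00:00:5e:00:01:{VRID} for IPv4 and 00:00:5e:00:02:{VRID} for IPv6.
 */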
9359 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
9360 {
9361 	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
9362 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9363 
9364 	return ether_addr_equal_masked(mac, vrrp4, mask);
9365 }
9366 
9367 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
9368 {
9369 	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
9370 	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9371 
9372 	return ether_addr_equal_masked(mac, vrrp6, mask);
9373 }
9374 
9375 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9376 				const u8 *mac, bool adding)
9377 {
9378 	char ritr_pl[MLXSW_REG_RITR_LEN];
9379 	u8 vrrp_id = adding ? mac[5] : 0;
9380 	int err;
9381 
9382 	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
9383 	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
9384 		return 0;
9385 
9386 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9387 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9388 	if (err)
9389 		return err;
9390 
9391 	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
9392 		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
9393 	else
9394 		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
9395 
9396 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9397 }
9398 
9399 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9400 				    const struct net_device *macvlan_dev,
9401 				    struct netlink_ext_ack *extack)
9402 {
9403 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9404 	struct mlxsw_sp_rif *rif;
9405 	int err;
9406 
9407 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9408 	if (!rif)
9409 		return 0;
9410 
9411 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9412 				  mlxsw_sp_fid_index(rif->fid), true);
9413 	if (err)
9414 		return err;
9415 
9416 	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9417 				   macvlan_dev->dev_addr, true);
9418 	if (err)
9419 		goto err_rif_vrrp_add;
9420 
9421 	/* Make sure the bridge driver does not have this MAC pointing at
9422 	 * some other port.
9423 	 */
9424 	if (rif->ops->fdb_del)
9425 		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9426 
9427 	return 0;
9428 
9429 err_rif_vrrp_add:
9430 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9431 			    mlxsw_sp_fid_index(rif->fid), false);
9432 	return err;
9433 }
9434 
9435 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9436 				       const struct net_device *macvlan_dev)
9437 {
9438 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9439 	struct mlxsw_sp_rif *rif;
9440 
9441 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9442 	/* If we do not have a RIF, then we already took care of
9443 	 * removing the macvlan's MAC during RIF deletion.
9444 	 */
9445 	if (!rif)
9446 		return;
9447 	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9448 			     false);
9449 	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9450 			    mlxsw_sp_fid_index(rif->fid), false);
9451 }
9452 
9453 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9454 			      const struct net_device *macvlan_dev)
9455 {
9456 	mutex_lock(&mlxsw_sp->router->lock);
9457 	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9458 	mutex_unlock(&mlxsw_sp->router->lock);
9459 }
9460 
9461 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9462 					   struct net_device *macvlan_dev,
9463 					   unsigned long event,
9464 					   struct netlink_ext_ack *extack)
9465 {
9466 	switch (event) {
9467 	case NETDEV_UP:
9468 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9469 	case NETDEV_DOWN:
9470 		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9471 		break;
9472 	}
9473 
9474 	return 0;
9475 }
9476 
9477 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9478 				     struct net_device *dev,
9479 				     unsigned long event, bool nomaster,
9480 				     struct netlink_ext_ack *extack)
9481 {
9482 	if (mlxsw_sp_port_dev_check(dev))
9483 		return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
9484 						    extack);
9485 	else if (netif_is_lag_master(dev))
9486 		return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
9487 						   extack);
9488 	else if (netif_is_bridge_master(dev))
9489 		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
9490 						      extack);
9491 	else if (is_vlan_dev(dev))
9492 		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9493 						    nomaster, extack);
9494 	else if (netif_is_macvlan(dev))
9495 		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9496 						       extack);
9497 	else
9498 		return 0;
9499 }
9500 
9501 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
9502 				   unsigned long event, void *ptr)
9503 {
9504 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
9505 	struct net_device *dev = ifa->ifa_dev->dev;
9506 	struct mlxsw_sp_router *router;
9507 	struct mlxsw_sp_rif *rif;
9508 	int err = 0;
9509 
9510 	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
9511 	if (event == NETDEV_UP)
9512 		return NOTIFY_DONE;
9513 
9514 	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
9515 	mutex_lock(&router->lock);
9516 	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
9517 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9518 		goto out;
9519 
9520 	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
9521 					NULL);
9522 out:
9523 	mutex_unlock(&router->lock);
9524 	return notifier_from_errno(err);
9525 }
9526 
9527 static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
9528 					 unsigned long event, void *ptr)
9529 {
9530 	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
9531 	struct net_device *dev = ivi->ivi_dev->dev;
9532 	struct mlxsw_sp *mlxsw_sp;
9533 	struct mlxsw_sp_rif *rif;
9534 	int err = 0;
9535 
9536 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9537 	if (!mlxsw_sp)
9538 		return NOTIFY_DONE;
9539 
9540 	mutex_lock(&mlxsw_sp->router->lock);
9541 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9542 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9543 		goto out;
9544 
9545 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9546 					ivi->extack);
9547 out:
9548 	mutex_unlock(&mlxsw_sp->router->lock);
9549 	return notifier_from_errno(err);
9550 }
9551 
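/* The inet6addr notifier chain is invoked in atomic context (see the
 * "Called with rcu_read_lock()" note below), so the actual RIF update is
 * deferred to process context through this work item. The
 * netdevice_tracker keeps the netdev alive until the work has run.
 */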
9552 struct mlxsw_sp_inet6addr_event_work {
9553 	struct work_struct work;
9554 	struct mlxsw_sp *mlxsw_sp;
9555 	struct net_device *dev;
9556 	netdevice_tracker dev_tracker;
9557 	unsigned long event;
9558 };
9559 
9560 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
9561 {
9562 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
9563 		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
9564 	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
9565 	struct net_device *dev = inet6addr_work->dev;
9566 	unsigned long event = inet6addr_work->event;
9567 	struct mlxsw_sp_rif *rif;
9568 
9569 	rtnl_lock();
9570 	mutex_lock(&mlxsw_sp->router->lock);
9571 
9572 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9573 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9574 		goto out;
9575 
9576 	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
9577 out:
9578 	mutex_unlock(&mlxsw_sp->router->lock);
9579 	rtnl_unlock();
9580 	netdev_put(dev, &inet6addr_work->dev_tracker);
9581 	kfree(inet6addr_work);
9582 }
9583 
9584 /* Called with rcu_read_lock() */
9585 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
9586 				    unsigned long event, void *ptr)
9587 {
9588 	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
9589 	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
9590 	struct net_device *dev = if6->idev->dev;
9591 	struct mlxsw_sp_router *router;
9592 
9593 	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
9594 	if (event == NETDEV_UP)
9595 		return NOTIFY_DONE;
9596 
9597 	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
9598 	if (!inet6addr_work)
9599 		return NOTIFY_BAD;
9600 
9601 	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
9602 	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
9603 	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
9604 	inet6addr_work->dev = dev;
9605 	inet6addr_work->event = event;
9606 	netdev_hold(dev, &inet6addr_work->dev_tracker, GFP_ATOMIC);
9607 	mlxsw_core_schedule_work(&inet6addr_work->work);
9608 
9609 	return NOTIFY_DONE;
9610 }
9611 
9612 static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
9613 					  unsigned long event, void *ptr)
9614 {
9615 	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
9616 	struct net_device *dev = i6vi->i6vi_dev->dev;
9617 	struct mlxsw_sp *mlxsw_sp;
9618 	struct mlxsw_sp_rif *rif;
9619 	int err = 0;
9620 
9621 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9622 	if (!mlxsw_sp)
9623 		return NOTIFY_DONE;
9624 
9625 	mutex_lock(&mlxsw_sp->router->lock);
9626 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9627 	if (!mlxsw_sp_rif_should_config(rif, dev, event))
9628 		goto out;
9629 
9630 	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
9631 					i6vi->extack);
9632 out:
9633 	mutex_unlock(&mlxsw_sp->router->lock);
9634 	return notifier_from_errno(err);
9635 }
9636 
9637 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9638 			     const char *mac, int mtu, u8 mac_profile)
9639 {
9640 	char ritr_pl[MLXSW_REG_RITR_LEN];
9641 	int err;
9642 
9643 	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9644 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9645 	if (err)
9646 		return err;
9647 
9648 	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9649 	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9650 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9651 	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9652 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9653 }
9654 
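/* Handle NETDEV_CHANGEADDR / NETDEV_CHANGEMTU on a RIF netdev: remove the
 * old MAC from the FDB, swap the MAC profile, re-issue RITR with the new
 * MAC and MTU, install the new FDB entry, and propagate an MTU change to
 * the multicast routing tables. Each step is unwound in reverse order on
 * failure.
 */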
9655 static int
9656 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9657 				  struct mlxsw_sp_rif *rif,
9658 				  struct netlink_ext_ack *extack)
9659 {
9660 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
9661 	u8 old_mac_profile;
9662 	u16 fid_index;
9663 	int err;
9664 
9665 	fid_index = mlxsw_sp_fid_index(rif->fid);
9666 
9667 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9668 	if (err)
9669 		return err;
9670 
9671 	old_mac_profile = rif->mac_profile_id;
9672 	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9673 					       extack);
9674 	if (err)
9675 		goto err_rif_mac_profile_replace;
9676 
9677 	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9678 				dev->mtu, rif->mac_profile_id);
9679 	if (err)
9680 		goto err_rif_edit;
9681 
9682 	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9683 	if (err)
9684 		goto err_rif_fdb_op;
9685 
9686 	if (rif->mtu != dev->mtu) {
9687 		struct mlxsw_sp_vr *vr;
9688 		int i;
9689 
9690 		/* The RIF is relevant only to its mr_table instance, as unlike
9691 		 * unicast routing, in multicast routing a RIF cannot be shared
9692 		 * between several multicast routing tables.
9693 		 */
9694 		vr = &mlxsw_sp->router->vrs[rif->vr_id];
9695 		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9696 			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9697 						   rif, dev->mtu);
9698 	}
9699 
9700 	ether_addr_copy(rif->addr, dev->dev_addr);
9701 	rif->mtu = dev->mtu;
9702 
9703 	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9704 
9705 	return 0;
9706 
9707 err_rif_fdb_op:
9708 	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9709 			  old_mac_profile);
9710 err_rif_edit:
9711 	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9712 err_rif_mac_profile_replace:
9713 	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9714 	return err;
9715 }
9716 
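/* NETDEV_PRE_CHANGEADDR allows vetoing a MAC change before it takes
 * effect. The change is rejected only when the new MAC would need a new
 * MAC profile while the profile table is already full and the RIF's
 * current profile is shared with other RIFs, i.e. cannot be edited in
 * place.
 */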
9717 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9718 			    struct netdev_notifier_pre_changeaddr_info *info)
9719 {
9720 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9721 	struct mlxsw_sp_rif_mac_profile *profile;
9722 	struct netlink_ext_ack *extack;
9723 	u8 max_rif_mac_profiles;
9724 	u64 occ;
9725 
9726 	extack = netdev_notifier_info_to_extack(&info->info);
9727 
9728 	profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9729 	if (profile)
9730 		return 0;
9731 
9732 	max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9733 	occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9734 	if (occ < max_rif_mac_profiles)
9735 		return 0;
9736 
9737 	if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9738 		return 0;
9739 
9740 	NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9741 	return -ENOBUFS;
9742 }
9743 
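/* Only netdevs that could eventually host a RIF are tracked with a CRIF
 * (a candidate RIF): ports, LAGs, bridges, IP-in-IP overlays, VRFs and
 * VLAN uppers of ports, LAGs and bridges. Everything else is ignored by
 * the netdevice notifier below.
 */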
9744 static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
9745 						  struct net_device *dev)
9746 {
9747 	struct vlan_dev_priv *vlan;
9748 
9749 	if (netif_is_lag_master(dev) ||
9750 	    netif_is_bridge_master(dev) ||
9751 	    mlxsw_sp_port_dev_check(dev) ||
9752 	    mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
9753 	    netif_is_l3_master(dev))
9754 		return true;
9755 
9756 	if (!is_vlan_dev(dev))
9757 		return false;
9758 
9759 	vlan = vlan_dev_priv(dev);
9760 	return netif_is_lag_master(vlan->real_dev) ||
9761 	       netif_is_bridge_master(vlan->real_dev) ||
9762 	       mlxsw_sp_port_dev_check(vlan->real_dev);
9763 }
9764 
9765 static struct mlxsw_sp_crif *
9766 mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
9767 {
9768 	struct mlxsw_sp_crif *crif;
9769 	int err;
9770 
9771 	if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
9772 		return NULL;
9773 
9774 	crif = mlxsw_sp_crif_alloc(dev);
9775 	if (!crif)
9776 		return ERR_PTR(-ENOMEM);
9777 
9778 	err = mlxsw_sp_crif_insert(router, crif);
9779 	if (err)
9780 		goto err_netdev_insert;
9781 
9782 	return crif;
9783 
9784 err_netdev_insert:
9785 	mlxsw_sp_crif_free(crif);
9786 	return ERR_PTR(err);
9787 }
9788 
9789 static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
9790 				     struct mlxsw_sp_crif *crif)
9791 {
9792 	struct mlxsw_sp_nexthop *nh, *tmp;
9793 
9794 	mlxsw_sp_crif_remove(router, crif);
9795 
9796 	list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
9797 		mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);
9798 
9799 	if (crif->rif)
9800 		crif->can_destroy = true;
9801 	else
9802 		mlxsw_sp_crif_free(crif);
9803 }
9804 
9805 static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
9806 				       struct net_device *dev)
9807 {
9808 	struct mlxsw_sp_crif *crif;
9809 
9810 	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9811 		return 0;
9812 
9813 	crif = mlxsw_sp_crif_register(router, dev);
9814 	return PTR_ERR_OR_ZERO(crif);
9815 }
9816 
9817 static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
9818 					  struct net_device *dev)
9819 {
9820 	struct mlxsw_sp_crif *crif;
9821 
9822 	if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
9823 		return;
9824 
9825 	/* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
9826 	 * the NETDEV_UNREGISTER message, so we can get here twice. If that's
9827 	 * what happened, the netdevice state is NETREG_UNREGISTERED. In that
9828 	 * case, we expect to have collected the CRIF already, and warn if it
9829 	 * still exists. Otherwise we expect the CRIF to exist.
9830 	 */
9831 	crif = mlxsw_sp_crif_lookup(router, dev);
9832 	if (dev->reg_state == NETREG_UNREGISTERED) {
9833 		if (!WARN_ON(crif))
9834 			return;
9835 	}
9836 	if (WARN_ON(!crif))
9837 		return;
9838 
9839 	mlxsw_sp_crif_unregister(router, crif);
9840 }
9841 
9842 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9843 {
9844 	switch (event) {
9845 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9846 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9847 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9848 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9849 		return true;
9850 	}
9851 
9852 	return false;
9853 }
9854 
9855 static int
9856 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9857 					unsigned long event,
9858 					struct netdev_notifier_offload_xstats_info *info)
9859 {
9860 	switch (info->type) {
9861 	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
9862 		break;
9863 	default:
9864 		return 0;
9865 	}
9866 
9867 	switch (event) {
9868 	case NETDEV_OFFLOAD_XSTATS_ENABLE:
9869 		return mlxsw_sp_router_port_l3_stats_enable(rif);
9870 	case NETDEV_OFFLOAD_XSTATS_DISABLE:
9871 		mlxsw_sp_router_port_l3_stats_disable(rif);
9872 		return 0;
9873 	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9874 		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
9875 		return 0;
9876 	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9877 		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
9878 	}
9879 
9880 	WARN_ON_ONCE(1);
9881 	return 0;
9882 }
9883 
9884 static int
9885 mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
9886 				      struct net_device *dev,
9887 				      unsigned long event,
9888 				      struct netdev_notifier_offload_xstats_info *info)
9889 {
9890 	struct mlxsw_sp_rif *rif;
9891 
9892 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9893 	if (!rif)
9894 		return 0;
9895 
9896 	return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
9897 }
9898 
9899 static bool mlxsw_sp_is_router_event(unsigned long event)
9900 {
9901 	switch (event) {
9902 	case NETDEV_PRE_CHANGEADDR:
9903 	case NETDEV_CHANGEADDR:
9904 	case NETDEV_CHANGEMTU:
9905 		return true;
9906 	default:
9907 		return false;
9908 	}
9909 }
9910 
9911 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9912 						unsigned long event, void *ptr)
9913 {
9914 	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9915 	struct mlxsw_sp *mlxsw_sp;
9916 	struct mlxsw_sp_rif *rif;
9917 
9918 	mlxsw_sp = mlxsw_sp_lower_get(dev);
9919 	if (!mlxsw_sp)
9920 		return 0;
9921 
9922 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9923 	if (!rif)
9924 		return 0;
9925 
9926 	switch (event) {
9927 	case NETDEV_CHANGEMTU:
9928 	case NETDEV_CHANGEADDR:
9929 		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9930 	case NETDEV_PRE_CHANGEADDR:
9931 		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9932 	default:
9933 		WARN_ON_ONCE(1);
9934 		break;
9935 	}
9936 
9937 	return 0;
9938 }
9939 
9940 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9941 				  struct net_device *l3_dev,
9942 				  struct netlink_ext_ack *extack)
9943 {
9944 	struct mlxsw_sp_rif *rif;
9945 
9946 	/* If netdev is already associated with a RIF, then we need to
9947 	 * destroy it and create a new one with the new virtual router ID.
9948 	 */
9949 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9950 	if (rif)
9951 		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
9952 					  extack);
9953 
9954 	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
9955 					 extack);
9956 }
9957 
9958 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9959 				    struct net_device *l3_dev)
9960 {
9961 	struct mlxsw_sp_rif *rif;
9962 
9963 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9964 	if (!rif)
9965 		return;
9966 	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
9967 }
9968 
9969 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
9970 {
9971 	struct netdev_notifier_changeupper_info *info = ptr;
9972 
9973 	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
9974 		return false;
9975 	return netif_is_l3_master(info->upper_dev);
9976 }
9977 
9978 static int
9979 mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9980 			     struct netdev_notifier_changeupper_info *info)
9981 {
9982 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9983 	int err = 0;
9984 
9985 	/* We do not create a RIF for a macvlan, but only use it to
9986 	 * direct more MAC addresses to the router.
9987 	 */
9988 	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9989 		return 0;
9990 
9991 	switch (event) {
9992 	case NETDEV_PRECHANGEUPPER:
9993 		break;
9994 	case NETDEV_CHANGEUPPER:
9995 		if (info->linking) {
9996 			struct netlink_ext_ack *extack;
9997 
9998 			extack = netdev_notifier_info_to_extack(&info->info);
9999 			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
10000 		} else {
10001 			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
10002 		}
10003 		break;
10004 	}
10005 
10006 	return err;
10007 }
10008 
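/* Replay context for (de)enslavement. When a netdev that already has IP
 * addresses gains or loses an L3 master, NETDEV_UP handling is replayed
 * for it and its uppers so that the matching RIFs get created. ->done
 * counts successful replays, so a mid-walk failure can be rolled back by
 * replaying NETDEV_DOWN the same number of times.
 */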
10009 struct mlxsw_sp_router_replay_inetaddr_up {
10010 	struct mlxsw_sp *mlxsw_sp;
10011 	struct netlink_ext_ack *extack;
10012 	unsigned int done;
10013 	bool deslavement;
10014 };
10015 
10016 static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev,
10017 					      struct netdev_nested_priv *priv)
10018 {
10019 	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
10020 	bool nomaster = ctx->deslavement;
10021 	struct mlxsw_sp_crif *crif;
10022 	int err;
10023 
10024 	if (mlxsw_sp_dev_addr_list_empty(dev))
10025 		return 0;
10026 
10027 	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
10028 	if (!crif || crif->rif)
10029 		return 0;
10030 
10031 	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
10032 		return 0;
10033 
10034 	err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP,
10035 					nomaster, ctx->extack);
10036 	if (err)
10037 		return err;
10038 
10039 	ctx->done++;
10040 	return 0;
10041 }
10042 
10043 static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev,
10044 						struct netdev_nested_priv *priv)
10045 {
10046 	struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
10047 	bool nomaster = ctx->deslavement;
10048 	struct mlxsw_sp_crif *crif;
10049 
10050 	if (!ctx->done)
10051 		return 0;
10052 
10053 	if (mlxsw_sp_dev_addr_list_empty(dev))
10054 		return 0;
10055 
10056 	crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
10057 	if (!crif || !crif->rif)
10058 		return 0;
10059 
10060 	/* We are rolling back NETDEV_UP, so ask for that. */
10061 	if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
10062 		return 0;
10063 
10064 	__mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster,
10065 				  NULL);
10066 
10067 	ctx->done--;
10068 	return 0;
10069 }
10070 
10071 int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
10072 					  struct net_device *upper_dev,
10073 					  struct netlink_ext_ack *extack)
10074 {
10075 	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
10076 		.mlxsw_sp = mlxsw_sp,
10077 		.extack = extack,
10078 		.deslavement = false,
10079 	};
10080 	struct netdev_nested_priv priv = {
10081 		.data = &ctx,
10082 	};
10083 	int err;
10084 
10085 	err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv);
10086 	if (err)
10087 		return err;
10088 
10089 	err = netdev_walk_all_upper_dev_rcu(upper_dev,
10090 					    mlxsw_sp_router_replay_inetaddr_up,
10091 					    &priv);
10092 	if (err)
10093 		goto err_replay_up;
10094 
10095 	return 0;
10096 
10097 err_replay_up:
10098 	netdev_walk_all_upper_dev_rcu(upper_dev,
10099 				      mlxsw_sp_router_unreplay_inetaddr_up,
10100 				      &priv);
10101 	mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv);
10102 	return err;
10103 }
10104 
10105 void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
10106 					   struct net_device *dev)
10107 {
10108 	struct mlxsw_sp_router_replay_inetaddr_up ctx = {
10109 		.mlxsw_sp = mlxsw_sp,
10110 		.deslavement = true,
10111 	};
10112 	struct netdev_nested_priv priv = {
10113 		.data = &ctx,
10114 	};
10115 
10116 	mlxsw_sp_router_replay_inetaddr_up(dev, &priv);
10117 }
10118 
10119 static int
10120 mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
10121 				       u16 vid, struct net_device *dev,
10122 				       struct netlink_ext_ack *extack)
10123 {
10124 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
10125 
10126 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
10127 							    vid);
10128 	if (WARN_ON(!mlxsw_sp_port_vlan))
10129 		return -EINVAL;
10130 
10131 	return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan,
10132 						       dev, extack);
10133 }
10134 
10135 static void
10136 mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
10137 			       struct net_device *dev)
10138 {
10139 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
10140 
10141 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
10142 							    vid);
10143 	if (WARN_ON(!mlxsw_sp_port_vlan))
10144 		return;
10145 
10146 	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
10147 }
10148 
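/* When a port joins a LAG that already has RIFs, the port must join the
 * router for the LAG's default VID and for every VLAN upper of the LAG.
 * The 'done' counter limits the rollback loop to the VLAN uppers that
 * were actually joined before a failure.
 */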
10149 static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10150 					   struct net_device *lag_dev,
10151 					   struct netlink_ext_ack *extack)
10152 {
10153 	u16 default_vid = MLXSW_SP_DEFAULT_VID;
10154 	struct net_device *upper_dev;
10155 	struct list_head *iter;
10156 	int done = 0;
10157 	u16 vid;
10158 	int err;
10159 
10160 	err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid,
10161 						     lag_dev, extack);
10162 	if (err)
10163 		return err;
10164 
10165 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
10166 		if (!is_vlan_dev(upper_dev))
10167 			continue;
10168 
10169 		vid = vlan_dev_vlan_id(upper_dev);
10170 		err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid,
10171 							     upper_dev, extack);
10172 		if (err)
10173 			goto err_router_join_dev;
10174 
10175 		++done;
10176 	}
10177 
10178 	return 0;
10179 
10180 err_router_join_dev:
10181 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
10182 		if (!is_vlan_dev(upper_dev))
10183 			continue;
10184 		if (!done--)
10185 			break;
10186 
10187 		vid = vlan_dev_vlan_id(upper_dev);
10188 		mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
10189 	}
10190 
10191 	mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
10192 	return err;
10193 }
10194 
10195 static void
10196 __mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10197 				 struct net_device *lag_dev)
10198 {
10199 	u16 default_vid = MLXSW_SP_DEFAULT_VID;
10200 	struct net_device *upper_dev;
10201 	struct list_head *iter;
10202 	u16 vid;
10203 
10204 	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
10205 		if (!is_vlan_dev(upper_dev))
10206 			continue;
10207 
10208 		vid = vlan_dev_vlan_id(upper_dev);
10209 		mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
10210 	}
10211 
10212 	mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
10213 }
10214 
10215 int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10216 				  struct net_device *lag_dev,
10217 				  struct netlink_ext_ack *extack)
10218 {
10219 	int err;
10220 
10221 	mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10222 	err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
10223 	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10224 
10225 	return err;
10226 }
10227 
10228 void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
10229 				    struct net_device *lag_dev)
10230 {
10231 	mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10232 	__mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
10233 	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
10234 }
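/* Router-side netdevice notifier. NETDEV_REGISTER is handled first, so a
 * CRIF exists before any other handler runs, and NETDEV_UNREGISTER is
 * handled last, so the CRIF is collected only after the other handlers
 * are done with it. Everything runs under the router lock.
 */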
10235 
10236 static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
10237 					   unsigned long event, void *ptr)
10238 {
10239 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
10240 	struct mlxsw_sp_router *router;
10241 	struct mlxsw_sp *mlxsw_sp;
10242 	int err = 0;
10243 
10244 	router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
10245 	mlxsw_sp = router->mlxsw_sp;
10246 
10247 	mutex_lock(&mlxsw_sp->router->lock);
10248 
10249 	if (event == NETDEV_REGISTER) {
10250 		err = mlxsw_sp_netdevice_register(router, dev);
10251 		if (err)
10252 			/* No need to roll this back, UNREGISTER will collect it
10253 			 * anyhow.
10254 			 */
10255 			goto out;
10256 	}
10257 
10258 	if (mlxsw_sp_is_offload_xstats_event(event))
10259 		err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
10260 							    event, ptr);
10261 	else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
10262 		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
10263 						       event, ptr);
10264 	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
10265 		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
10266 						       event, ptr);
10267 	else if (mlxsw_sp_is_router_event(event))
10268 		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
10269 	else if (mlxsw_sp_is_vrf_event(event, ptr))
10270 		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
10271 
10272 	if (event == NETDEV_UNREGISTER)
10273 		mlxsw_sp_netdevice_unregister(router, dev);
10274 
10275 out:
10276 	mutex_unlock(&mlxsw_sp->router->lock);
10277 
10278 	return notifier_from_errno(err);
10279 }
10280 
10281 struct mlxsw_sp_macvlan_replay {
10282 	struct mlxsw_sp *mlxsw_sp;
10283 	struct netlink_ext_ack *extack;
10284 };
10285 
10286 static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev,
10287 					 struct netdev_nested_priv *priv)
10288 {
10289 	const struct mlxsw_sp_macvlan_replay *rms = priv->data;
10290 	struct netlink_ext_ack *extack = rms->extack;
10291 	struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp;
10292 
10293 	if (!netif_is_macvlan(dev))
10294 		return 0;
10295 
10296 	return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack);
10297 }
10298 
10299 static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif,
10300 				   struct netlink_ext_ack *extack)
10301 {
10302 	struct mlxsw_sp_macvlan_replay rms = {
10303 		.mlxsw_sp = rif->mlxsw_sp,
10304 		.extack = extack,
10305 	};
10306 	struct netdev_nested_priv priv = {
10307 		.data = &rms,
10308 	};
10309 
10310 	return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif),
10311 					     mlxsw_sp_macvlan_replay_upper,
10312 					     &priv);
10313 }
10314 
10315 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
10316 					struct netdev_nested_priv *priv)
10317 {
10318 	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
10319 
10320 	if (!netif_is_macvlan(dev))
10321 		return 0;
10322 
10323 	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10324 				   mlxsw_sp_fid_index(rif->fid), false);
10325 }
10326 
10327 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
10328 {
10329 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10330 	struct netdev_nested_priv priv = {
10331 		.data = (void *)rif,
10332 	};
10333 
10334 	if (!netif_is_macvlan_port(dev))
10335 		return 0;
10336 
10337 	return netdev_walk_all_upper_dev_rcu(dev,
10338 					     __mlxsw_sp_rif_macvlan_flush, &priv);
10339 }
10340 
10341 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
10342 				       const struct mlxsw_sp_rif_params *params)
10343 {
10344 	struct mlxsw_sp_rif_subport *rif_subport;
10345 
10346 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
10347 	refcount_set(&rif_subport->ref_count, 1);
10348 	rif_subport->vid = params->vid;
10349 	rif_subport->lag = params->lag;
10350 	if (params->lag)
10351 		rif_subport->lag_id = params->lag_id;
10352 	else
10353 		rif_subport->system_port = params->system_port;
10354 }
10355 
10356 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
10357 {
10358 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10359 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10360 	struct mlxsw_sp_rif_subport *rif_subport;
10361 	char ritr_pl[MLXSW_REG_RITR_LEN];
10362 	u16 efid;
10363 
10364 	rif_subport = mlxsw_sp_rif_subport_rif(rif);
10365 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
10366 			    rif->rif_index, rif->vr_id, dev->mtu);
10367 	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
10368 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
10369 	efid = mlxsw_sp_fid_index(rif->fid);
10370 	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
10371 				  rif_subport->lag ? rif_subport->lag_id :
10372 						     rif_subport->system_port,
10373 				  efid, 0);
10374 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10375 }
10376 
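/* Standard RIF configure sequence: reserve a MAC profile, enable the RIF
 * in hardware via RITR, replay macvlan uppers, add the RIF's own MAC to
 * the FDB, and finally bind the FID to the RIF. The error path mirrors
 * mlxsw_sp_rif_subport_deconfigure() step by step.
 */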
10377 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
10378 					  struct netlink_ext_ack *extack)
10379 {
10380 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10381 	u8 mac_profile;
10382 	int err;
10383 
10384 	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
10385 					   &mac_profile, extack);
10386 	if (err)
10387 		return err;
10388 	rif->mac_profile_id = mac_profile;
10389 
10390 	err = mlxsw_sp_rif_subport_op(rif, true);
10391 	if (err)
10392 		goto err_rif_subport_op;
10393 
10394 	err = mlxsw_sp_macvlan_replay(rif, extack);
10395 	if (err)
10396 		goto err_macvlan_replay;
10397 
10398 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10399 				  mlxsw_sp_fid_index(rif->fid), true);
10400 	if (err)
10401 		goto err_rif_fdb_op;
10402 
10403 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10404 	if (err)
10405 		goto err_fid_rif_set;
10406 
10407 	return 0;
10408 
10409 err_fid_rif_set:
10410 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10411 			    mlxsw_sp_fid_index(rif->fid), false);
10412 err_rif_fdb_op:
10413 	mlxsw_sp_rif_macvlan_flush(rif);
10414 err_macvlan_replay:
10415 	mlxsw_sp_rif_subport_op(rif, false);
10416 err_rif_subport_op:
10417 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
10418 	return err;
10419 }
10420 
10421 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
10422 {
10423 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10424 	struct mlxsw_sp_fid *fid = rif->fid;
10425 
10426 	mlxsw_sp_fid_rif_unset(fid);
10427 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10428 			    mlxsw_sp_fid_index(fid), false);
10429 	mlxsw_sp_rif_macvlan_flush(rif);
10430 	mlxsw_sp_rif_subport_op(rif, false);
10431 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10432 }
10433 
10434 static struct mlxsw_sp_fid *
10435 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
10436 			     const struct mlxsw_sp_rif_params *params,
10437 			     struct netlink_ext_ack *extack)
10438 {
10439 	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
10440 }
10441 
10442 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
10443 	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
10444 	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
10445 	.setup			= mlxsw_sp_rif_subport_setup,
10446 	.configure		= mlxsw_sp_rif_subport_configure,
10447 	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
10448 	.fid_get		= mlxsw_sp_rif_subport_fid_get,
10449 };
10450 
10451 static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
10452 {
10453 	enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
10454 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10455 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10456 	char ritr_pl[MLXSW_REG_RITR_LEN];
10457 
10458 	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
10459 			    dev->mtu);
10460 	mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
10461 	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
10462 	mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
10463 
10464 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10465 }
10466 
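/* The "router port" is a virtual port numbered one past the last real
 * port. It is used as a member in FID flood tables, so that multicast
 * and broadcast packets in a routed FID also reach the router block.
 */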
10467 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
10468 {
10469 	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
10470 }
10471 
10472 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
10473 				      struct netlink_ext_ack *extack)
10474 {
10475 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10476 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10477 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
10478 	u8 mac_profile;
10479 	int err;
10480 
10481 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10482 					   &mac_profile, extack);
10483 	if (err)
10484 		return err;
10485 	rif->mac_profile_id = mac_profile;
10486 
10487 	err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
10488 	if (err)
10489 		goto err_rif_fid_op;
10490 
10491 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10492 				     mlxsw_sp_router_port(mlxsw_sp), true);
10493 	if (err)
10494 		goto err_fid_mc_flood_set;
10495 
10496 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10497 				     mlxsw_sp_router_port(mlxsw_sp), true);
10498 	if (err)
10499 		goto err_fid_bc_flood_set;
10500 
10501 	err = mlxsw_sp_macvlan_replay(rif, extack);
10502 	if (err)
10503 		goto err_macvlan_replay;
10504 
10505 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10506 				  mlxsw_sp_fid_index(rif->fid), true);
10507 	if (err)
10508 		goto err_rif_fdb_op;
10509 
10510 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10511 	if (err)
10512 		goto err_fid_rif_set;
10513 
10514 	return 0;
10515 
10516 err_fid_rif_set:
10517 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10518 			    mlxsw_sp_fid_index(rif->fid), false);
10519 err_rif_fdb_op:
10520 	mlxsw_sp_rif_macvlan_flush(rif);
10521 err_macvlan_replay:
10522 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10523 			       mlxsw_sp_router_port(mlxsw_sp), false);
10524 err_fid_bc_flood_set:
10525 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10526 			       mlxsw_sp_router_port(mlxsw_sp), false);
10527 err_fid_mc_flood_set:
10528 	mlxsw_sp_rif_fid_op(rif, fid_index, false);
10529 err_rif_fid_op:
10530 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10531 	return err;
10532 }
10533 
10534 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
10535 {
10536 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10537 	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
10538 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10539 	struct mlxsw_sp_fid *fid = rif->fid;
10540 
10541 	mlxsw_sp_fid_rif_unset(fid);
10542 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10543 			    mlxsw_sp_fid_index(fid), false);
10544 	mlxsw_sp_rif_macvlan_flush(rif);
10545 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10546 			       mlxsw_sp_router_port(mlxsw_sp), false);
10547 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10548 			       mlxsw_sp_router_port(mlxsw_sp), false);
10549 	mlxsw_sp_rif_fid_op(rif, fid_index, false);
10550 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10551 }
10552 
10553 static struct mlxsw_sp_fid *
10554 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
10555 			 const struct mlxsw_sp_rif_params *params,
10556 			 struct netlink_ext_ack *extack)
10557 {
10558 	int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
10559 
10560 	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
10561 }
10562 
10563 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10564 {
10565 	struct switchdev_notifier_fdb_info info = {};
10566 	struct net_device *dev;
10567 
10568 	dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
10569 	if (!dev)
10570 		return;
10571 
10572 	info.addr = mac;
10573 	info.vid = 0;
10574 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10575 				 NULL);
10576 }
10577 
10578 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
10579 	.type			= MLXSW_SP_RIF_TYPE_FID,
10580 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10581 	.configure		= mlxsw_sp_rif_fid_configure,
10582 	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
10583 	.fid_get		= mlxsw_sp_rif_fid_fid_get,
10584 	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
10585 };
10586 
10587 static struct mlxsw_sp_fid *
10588 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
10589 			  const struct mlxsw_sp_rif_params *params,
10590 			  struct netlink_ext_ack *extack)
10591 {
10592 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10593 	struct net_device *br_dev;
10594 
10595 	if (WARN_ON(!params->vid))
10596 		return ERR_PTR(-EINVAL);
10597 
10598 	if (is_vlan_dev(dev)) {
10599 		br_dev = vlan_dev_real_dev(dev);
10600 		if (WARN_ON(!netif_is_bridge_master(br_dev)))
10601 			return ERR_PTR(-EINVAL);
10602 	}
10603 
10604 	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
10605 }
10606 
10607 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
10608 {
10609 	struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
10610 	struct switchdev_notifier_fdb_info info = {};
10611 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10612 	struct net_device *br_dev;
10613 	struct net_device *dev;
10614 
10615 	br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
10616 	dev = br_fdb_find_port(br_dev, mac, vid);
10617 	if (!dev)
10618 		return;
10619 
10620 	info.addr = mac;
10621 	info.vid = vid;
10622 	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
10623 				 NULL);
10624 }
10625 
10626 static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
10627 				bool enable)
10628 {
10629 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10630 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10631 	char ritr_pl[MLXSW_REG_RITR_LEN];
10632 
10633 	mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
10634 				    dev->mtu, dev->dev_addr,
10635 				    rif->mac_profile_id, vid, efid);
10636 
10637 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10638 }
10639 
10640 static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
10641 				       struct netlink_ext_ack *extack)
10642 {
10643 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10644 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10645 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10646 	u8 mac_profile;
10647 	int err;
10648 
10649 	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
10650 					   &mac_profile, extack);
10651 	if (err)
10652 		return err;
10653 	rif->mac_profile_id = mac_profile;
10654 
10655 	err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
10656 	if (err)
10657 		goto err_rif_vlan_fid_op;
10658 
10659 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10660 				     mlxsw_sp_router_port(mlxsw_sp), true);
10661 	if (err)
10662 		goto err_fid_mc_flood_set;
10663 
10664 	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10665 				     mlxsw_sp_router_port(mlxsw_sp), true);
10666 	if (err)
10667 		goto err_fid_bc_flood_set;
10668 
10669 	err = mlxsw_sp_macvlan_replay(rif, extack);
10670 	if (err)
10671 		goto err_macvlan_replay;
10672 
10673 	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10674 				  mlxsw_sp_fid_index(rif->fid), true);
10675 	if (err)
10676 		goto err_rif_fdb_op;
10677 
10678 	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
10679 	if (err)
10680 		goto err_fid_rif_set;
10681 
10682 	return 0;
10683 
10684 err_fid_rif_set:
10685 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10686 			    mlxsw_sp_fid_index(rif->fid), false);
10687 err_rif_fdb_op:
10688 	mlxsw_sp_rif_macvlan_flush(rif);
10689 err_macvlan_replay:
10690 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10691 			       mlxsw_sp_router_port(mlxsw_sp), false);
10692 err_fid_bc_flood_set:
10693 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10694 			       mlxsw_sp_router_port(mlxsw_sp), false);
10695 err_fid_mc_flood_set:
10696 	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10697 err_rif_vlan_fid_op:
10698 	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
10699 	return err;
10700 }
10701 
10702 static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
10703 {
10704 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10705 	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
10706 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10707 
10708 	mlxsw_sp_fid_rif_unset(rif->fid);
10709 	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
10710 			    mlxsw_sp_fid_index(rif->fid), false);
10711 	mlxsw_sp_rif_macvlan_flush(rif);
10712 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
10713 			       mlxsw_sp_router_port(mlxsw_sp), false);
10714 	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
10715 			       mlxsw_sp_router_port(mlxsw_sp), false);
10716 	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
10717 	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
10718 }
10719 
10720 static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10721 					struct netlink_ext_ack *extack)
10722 {
10723 	return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
10724 }
10725 
10726 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
10727 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
10728 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10729 	.configure		= mlxsw_sp1_rif_vlan_configure,
10730 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
10731 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
10732 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
10733 };
10734 
10735 static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
10736 					struct netlink_ext_ack *extack)
10737 {
10738 	u16 efid = mlxsw_sp_fid_index(rif->fid);
10739 
10740 	return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
10741 }
10742 
10743 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
10744 	.type			= MLXSW_SP_RIF_TYPE_VLAN,
10745 	.rif_size		= sizeof(struct mlxsw_sp_rif),
10746 	.configure		= mlxsw_sp2_rif_vlan_configure,
10747 	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
10748 	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
10749 	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
10750 };
10751 
10752 static struct mlxsw_sp_rif_ipip_lb *
10753 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
10754 {
10755 	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
10756 }
10757 
10758 static void
10759 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
10760 			   const struct mlxsw_sp_rif_params *params)
10761 {
10762 	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
10763 	struct mlxsw_sp_rif_ipip_lb *rif_lb;
10764 
10765 	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
10766 				 common);
10767 	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
10768 	rif_lb->lb_config = params_lb->lb_config;
10769 }
10770 
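/* Spectrum-1: an IP-in-IP loopback RIF is bound directly to the underlay
 * virtual router (ul_vr), which is reference-counted through
 * mlxsw_sp_vr_get()/mlxsw_sp_vr_put(); no separate underlay RIF exists.
 */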
10771 static int
10772 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10773 				struct netlink_ext_ack *extack)
10774 {
10775 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10776 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10777 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10778 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10779 	struct mlxsw_sp_vr *ul_vr;
10780 	int err;
10781 
10782 	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
10783 	if (IS_ERR(ul_vr))
10784 		return PTR_ERR(ul_vr);
10785 
10786 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
10787 	if (err)
10788 		goto err_loopback_op;
10789 
10790 	lb_rif->ul_vr_id = ul_vr->id;
10791 	lb_rif->ul_rif_id = 0;
10792 	++ul_vr->rif_count;
10793 	return 0;
10794 
10795 err_loopback_op:
10796 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10797 	return err;
10798 }
10799 
10800 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10801 {
10802 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10803 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10804 	struct mlxsw_sp_vr *ul_vr;
10805 
10806 	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
10807 	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
10808 
10809 	--ul_vr->rif_count;
10810 	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
10811 }
10812 
10813 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
10814 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
10815 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
10816 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
10817 	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
10818 	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
10819 };
10820 
10821 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
10822 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
10823 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp1_rif_vlan_ops,
10824 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
10825 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
10826 };
10827 
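/* Spectrum-2 and later instead use a dedicated underlay RIF (ul_rif): a
 * generic loopback RIF shared by all tunnels whose underlay resolves in
 * the same virtual router. It is reference-counted through
 * mlxsw_sp_ul_rif_get()/mlxsw_sp_ul_rif_put() below.
 */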
10828 static int
10829 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
10830 {
10831 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10832 	char ritr_pl[MLXSW_REG_RITR_LEN];
10833 
10834 	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
10835 			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
10836 	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
10837 					     MLXSW_REG_RITR_LOOPBACK_GENERIC);
10838 
10839 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
10840 }
10841 
10842 static struct mlxsw_sp_rif *
10843 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
10844 		       struct mlxsw_sp_crif *ul_crif,
10845 		       struct netlink_ext_ack *extack)
10846 {
10847 	struct mlxsw_sp_rif *ul_rif;
10848 	u8 rif_entries = 1;
10849 	u16 rif_index;
10850 	int err;
10851 
10852 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
10853 	if (err) {
10854 		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
10855 		return ERR_PTR(err);
10856 	}
10857 
10858 	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id,
10859 				    ul_crif);
10860 	if (!ul_rif) {
10861 		err = -ENOMEM;
10862 		goto err_rif_alloc;
10863 	}
10864 
10865 	mlxsw_sp->router->rifs[rif_index] = ul_rif;
10866 	ul_rif->mlxsw_sp = mlxsw_sp;
10867 	ul_rif->rif_entries = rif_entries;
10868 	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
10869 	if (err)
10870 		goto ul_rif_op_err;
10871 
10872 	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
10873 	return ul_rif;
10874 
10875 ul_rif_op_err:
10876 	mlxsw_sp->router->rifs[rif_index] = NULL;
10877 	mlxsw_sp_rif_free(ul_rif);
10878 err_rif_alloc:
10879 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10880 	return ERR_PTR(err);
10881 }
10882 
10883 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
10884 {
10885 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10886 	u8 rif_entries = ul_rif->rif_entries;
10887 	u16 rif_index = ul_rif->rif_index;
10888 
10889 	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
10890 	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
10891 	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
10892 	mlxsw_sp_rif_free(ul_rif);
10893 	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
10894 }
10895 
10896 static struct mlxsw_sp_rif *
10897 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
10898 		    struct mlxsw_sp_crif *ul_crif,
10899 		    struct netlink_ext_ack *extack)
10900 {
10901 	struct mlxsw_sp_vr *vr;
10902 	int err;
10903 
10904 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
10905 	if (IS_ERR(vr))
10906 		return ERR_CAST(vr);
10907 
10908 	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
10909 		return vr->ul_rif;
10910 
10911 	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
10912 	if (IS_ERR(vr->ul_rif)) {
10913 		err = PTR_ERR(vr->ul_rif);
10914 		goto err_ul_rif_create;
10915 	}
10916 
10917 	vr->rif_count++;
10918 	refcount_set(&vr->ul_rif_refcnt, 1);
10919 
10920 	return vr->ul_rif;
10921 
10922 err_ul_rif_create:
10923 	mlxsw_sp_vr_put(mlxsw_sp, vr);
10924 	return ERR_PTR(err);
10925 }
10926 
10927 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
10928 {
10929 	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10930 	struct mlxsw_sp_vr *vr;
10931 
10932 	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
10933 
10934 	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
10935 		return;
10936 
10937 	vr->rif_count--;
10938 	mlxsw_sp_ul_rif_destroy(ul_rif);
10939 	mlxsw_sp_vr_put(mlxsw_sp, vr);
10940 }
10941 
10942 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
10943 			       u16 *ul_rif_index)
10944 {
10945 	struct mlxsw_sp_rif *ul_rif;
10946 	int err = 0;
10947 
10948 	mutex_lock(&mlxsw_sp->router->lock);
10949 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
10950 	if (IS_ERR(ul_rif)) {
10951 		err = PTR_ERR(ul_rif);
10952 		goto out;
10953 	}
10954 	*ul_rif_index = ul_rif->rif_index;
10955 out:
10956 	mutex_unlock(&mlxsw_sp->router->lock);
10957 	return err;
10958 }
10959 
10960 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
10961 {
10962 	struct mlxsw_sp_rif *ul_rif;
10963 
10964 	mutex_lock(&mlxsw_sp->router->lock);
10965 	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
10966 	if (WARN_ON(!ul_rif))
10967 		goto out;
10968 
10969 	mlxsw_sp_ul_rif_put(ul_rif);
10970 out:
10971 	mutex_unlock(&mlxsw_sp->router->lock);
10972 }
10973 
10974 static int
10975 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10976 				struct netlink_ext_ack *extack)
10977 {
10978 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10979 	struct net_device *dev = mlxsw_sp_rif_dev(rif);
10980 	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
10981 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10982 	struct mlxsw_sp_rif *ul_rif;
10983 	int err;
10984 
10985 	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
10986 	if (IS_ERR(ul_rif))
10987 		return PTR_ERR(ul_rif);
10988 
10989 	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
10990 	if (err)
10991 		goto err_loopback_op;
10992 
10993 	lb_rif->ul_vr_id = 0;
10994 	lb_rif->ul_rif_id = ul_rif->rif_index;
10995 
10996 	return 0;
10997 
10998 err_loopback_op:
10999 	mlxsw_sp_ul_rif_put(ul_rif);
11000 	return err;
11001 }
11002 
mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif * rif)11003 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
11004 {
11005 	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
11006 	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
11007 	struct mlxsw_sp_rif *ul_rif;
11008 
11009 	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
11010 	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
11011 	mlxsw_sp_ul_rif_put(ul_rif);
11012 }
11013 
11014 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
11015 	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
11016 	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
11017 	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
11018 	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
11019 	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
11020 };
11021 
11022 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
11023 	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
11024 	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp2_rif_vlan_ops,
11025 	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
11026 	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
11027 };
11028 
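/* RIF indexes are handed out by a genpool rather than a plain bitmap so
 * that RIFs occupying two consecutive table entries can be allocated
 * naturally aligned index pairs (first-fit, order-aligned algorithm). The
 * pool is based at MLXSW_SP_ROUTER_GENALLOC_OFFSET because gen_pool_alloc()
 * returns 0 on failure, making a zero-based index ambiguous.
 */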
static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
{
	struct gen_pool *rifs_table;
	int err;

	rifs_table = gen_pool_create(0, -1);
	if (!rifs_table)
		return -ENOMEM;

	gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
			  NULL);

	err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
			   MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
	if (err)
		goto err_gen_pool_add;

	mlxsw_sp->router->rifs_table = rifs_table;

	return 0;

err_gen_pool_add:
	gen_pool_destroy(rifs_table);
	return err;
}

static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
{
	gen_pool_destroy(mlxsw_sp->router->rifs_table);
}

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_core *core = mlxsw_sp->core;
	int err;

	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
		return -EIO;
	mlxsw_sp->router->max_rif_mac_profile =
		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	err = mlxsw_sp_rifs_table_init(mlxsw_sp);
	if (err)
		goto err_rifs_table_init;

	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
	atomic_set(&mlxsw_sp->router->rifs_count, 0);
	devl_resource_occ_get_register(devlink,
				       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
				       mlxsw_sp_rif_mac_profiles_occ_get,
				       mlxsw_sp);
	devl_resource_occ_get_register(devlink,
				       MLXSW_SP_RESOURCE_RIFS,
				       mlxsw_sp_rifs_occ_get,
				       mlxsw_sp);

	return 0;

err_rifs_table_init:
	kfree(mlxsw_sp->router->rifs);
	return err;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
	for (i = 0; i < max_rifs; i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
	devl_resource_occ_get_unregister(devlink,
					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
	mlxsw_sp_rifs_table_fini(mlxsw_sp);
	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);

	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
	if (err)
		return err;

	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}

static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

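/* ECMP hash configuration. The bitmaps below map one-to-one onto the RECR2
 * register: "headers" selects which packet header types participate in the
 * hash, "fields" selects the individual fields hashed within them, and the
 * inner_* variants apply to the inner packet of tunneled traffic.
 */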
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct mlxsw_sp_mp_hash_config {
	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
	bool inc_parsing_depth;
};

#define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)

#define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)

#define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)

static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
}

static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
}

static void
mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
			      u32 hash_fields)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
	/* L4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
}

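/* Mirror the kernel's fib_multipath_hash_policy sysctl into the device:
 * 0 - L3 (source and destination IP), 1 - L4 (L3 plus protocol and ports),
 * 2 - L3 or inner L3 for tunneled packets, 3 - custom field set taken from
 * the fib_multipath_hash_fields sysctl.
 */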
static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;
	u32 hash_fields;

	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
	case 0:
		mlxsw_sp_mp4_hash_outer_addr(config);
		break;
	case 1:
		mlxsw_sp_mp4_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp4_hash_outer_addr(config);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		break;
	case 3:
		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		break;
	}
}

static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
}

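/* IPv6 counterpart of mlxsw_sp_mp4_hash_init(), driven by the IPv6
 * multipath hash policy sysctl. Policies 0 and 2 additionally hash the
 * next header and flow label, matching the kernel's software IPv6
 * multipath hash.
 */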
static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
	case 0:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		break;
	case 1:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		config->inc_parsing_depth = true;
		break;
	case 3:
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
			config->inc_parsing_depth = true;
		break;
	}
}

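/* Hashing on inner headers requires the ASIC to parse deeper into the
 * packet than it does by default. Parsing depth is a device-wide resource
 * shared with other features, so it is adjusted through the reference
 * counted mlxsw_sp_parsing_depth_inc()/_dec() helpers rather than toggled
 * directly.
 */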
static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
						 bool old_inc_parsing_depth,
						 bool new_inc_parsing_depth)
{
	int err;

	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
		if (err)
			return err;
		mlxsw_sp->router->inc_parsing_depth = true;
	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
		mlxsw_sp->router->inc_parsing_depth = false;
	}

	return 0;
}

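/* Compute the full multipath hash configuration and program it via the
 * RECR2 register. The seed comes from the fib_multipath_hash_seed sysctl
 * when the user set one, and is otherwise derived from the switch base MAC
 * so that different switches hash differently, avoiding polarization in
 * multi-stage topologies.
 */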
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth, new_inc_parsing_depth;
	struct mlxsw_sp_mp_hash_config config = {};
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	unsigned long bit;
	u32 seed;
	int err;

	seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).user_seed;
	if (!seed)
		seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);

	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);

	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
	new_inc_parsing_depth = config.inc_parsing_depth;
	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
						    old_inc_parsing_depth,
						    new_inc_parsing_depth);
	if (err)
		return err;

	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
	if (err)
		goto err_reg_write;

	return 0;

err_reg_write:
	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
					      old_inc_parsing_depth);
	return err;
}

static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;

	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
					      false);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}

static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
{
}
#endif

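/* Populate the device's DSCP-to-priority map (RDPM register). Each DSCP
 * value i sits in bits 7:2 of the ToS byte, hence the (i << 2) shift below;
 * e.g. DSCP 46 (EF) is looked up as ToS 46 << 2 == 184.
 */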
static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW is determining switch priority based on DSCP-bits, but the
	 * kernel is still doing that based on the ToS. Since there's a
	 * mismatch in bits we need to make sure to translate the right
	 * value ToS would observe, skipping the 2 least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

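/* Enable routing in the device (RGCR register): turn on IPv4 and IPv6
 * router support, cap the number of router interfaces at MAX_RIFS and
 * mirror the kernel's ip_fwd_update_priority sysctl into the update switch
 * priority (usp) setting.
 */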
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	bool usp;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_rif *lb_rif;
	int err;

	router->lb_crif = mlxsw_sp_crif_alloc(NULL);
	if (!router->lb_crif)
		return -ENOMEM;

	/* Create a generic loopback RIF associated with the main table
	 * (default VRF). Any table can be used, but the main table exists
	 * anyway, so we do not waste resources. Loopback RIFs are usually
	 * created with a NULL CRIF, but this RIF is used as a fallback RIF
	 * for blackhole nexthops, and nexthops expect to have a valid CRIF.
	 */
	lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif,
				     extack);
	if (IS_ERR(lb_rif)) {
		err = PTR_ERR(lb_rif);
		goto err_ul_rif_get;
	}

	return 0;

err_ul_rif_get:
	mlxsw_sp_crif_free(router->lb_crif);
	return err;
}

static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
	mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
}

static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
	.init = mlxsw_sp1_router_init,
	.ipips_init = mlxsw_sp1_ipips_init,
};

static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
	.init = mlxsw_sp2_router_init,
	.ipips_init = mlxsw_sp2_ipips_init,
};

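/* Main router initialization. Ordering matters: notifiers are registered
 * last, once all the data structures they may dereference are in place, and
 * the error path unwinds in exactly the reverse order, mirroring
 * mlxsw_sp_router_fini() below.
 */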
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	struct notifier_block *nb;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mutex_init(&router->lock);
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp->router_ops->init(mlxsw_sp);
	if (err)
		goto err_router_ops_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
			  mlxsw_sp_nh_grp_activity_work);
	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->crif_ht,
			      &mlxsw_sp_crif_ht_params);
	if (err)
		goto err_crif_ht_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
	if (err)
		goto err_lb_rif_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
	if (err)
		goto err_register_inetaddr_valid_notifier;

	nb = &router->inet6addr_valid_nb;
	nb->notifier_call = mlxsw_sp_inet6addr_valid_event;
	err = register_inet6addr_validator_notifier(nb);
	if (err)
		goto err_register_inet6addr_valid_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->netdevice_nb.notifier_call =
		mlxsw_sp_router_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->router->netdevice_nb);
	if (err)
		goto err_register_netdev_notifier;

	mlxsw_sp->router->nexthop_nb.notifier_call =
		mlxsw_sp_nexthop_obj_event;
	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
					&mlxsw_sp->router->nexthop_nb,
					extack);
	if (err)
		goto err_register_nexthop_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &router->netdevice_nb);
err_register_netdev_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
err_register_inet6addr_valid_notifier:
	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_valid_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
err_dscp_init:
	mlxsw_sp_mp_hash_fini(mlxsw_sp);
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
err_lb_rif_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
err_crif_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
err_router_ops_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &router->nexthop_nb);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &router->netdevice_nb);
	unregister_netevent_notifier(&router->netevent_nb);
	unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
	unregister_inet6addr_notifier(&router->inet6addr_nb);
	unregister_inetaddr_notifier(&router->inetaddr_nb);
	mlxsw_core_flush_owq();
	mlxsw_sp_mp_hash_fini(mlxsw_sp);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&router->nexthop_group_ht);
	rhashtable_destroy(&router->nexthop_ht);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->crif_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	cancel_delayed_work_sync(&router->nh_grp_activity_dw);
	mutex_destroy(&router->lock);
	kfree(router);
}