xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c (revision 0a94608f0f7de9b1135ffea3546afe68eafef57f)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
28 struct mlxsw_sp_bridge {
29 	struct mlxsw_sp *mlxsw_sp;
30 	struct {
31 		struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 		unsigned int interval; /* ms */
34 	} fdb_notify;
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
38 	u32 ageing_time;
39 	bool vlan_enabled_exists;
40 	struct list_head bridges_list;
41 	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
42 	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
44 	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
45 };
46 
47 struct mlxsw_sp_bridge_device {
48 	struct net_device *dev;
49 	struct list_head list;
50 	struct list_head ports_list;
51 	struct list_head mids_list;
52 	u8 vlan_enabled:1,
53 	   multicast_enabled:1,
54 	   mrouter:1;
55 	const struct mlxsw_sp_bridge_ops *ops;
56 };
57 
58 struct mlxsw_sp_bridge_port {
59 	struct net_device *dev;
60 	struct mlxsw_sp_bridge_device *bridge_device;
61 	struct list_head list;
62 	struct list_head vlans_list;
63 	unsigned int ref_count;
64 	u8 stp_state;
65 	unsigned long flags;
66 	bool mrouter;
67 	bool lagged;
68 	union {
69 		u16 lag_id;
70 		u16 system_port;
71 	};
72 };
73 
74 struct mlxsw_sp_bridge_vlan {
75 	struct list_head list;
76 	struct list_head port_vlan_list;
77 	u16 vid;
78 };
79 
80 struct mlxsw_sp_bridge_ops {
81 	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
82 			 struct mlxsw_sp_bridge_port *bridge_port,
83 			 struct mlxsw_sp_port *mlxsw_sp_port,
84 			 struct netlink_ext_ack *extack);
85 	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
86 			   struct mlxsw_sp_bridge_port *bridge_port,
87 			   struct mlxsw_sp_port *mlxsw_sp_port);
88 	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
89 			  const struct net_device *vxlan_dev, u16 vid,
90 			  struct netlink_ext_ack *extack);
91 	struct mlxsw_sp_fid *
92 		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
93 			   u16 vid, struct netlink_ext_ack *extack);
94 	struct mlxsw_sp_fid *
95 		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
96 			      u16 vid);
97 	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
98 		       const struct mlxsw_sp_fid *fid);
99 };
100 
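/* The mlxsw_sp_bridge_ops callbacks above are selected per bridge flavour
 * (802.1Q, 802.1D or 802.1ad) when the bridge device is created, so callers
 * never branch on the bridge type themselves.  A minimal sketch of the
 * dispatch, assuming bridge_device, vid and extack are in scope:
 *
 *	struct mlxsw_sp_fid *fid;
 *
 *	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
 *	if (IS_ERR(fid))
 *		return PTR_ERR(fid);
 */
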
101 struct mlxsw_sp_switchdev_ops {
102 	void (*init)(struct mlxsw_sp *mlxsw_sp);
103 };
104 
105 static int
106 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
107 			       struct mlxsw_sp_bridge_port *bridge_port,
108 			       u16 fid_index);
109 
110 static void
111 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
112 			       struct mlxsw_sp_bridge_port *bridge_port);
113 
114 static void
115 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
116 				   struct mlxsw_sp_bridge_device
117 				   *bridge_device);
118 
119 static void
120 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
121 				 struct mlxsw_sp_bridge_port *bridge_port,
122 				 bool add);
123 
124 static struct mlxsw_sp_bridge_device *
125 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
126 			    const struct net_device *br_dev)
127 {
128 	struct mlxsw_sp_bridge_device *bridge_device;
129 
130 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
131 		if (bridge_device->dev == br_dev)
132 			return bridge_device;
133 
134 	return NULL;
135 }
136 
137 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
138 					 const struct net_device *br_dev)
139 {
140 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
141 }
142 
143 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
144 						    struct netdev_nested_priv *priv)
145 {
146 	struct mlxsw_sp *mlxsw_sp = priv->data;
147 
148 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
149 	return 0;
150 }
151 
152 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
153 						struct net_device *dev)
154 {
155 	struct netdev_nested_priv priv = {
156 		.data = (void *)mlxsw_sp,
157 	};
158 
159 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
160 	netdev_walk_all_upper_dev_rcu(dev,
161 				      mlxsw_sp_bridge_device_upper_rif_destroy,
162 				      &priv);
163 }
164 
165 static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
166 					     struct net_device *br_dev,
167 					     struct netlink_ext_ack *extack)
168 {
169 	struct net_device *dev, *stop_dev;
170 	struct list_head *iter;
171 	int err;
172 
173 	netdev_for_each_lower_dev(br_dev, dev, iter) {
174 		if (netif_is_vxlan(dev) && netif_running(dev)) {
175 			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
176 							 br_dev, dev, 0,
177 							 extack);
178 			if (err) {
179 				stop_dev = dev;
180 				goto err_vxlan_join;
181 			}
182 		}
183 	}
184 
185 	return 0;
186 
187 err_vxlan_join:
188 	netdev_for_each_lower_dev(br_dev, dev, iter) {
189 		if (netif_is_vxlan(dev) && netif_running(dev)) {
190 			if (stop_dev == dev)
191 				break;
192 			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
193 		}
194 	}
195 	return err;
196 }
197 
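/* mlxsw_sp_bridge_device_vxlan_init() above rolls back partial progress by
 * re-walking the same lower-device list and leaving every VXLAN device that
 * was joined before the one that failed.  The general shape of the idiom,
 * with do_join()/undo_join() as hypothetical placeholders:
 *
 *	netdev_for_each_lower_dev(br_dev, dev, iter) {
 *		err = do_join(dev);
 *		if (err) {
 *			stop_dev = dev;
 *			goto err_join;
 *		}
 *	}
 *	return 0;
 *
 * err_join:
 *	netdev_for_each_lower_dev(br_dev, dev, iter) {
 *		if (dev == stop_dev)
 *			break;
 *		undo_join(dev);
 *	}
 *	return err;
 */
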
198 static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
199 					      struct net_device *br_dev)
200 {
201 	struct net_device *dev;
202 	struct list_head *iter;
203 
204 	netdev_for_each_lower_dev(br_dev, dev, iter) {
205 		if (netif_is_vxlan(dev) && netif_running(dev))
206 			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
207 	}
208 }
209 
210 static struct mlxsw_sp_bridge_device *
211 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
212 			      struct net_device *br_dev,
213 			      struct netlink_ext_ack *extack)
214 {
215 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
216 	struct mlxsw_sp_bridge_device *bridge_device;
217 	bool vlan_enabled = br_vlan_enabled(br_dev);
218 	int err;
219 
220 	if (vlan_enabled && bridge->vlan_enabled_exists) {
221 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
222 		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
223 		return ERR_PTR(-EINVAL);
224 	}
225 
226 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
227 	if (!bridge_device)
228 		return ERR_PTR(-ENOMEM);
229 
230 	bridge_device->dev = br_dev;
231 	bridge_device->vlan_enabled = vlan_enabled;
232 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
233 	bridge_device->mrouter = br_multicast_router(br_dev);
234 	INIT_LIST_HEAD(&bridge_device->ports_list);
235 	if (vlan_enabled) {
236 		u16 proto;
237 
238 		bridge->vlan_enabled_exists = true;
239 		br_vlan_get_proto(br_dev, &proto);
240 		if (proto == ETH_P_8021AD)
241 			bridge_device->ops = bridge->bridge_8021ad_ops;
242 		else
243 			bridge_device->ops = bridge->bridge_8021q_ops;
244 	} else {
245 		bridge_device->ops = bridge->bridge_8021d_ops;
246 	}
247 	INIT_LIST_HEAD(&bridge_device->mids_list);
248 	list_add(&bridge_device->list, &bridge->bridges_list);
249 
250 	/* It is possible we already have VXLAN devices enslaved to the bridge,
251 	 * in which case we need to replay their configuration as if they were
252 	 * just now enslaved to the bridge.
253 	 */
254 	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
255 	if (err)
256 		goto err_vxlan_init;
257 
258 	return bridge_device;
259 
260 err_vxlan_init:
261 	list_del(&bridge_device->list);
262 	if (bridge_device->vlan_enabled)
263 		bridge->vlan_enabled_exists = false;
264 	kfree(bridge_device);
265 	return ERR_PTR(err);
266 }
267 
268 static void
269 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
270 			       struct mlxsw_sp_bridge_device *bridge_device)
271 {
272 	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
273 	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
274 					    bridge_device->dev);
275 	list_del(&bridge_device->list);
276 	if (bridge_device->vlan_enabled)
277 		bridge->vlan_enabled_exists = false;
278 	WARN_ON(!list_empty(&bridge_device->ports_list));
279 	WARN_ON(!list_empty(&bridge_device->mids_list));
280 	kfree(bridge_device);
281 }
282 
283 static struct mlxsw_sp_bridge_device *
284 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
285 			   struct net_device *br_dev,
286 			   struct netlink_ext_ack *extack)
287 {
288 	struct mlxsw_sp_bridge_device *bridge_device;
289 
290 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
291 	if (bridge_device)
292 		return bridge_device;
293 
294 	return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
295 }
296 
297 static void
298 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
299 			   struct mlxsw_sp_bridge_device *bridge_device)
300 {
301 	if (list_empty(&bridge_device->ports_list))
302 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
303 }
304 
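/* Bridge devices carry no explicit reference count; their lifetime is tied
 * to the ports list, and mlxsw_sp_bridge_device_put() above only destroys
 * the device once the last bridge port is gone.  Callers simply pair a get
 * with a put (illustrative sketch, error handling elided):
 *
 *	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
 *	...
 *	mlxsw_sp_bridge_device_put(bridge, bridge_device);
 */
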
305 static struct mlxsw_sp_bridge_port *
306 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
307 			    const struct net_device *brport_dev)
308 {
309 	struct mlxsw_sp_bridge_port *bridge_port;
310 
311 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
312 		if (bridge_port->dev == brport_dev)
313 			return bridge_port;
314 	}
315 
316 	return NULL;
317 }
318 
319 struct mlxsw_sp_bridge_port *
320 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
321 			  struct net_device *brport_dev)
322 {
323 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
324 	struct mlxsw_sp_bridge_device *bridge_device;
325 
326 	if (!br_dev)
327 		return NULL;
328 
329 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
330 	if (!bridge_device)
331 		return NULL;
332 
333 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
334 }
335 
336 static struct mlxsw_sp_bridge_port *
337 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
338 			    struct net_device *brport_dev,
339 			    struct netlink_ext_ack *extack)
340 {
341 	struct mlxsw_sp_bridge_port *bridge_port;
342 	struct mlxsw_sp_port *mlxsw_sp_port;
343 	int err;
344 
345 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
346 	if (!bridge_port)
347 		return ERR_PTR(-ENOMEM);
348 
349 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
350 	bridge_port->lagged = mlxsw_sp_port->lagged;
351 	if (bridge_port->lagged)
352 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
353 	else
354 		bridge_port->system_port = mlxsw_sp_port->local_port;
355 	bridge_port->dev = brport_dev;
356 	bridge_port->bridge_device = bridge_device;
357 	bridge_port->stp_state = BR_STATE_DISABLED;
358 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
359 			     BR_MCAST_FLOOD;
360 	INIT_LIST_HEAD(&bridge_port->vlans_list);
361 	list_add(&bridge_port->list, &bridge_device->ports_list);
362 	bridge_port->ref_count = 1;
363 
364 	err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
365 					    NULL, NULL, NULL, false, extack);
366 	if (err)
367 		goto err_switchdev_offload;
368 
369 	return bridge_port;
370 
371 err_switchdev_offload:
372 	list_del(&bridge_port->list);
373 	kfree(bridge_port);
374 	return ERR_PTR(err);
375 }
376 
377 static void
378 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
379 {
380 	switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
381 	list_del(&bridge_port->list);
382 	WARN_ON(!list_empty(&bridge_port->vlans_list));
383 	kfree(bridge_port);
384 }
385 
386 static struct mlxsw_sp_bridge_port *
387 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
388 			 struct net_device *brport_dev,
389 			 struct netlink_ext_ack *extack)
390 {
391 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
392 	struct mlxsw_sp_bridge_device *bridge_device;
393 	struct mlxsw_sp_bridge_port *bridge_port;
394 	int err;
395 
396 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
397 	if (bridge_port) {
398 		bridge_port->ref_count++;
399 		return bridge_port;
400 	}
401 
402 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
403 	if (IS_ERR(bridge_device))
404 		return ERR_CAST(bridge_device);
405 
406 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
407 						  extack);
408 	if (IS_ERR(bridge_port)) {
409 		err = PTR_ERR(bridge_port);
410 		goto err_bridge_port_create;
411 	}
412 
413 	return bridge_port;
414 
415 err_bridge_port_create:
416 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
417 	return ERR_PTR(err);
418 }
419 
420 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
421 				     struct mlxsw_sp_bridge_port *bridge_port)
422 {
423 	struct mlxsw_sp_bridge_device *bridge_device;
424 
425 	if (--bridge_port->ref_count != 0)
426 		return;
427 	bridge_device = bridge_port->bridge_device;
428 	mlxsw_sp_bridge_port_destroy(bridge_port);
429 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
430 }
431 
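/* Bridge ports, unlike bridge devices, do carry an explicit ref_count: the
 * same brport_dev can be looked up once per port VLAN that joins it.  A
 * sketch of the expected pairing, assuming bridge and brport_dev are in
 * scope:
 *
 *	bridge_port = mlxsw_sp_bridge_port_get(bridge, brport_dev, extack);
 *	if (IS_ERR(bridge_port))
 *		return PTR_ERR(bridge_port);
 *	...
 *	mlxsw_sp_bridge_port_put(bridge, bridge_port);
 */
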
432 static struct mlxsw_sp_port_vlan *
433 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
434 				  const struct mlxsw_sp_bridge_device *
435 				  bridge_device,
436 				  u16 vid)
437 {
438 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
439 
440 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
441 			    list) {
442 		if (!mlxsw_sp_port_vlan->bridge_port)
443 			continue;
444 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
445 		    bridge_device)
446 			continue;
447 		if (bridge_device->vlan_enabled &&
448 		    mlxsw_sp_port_vlan->vid != vid)
449 			continue;
450 		return mlxsw_sp_port_vlan;
451 	}
452 
453 	return NULL;
454 }
455 
456 static struct mlxsw_sp_port_vlan *
457 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
458 			       u16 fid_index)
459 {
460 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
461 
462 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
463 			    list) {
464 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
465 
466 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
467 			return mlxsw_sp_port_vlan;
468 	}
469 
470 	return NULL;
471 }
472 
473 static struct mlxsw_sp_bridge_vlan *
474 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
475 			  u16 vid)
476 {
477 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
478 
479 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
480 		if (bridge_vlan->vid == vid)
481 			return bridge_vlan;
482 	}
483 
484 	return NULL;
485 }
486 
487 static struct mlxsw_sp_bridge_vlan *
488 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
489 {
490 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
491 
492 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
493 	if (!bridge_vlan)
494 		return NULL;
495 
496 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
497 	bridge_vlan->vid = vid;
498 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
499 
500 	return bridge_vlan;
501 }
502 
503 static void
504 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
505 {
506 	list_del(&bridge_vlan->list);
507 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
508 	kfree(bridge_vlan);
509 }
510 
511 static struct mlxsw_sp_bridge_vlan *
512 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
513 {
514 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
515 
516 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
517 	if (bridge_vlan)
518 		return bridge_vlan;
519 
520 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
521 }
522 
523 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
524 {
525 	if (list_empty(&bridge_vlan->port_vlan_list))
526 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
527 }
528 
529 static int
530 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
531 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
532 				  u8 state)
533 {
534 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
535 
536 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
537 			    bridge_vlan_node) {
538 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
539 			continue;
540 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
541 						 bridge_vlan->vid, state);
542 	}
543 
544 	return 0;
545 }
546 
547 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
548 					    struct net_device *orig_dev,
549 					    u8 state)
550 {
551 	struct mlxsw_sp_bridge_port *bridge_port;
552 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
553 	int err;
554 
555 	/* It's possible we failed to enslave the port, yet this
556 	 * operation is executed due to it being deferred.
557 	 */
558 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
559 						orig_dev);
560 	if (!bridge_port)
561 		return 0;
562 
563 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
564 		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
565 							bridge_vlan, state);
566 		if (err)
567 			goto err_port_bridge_vlan_stp_set;
568 	}
569 
570 	bridge_port->stp_state = state;
571 
572 	return 0;
573 
574 err_port_bridge_vlan_stp_set:
575 	list_for_each_entry_continue_reverse(bridge_vlan,
576 					     &bridge_port->vlans_list, list)
577 		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
578 						  bridge_port->stp_state);
579 	return err;
580 }
581 
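/* mlxsw_sp_port_attr_stp_state_set() above uses the common partial-rollback
 * idiom for per-VLAN operations: on failure, walk the already-processed list
 * entries in reverse and restore the previous value.  Generic shape, with
 * apply()/restore() as hypothetical placeholders:
 *
 *	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
 *		err = apply(bridge_vlan);
 *		if (err)
 *			goto err_apply;
 *	}
 *	return 0;
 *
 * err_apply:
 *	list_for_each_entry_continue_reverse(bridge_vlan,
 *					     &bridge_port->vlans_list, list)
 *		restore(bridge_vlan);
 *	return err;
 */
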
582 static int
583 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
584 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
585 				    enum mlxsw_sp_flood_type packet_type,
586 				    bool member)
587 {
588 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
589 
590 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
591 			    bridge_vlan_node) {
592 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
593 			continue;
594 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
595 					      packet_type,
596 					      mlxsw_sp_port->local_port,
597 					      member);
598 	}
599 
600 	return 0;
601 }
602 
603 static int
604 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
605 				     struct mlxsw_sp_bridge_port *bridge_port,
606 				     enum mlxsw_sp_flood_type packet_type,
607 				     bool member)
608 {
609 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
610 	int err;
611 
612 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
613 		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
614 							  bridge_vlan,
615 							  packet_type,
616 							  member);
617 		if (err)
618 			goto err_port_bridge_vlan_flood_set;
619 	}
620 
621 	return 0;
622 
623 err_port_bridge_vlan_flood_set:
624 	list_for_each_entry_continue_reverse(bridge_vlan,
625 					     &bridge_port->vlans_list, list)
626 		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
627 						    packet_type, !member);
628 	return err;
629 }
630 
631 static int
632 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
633 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
634 				       bool set)
635 {
636 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
637 	u16 vid = bridge_vlan->vid;
638 
639 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
640 			    bridge_vlan_node) {
641 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
642 			continue;
643 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
644 	}
645 
646 	return 0;
647 }
648 
649 static int
650 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
651 				  struct mlxsw_sp_bridge_port *bridge_port,
652 				  bool set)
653 {
654 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
655 	int err;
656 
657 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
658 		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
659 							     bridge_vlan, set);
660 		if (err)
661 			goto err_port_bridge_vlan_learning_set;
662 	}
663 
664 	return 0;
665 
666 err_port_bridge_vlan_learning_set:
667 	list_for_each_entry_continue_reverse(bridge_vlan,
668 					     &bridge_port->vlans_list, list)
669 		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
670 						       bridge_vlan, !set);
671 	return err;
672 }
673 
674 static int
675 mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
676 				    struct switchdev_brport_flags flags)
677 {
678 	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
679 		return -EINVAL;
680 
681 	return 0;
682 }
683 
684 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
685 					   struct net_device *orig_dev,
686 					   struct switchdev_brport_flags flags)
687 {
688 	struct mlxsw_sp_bridge_port *bridge_port;
689 	int err;
690 
691 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
692 						orig_dev);
693 	if (!bridge_port)
694 		return 0;
695 
696 	if (flags.mask & BR_FLOOD) {
697 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
698 							   bridge_port,
699 							   MLXSW_SP_FLOOD_TYPE_UC,
700 							   flags.val & BR_FLOOD);
701 		if (err)
702 			return err;
703 	}
704 
705 	if (flags.mask & BR_LEARNING) {
706 		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
707 							bridge_port,
708 							flags.val & BR_LEARNING);
709 		if (err)
710 			return err;
711 	}
712 
713 	if (bridge_port->bridge_device->multicast_enabled)
714 		goto out;
715 
716 	if (flags.mask & BR_MCAST_FLOOD) {
717 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
718 							   bridge_port,
719 							   MLXSW_SP_FLOOD_TYPE_MC,
720 							   flags.val & BR_MCAST_FLOOD);
721 		if (err)
722 			return err;
723 	}
724 
725 out:
726 	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
727 	return 0;
728 }
729 
730 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
731 {
732 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
733 	int err;
734 
735 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
736 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
737 	if (err)
738 		return err;
739 	mlxsw_sp->bridge->ageing_time = ageing_time;
740 	return 0;
741 }
742 
743 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
744 					    unsigned long ageing_clock_t)
745 {
746 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
747 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
748 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
749 
750 	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
751 	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
752 		return -ERANGE;
753 
754 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
755 }
756 
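/* Worked example for mlxsw_sp_port_attr_br_ageing_set() above, assuming the
 * usual USER_HZ of 100: the bridge default ageing of 300 seconds arrives as
 * ageing_clock_t == 30000, the jiffies round-trip converts it back to 300
 * seconds, and the value passes the 10..1000000 second range enforced by
 * MLXSW_SP_MIN_AGEING_TIME/MLXSW_SP_MAX_AGEING_TIME before being written
 * via SFDAT:
 *
 *	ageing_jiffies = clock_t_to_jiffies(30000);
 *	ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;	(== 300)
 */
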
757 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
758 					  struct net_device *orig_dev,
759 					  bool vlan_enabled)
760 {
761 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
762 	struct mlxsw_sp_bridge_device *bridge_device;
763 
764 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
765 	if (WARN_ON(!bridge_device))
766 		return -EINVAL;
767 
768 	if (bridge_device->vlan_enabled == vlan_enabled)
769 		return 0;
770 
771 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
772 	return -EINVAL;
773 }
774 
775 static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
776 						struct net_device *orig_dev,
777 						u16 vlan_proto)
778 {
779 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
780 	struct mlxsw_sp_bridge_device *bridge_device;
781 
782 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
783 	if (WARN_ON(!bridge_device))
784 		return -EINVAL;
785 
786 	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
787 	return -EINVAL;
788 }
789 
790 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
791 					  struct net_device *orig_dev,
792 					  bool is_port_mrouter)
793 {
794 	struct mlxsw_sp_bridge_port *bridge_port;
795 	int err;
796 
797 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
798 						orig_dev);
799 	if (!bridge_port)
800 		return 0;
801 
802 	if (!bridge_port->bridge_device->multicast_enabled)
803 		goto out;
804 
805 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
806 						   MLXSW_SP_FLOOD_TYPE_MC,
807 						   is_port_mrouter);
808 	if (err)
809 		return err;
810 
811 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
812 					 is_port_mrouter);
813 out:
814 	bridge_port->mrouter = is_port_mrouter;
815 	return 0;
816 }
817 
818 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
819 {
820 	const struct mlxsw_sp_bridge_device *bridge_device;
821 
822 	bridge_device = bridge_port->bridge_device;
823 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
824 					bridge_port->flags & BR_MCAST_FLOOD;
825 }
826 
827 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
828 					 struct net_device *orig_dev,
829 					 bool mc_disabled)
830 {
831 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
832 	struct mlxsw_sp_bridge_device *bridge_device;
833 	struct mlxsw_sp_bridge_port *bridge_port;
834 	int err;
835 
836 	/* It's possible we failed to enslave the port, yet this
837 	 * operation is executed due to it being deferred.
838 	 */
839 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
840 	if (!bridge_device)
841 		return 0;
842 
843 	if (bridge_device->multicast_enabled != !mc_disabled) {
844 		bridge_device->multicast_enabled = !mc_disabled;
845 		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
846 						   bridge_device);
847 	}
848 
849 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
850 		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
851 		bool member = mlxsw_sp_mc_flood(bridge_port);
852 
853 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
854 							   bridge_port,
855 							   packet_type, member);
856 		if (err)
857 			return err;
858 	}
859 
860 	bridge_device->multicast_enabled = !mc_disabled;
861 
862 	return 0;
863 }
864 
865 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
866 					 u16 mid_idx, bool add)
867 {
868 	char *smid2_pl;
869 	int err;
870 
871 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
872 	if (!smid2_pl)
873 		return -ENOMEM;
874 
875 	mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
876 			     mlxsw_sp_router_port(mlxsw_sp), add);
877 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
878 	kfree(smid2_pl);
879 	return err;
880 }
881 
882 static void
883 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
884 				   struct mlxsw_sp_bridge_device *bridge_device,
885 				   bool add)
886 {
887 	struct mlxsw_sp_mid *mid;
888 
889 	list_for_each_entry(mid, &bridge_device->mids_list, list)
890 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
891 }
892 
893 static int
894 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
895 				  struct net_device *orig_dev,
896 				  bool is_mrouter)
897 {
898 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
899 	struct mlxsw_sp_bridge_device *bridge_device;
900 
901 	/* It's possible we failed to enslave the port, yet this
902 	 * operation is executed due to it being deferred.
903 	 */
904 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
905 	if (!bridge_device)
906 		return 0;
907 
908 	if (bridge_device->mrouter != is_mrouter)
909 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
910 						   is_mrouter);
911 	bridge_device->mrouter = is_mrouter;
912 	return 0;
913 }
914 
915 static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
916 				  const struct switchdev_attr *attr,
917 				  struct netlink_ext_ack *extack)
918 {
919 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
920 	int err;
921 
922 	switch (attr->id) {
923 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
924 		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
925 						       attr->orig_dev,
926 						       attr->u.stp_state);
927 		break;
928 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
929 		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
930 							  attr->u.brport_flags);
931 		break;
932 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
933 		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
934 						      attr->orig_dev,
935 						      attr->u.brport_flags);
936 		break;
937 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
938 		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
939 						       attr->u.ageing_time);
940 		break;
941 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
942 		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
943 						     attr->orig_dev,
944 						     attr->u.vlan_filtering);
945 		break;
946 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
947 		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
948 							   attr->orig_dev,
949 							   attr->u.vlan_protocol);
950 		break;
951 	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
952 		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
953 						     attr->orig_dev,
954 						     attr->u.mrouter);
955 		break;
956 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
957 		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
958 						    attr->orig_dev,
959 						    attr->u.mc_disabled);
960 		break;
961 	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
962 		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
963 							attr->orig_dev,
964 							attr->u.mrouter);
965 		break;
966 	default:
967 		err = -EOPNOTSUPP;
968 		break;
969 	}
970 
971 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
972 
973 	return err;
974 }
975 
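/* mlxsw_sp_port_attr_set() above is the single entry point for deferred
 * switchdev attribute notifications.  Purely illustrative sketch of the
 * attribute layout it dispatches on; in practice the bridge layer builds
 * the attribute and it arrives through the switchdev notifier rather than
 * a direct call.  brport_dev is a hypothetical bridge port netdev and dev
 * is the mlxsw_sp port netdev:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = mlxsw_sp_port_attr_set(dev, NULL, &attr, NULL);
 */
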
976 static int
977 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
978 			    struct mlxsw_sp_bridge_port *bridge_port,
979 			    struct netlink_ext_ack *extack)
980 {
981 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
982 	struct mlxsw_sp_bridge_device *bridge_device;
983 	u16 local_port = mlxsw_sp_port->local_port;
984 	u16 vid = mlxsw_sp_port_vlan->vid;
985 	struct mlxsw_sp_fid *fid;
986 	int err;
987 
988 	bridge_device = bridge_port->bridge_device;
989 	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
990 	if (IS_ERR(fid))
991 		return PTR_ERR(fid);
992 
993 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
994 				     bridge_port->flags & BR_FLOOD);
995 	if (err)
996 		goto err_fid_uc_flood_set;
997 
998 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
999 				     mlxsw_sp_mc_flood(bridge_port));
1000 	if (err)
1001 		goto err_fid_mc_flood_set;
1002 
1003 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
1004 				     true);
1005 	if (err)
1006 		goto err_fid_bc_flood_set;
1007 
1008 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
1009 	if (err)
1010 		goto err_fid_port_vid_map;
1011 
1012 	mlxsw_sp_port_vlan->fid = fid;
1013 
1014 	return 0;
1015 
1016 err_fid_port_vid_map:
1017 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
1018 err_fid_bc_flood_set:
1019 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
1020 err_fid_mc_flood_set:
1021 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
1022 err_fid_uc_flood_set:
1023 	mlxsw_sp_fid_put(fid);
1024 	return err;
1025 }
1026 
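/* mlxsw_sp_port_vlan_fid_join() above follows the standard goto-ladder
 * unwind: each failure label undoes exactly the steps that already
 * succeeded, in reverse order of setup (VID map, then BC, MC and UC flood
 * tables, then the FID reference).  Keeping the labels ordered this way is
 * what makes mlxsw_sp_port_vlan_fid_leave() a mirror image of the join
 * path.
 */
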
1027 static void
1028 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1029 {
1030 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1031 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1032 	u16 local_port = mlxsw_sp_port->local_port;
1033 	u16 vid = mlxsw_sp_port_vlan->vid;
1034 
1035 	mlxsw_sp_port_vlan->fid = NULL;
1036 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
1037 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
1038 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
1039 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
1040 	mlxsw_sp_fid_put(fid);
1041 }
1042 
1043 static u16
1044 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1045 			     u16 vid, bool is_pvid)
1046 {
1047 	if (is_pvid)
1048 		return vid;
1049 	else if (mlxsw_sp_port->pvid == vid)
1050 		return 0;	/* Disallow untagged packets */
1051 	else
1052 		return mlxsw_sp_port->pvid;
1053 }
1054 
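/* Decision table for mlxsw_sp_port_pvid_determine() above:
 *
 *	is_pvid	current pvid	result
 *	true	any		vid	(this VLAN becomes the PVID)
 *	false	== vid		0	(drop the PVID; untagged is disallowed)
 *	false	!= vid		pvid	(keep the current PVID)
 */
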
1055 static int
1056 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
1057 			       struct mlxsw_sp_bridge_port *bridge_port,
1058 			       struct netlink_ext_ack *extack)
1059 {
1060 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1061 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1062 	u16 vid = mlxsw_sp_port_vlan->vid;
1063 	int err;
1064 
1065 	/* No need to continue if only VLAN flags were changed */
1066 	if (mlxsw_sp_port_vlan->bridge_port)
1067 		return 0;
1068 
1069 	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
1070 					  extack);
1071 	if (err)
1072 		return err;
1073 
1074 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
1075 					     bridge_port->flags & BR_LEARNING);
1076 	if (err)
1077 		goto err_port_vid_learning_set;
1078 
1079 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1080 					bridge_port->stp_state);
1081 	if (err)
1082 		goto err_port_vid_stp_set;
1083 
1084 	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1085 	if (!bridge_vlan) {
1086 		err = -ENOMEM;
1087 		goto err_bridge_vlan_get;
1088 	}
1089 
1090 	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1091 		 &bridge_vlan->port_vlan_list);
1092 
1093 	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1094 				 bridge_port->dev, extack);
1095 	mlxsw_sp_port_vlan->bridge_port = bridge_port;
1096 
1097 	return 0;
1098 
1099 err_bridge_vlan_get:
1100 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1101 err_port_vid_stp_set:
1102 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1103 err_port_vid_learning_set:
1104 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1105 	return err;
1106 }
1107 
1108 void
1109 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1110 {
1111 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1112 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1113 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1114 	struct mlxsw_sp_bridge_port *bridge_port;
1115 	u16 vid = mlxsw_sp_port_vlan->vid;
1116 	bool last_port, last_vlan;
1117 
1118 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1119 		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1120 		return;
1121 
1122 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
1123 	last_vlan = list_is_singular(&bridge_port->vlans_list);
1124 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1125 	last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1126 
1127 	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1128 	mlxsw_sp_bridge_vlan_put(bridge_vlan);
1129 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1130 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1131 	if (last_port)
1132 		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1133 					       bridge_port,
1134 					       mlxsw_sp_fid_index(fid));
1135 	if (last_vlan)
1136 		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1137 
1138 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1139 
1140 	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1141 	mlxsw_sp_port_vlan->bridge_port = NULL;
1142 }
1143 
1144 static int
1145 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1146 			      struct mlxsw_sp_bridge_port *bridge_port,
1147 			      u16 vid, bool is_untagged, bool is_pvid,
1148 			      struct netlink_ext_ack *extack)
1149 {
1150 	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1151 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1152 	u16 old_pvid = mlxsw_sp_port->pvid;
1153 	u16 proto;
1154 	int err;
1155 
1156 	/* The only valid scenario in which a port-vlan already exists is if
1157 	 * the VLAN flags were changed and the port-vlan is associated with the
1158 	 * correct bridge port.
1159 	 */
1160 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1161 	if (mlxsw_sp_port_vlan &&
1162 	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
1163 		return -EEXIST;
1164 
1165 	if (!mlxsw_sp_port_vlan) {
1166 		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1167 							       vid);
1168 		if (IS_ERR(mlxsw_sp_port_vlan))
1169 			return PTR_ERR(mlxsw_sp_port_vlan);
1170 	}
1171 
1172 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1173 				     is_untagged);
1174 	if (err)
1175 		goto err_port_vlan_set;
1176 
1177 	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
1178 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
1179 	if (err)
1180 		goto err_port_pvid_set;
1181 
1182 	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
1183 					     extack);
1184 	if (err)
1185 		goto err_port_vlan_bridge_join;
1186 
1187 	return 0;
1188 
1189 err_port_vlan_bridge_join:
1190 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
1191 err_port_pvid_set:
1192 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1193 err_port_vlan_set:
1194 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1195 	return err;
1196 }
1197 
1198 static int
1199 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1200 				const struct net_device *br_dev,
1201 				const struct switchdev_obj_port_vlan *vlan)
1202 {
1203 	u16 pvid;
1204 
1205 	pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
1206 	if (!pvid)
1207 		return 0;
1208 
1209 	if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1210 		if (vlan->vid != pvid) {
1211 			netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1212 			return -EBUSY;
1213 		}
1214 	} else {
1215 		if (vlan->vid == pvid) {
1216 			netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1217 			return -EBUSY;
1218 		}
1219 	}
1220 
1221 	return 0;
1222 }
1223 
1224 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1225 				   const struct switchdev_obj_port_vlan *vlan,
1226 				   struct netlink_ext_ack *extack)
1227 {
1228 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1229 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1230 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1231 	struct net_device *orig_dev = vlan->obj.orig_dev;
1232 	struct mlxsw_sp_bridge_port *bridge_port;
1233 
1234 	if (netif_is_bridge_master(orig_dev)) {
1235 		int err = 0;
1236 
1237 		if (br_vlan_enabled(orig_dev))
1238 			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1239 							      orig_dev, vlan);
1240 		if (!err)
1241 			err = -EOPNOTSUPP;
1242 		return err;
1243 	}
1244 
1245 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1246 	if (WARN_ON(!bridge_port))
1247 		return -EINVAL;
1248 
1249 	if (!bridge_port->bridge_device->vlan_enabled)
1250 		return 0;
1251 
1252 	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1253 					     vlan->vid, flag_untagged,
1254 					     flag_pvid, extack);
1255 }
1256 
1257 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1258 {
1259 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1260 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1261 }
1262 
1263 static int
1264 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1265 			       struct mlxsw_sp_bridge_port *bridge_port,
1266 			       u16 fid_index)
1267 {
1268 	bool lagged = bridge_port->lagged;
1269 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1270 	u16 system_port;
1271 
1272 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1273 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1274 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1275 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1276 
1277 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1278 }
1279 
1280 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1281 {
1282 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1283 			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1284 }
1285 
1286 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1287 {
1288 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1289 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1290 }
1291 
1292 static int
1293 mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
1294 			     const char *mac, u16 fid, __be32 addr, bool adding)
1295 {
1296 	char *sfd_pl;
1297 	u8 num_rec;
1298 	u32 uip;
1299 	int err;
1300 
1301 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1302 	if (!sfd_pl)
1303 		return -ENOMEM;
1304 
1305 	uip = be32_to_cpu(addr);
1306 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1307 	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
1308 				      mlxsw_sp_sfd_rec_policy(dynamic), mac,
1309 				      fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
1310 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1311 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1312 	if (err)
1313 		goto out;
1314 
1315 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1316 		err = -EBUSY;
1317 
1318 out:
1319 	kfree(sfd_pl);
1320 	return err;
1321 }
1322 
1323 static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
1324 						  const char *mac, u16 fid,
1325 						  u32 kvdl_index, bool adding)
1326 {
1327 	char *sfd_pl;
1328 	u8 num_rec;
1329 	int err;
1330 
1331 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1332 	if (!sfd_pl)
1333 		return -ENOMEM;
1334 
1335 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1336 	mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
1337 				      MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
1338 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1339 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1340 	if (err)
1341 		goto out;
1342 
1343 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1344 		err = -EBUSY;
1345 
1346 out:
1347 	kfree(sfd_pl);
1348 	return err;
1349 }
1350 
1351 static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
1352 					    const char *mac, u16 fid,
1353 					    const struct in6_addr *addr)
1354 {
1355 	u32 kvdl_index;
1356 	int err;
1357 
1358 	err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
1359 	if (err)
1360 		return err;
1361 
1362 	err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
1363 						     kvdl_index, true);
1364 	if (err)
1365 		goto err_sfd_write;
1366 
1367 	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
1368 	if (err)
1369 		/* Replace can fail only when creating a new mapping, so removing
1370 		 * the FDB entry in the error path is OK.
1371 		 */
1372 		goto err_addr_replace;
1373 
1374 	return 0;
1375 
1376 err_addr_replace:
1377 	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
1378 					       false);
1379 err_sfd_write:
1380 	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
1381 	return err;
1382 }
1383 
1384 static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
1385 					     const char *mac, u16 fid,
1386 					     const struct in6_addr *addr)
1387 {
1388 	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
1389 	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
1390 	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
1391 }
1392 
1393 static int
1394 mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
1395 			     u16 fid, const struct in6_addr *addr, bool adding)
1396 {
1397 	if (adding)
1398 		return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
1399 							addr);
1400 
1401 	mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
1402 	return 0;
1403 }
1404 
1405 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1406 					  const char *mac, u16 fid,
1407 					  enum mlxsw_sp_l3proto proto,
1408 					  const union mlxsw_sp_l3addr *addr,
1409 					  bool adding, bool dynamic)
1410 {
1411 	switch (proto) {
1412 	case MLXSW_SP_L3_PROTO_IPV4:
1413 		return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
1414 						    addr->addr4, adding);
1415 	case MLXSW_SP_L3_PROTO_IPV6:
1416 		return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
1417 						    &addr->addr6, adding);
1418 	default:
1419 		WARN_ON(1);
1420 		return -EOPNOTSUPP;
1421 	}
1422 }
1423 
1424 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1425 				     const char *mac, u16 fid, bool adding,
1426 				     enum mlxsw_reg_sfd_rec_action action,
1427 				     enum mlxsw_reg_sfd_rec_policy policy)
1428 {
1429 	char *sfd_pl;
1430 	u8 num_rec;
1431 	int err;
1432 
1433 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1434 	if (!sfd_pl)
1435 		return -ENOMEM;
1436 
1437 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1438 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1439 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1440 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1441 	if (err)
1442 		goto out;
1443 
1444 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1445 		err = -EBUSY;
1446 
1447 out:
1448 	kfree(sfd_pl);
1449 	return err;
1450 }
1451 
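/* All of the SFD writers in this file share the same consistency check:
 * num_rec is sampled after packing, the register is written, and the number
 * of records reported back by the device in sfd_pl must match; anything
 * less is treated as -EBUSY.  A hypothetical helper factoring out that
 * pattern could look like this (sketch only, not part of the driver):
 *
 *	static int mlxsw_sp_sfd_write_checked(struct mlxsw_sp *mlxsw_sp,
 *					      char *sfd_pl)
 *	{
 *		u8 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 *		int err;
 *
 *		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
 *		if (err)
 *			return err;
 *		return num_rec == mlxsw_reg_sfd_num_rec_get(sfd_pl) ?
 *		       0 : -EBUSY;
 *	}
 */
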
1452 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1453 				   const char *mac, u16 fid, bool adding,
1454 				   bool dynamic)
1455 {
1456 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1457 					 MLXSW_REG_SFD_REC_ACTION_NOP,
1458 					 mlxsw_sp_sfd_rec_policy(dynamic));
1459 }
1460 
1461 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1462 			bool adding)
1463 {
1464 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1465 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1466 					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1467 }
1468 
1469 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1470 				       const char *mac, u16 fid, u16 lag_vid,
1471 				       bool adding, bool dynamic)
1472 {
1473 	char *sfd_pl;
1474 	u8 num_rec;
1475 	int err;
1476 
1477 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1478 	if (!sfd_pl)
1479 		return -ENOMEM;
1480 
1481 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1482 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1483 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1484 				  lag_vid, lag_id);
1485 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1486 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1487 	if (err)
1488 		goto out;
1489 
1490 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1491 		err = -EBUSY;
1492 
1493 out:
1494 	kfree(sfd_pl);
1495 	return err;
1496 }
1497 
1498 static int
1499 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1500 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1501 {
1502 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1503 	struct net_device *orig_dev = fdb_info->info.dev;
1504 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1505 	struct mlxsw_sp_bridge_device *bridge_device;
1506 	struct mlxsw_sp_bridge_port *bridge_port;
1507 	u16 fid_index, vid;
1508 
1509 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1510 	if (!bridge_port)
1511 		return -EINVAL;
1512 
1513 	bridge_device = bridge_port->bridge_device;
1514 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1515 							       bridge_device,
1516 							       fdb_info->vid);
1517 	if (!mlxsw_sp_port_vlan)
1518 		return 0;
1519 
1520 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1521 	vid = mlxsw_sp_port_vlan->vid;
1522 
1523 	if (!bridge_port->lagged)
1524 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1525 					       bridge_port->system_port,
1526 					       fdb_info->addr, fid_index,
1527 					       adding, false);
1528 	else
1529 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1530 						   bridge_port->lag_id,
1531 						   fdb_info->addr, fid_index,
1532 						   vid, adding, false);
1533 }
1534 
1535 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1536 				u16 fid, u16 mid_idx, bool adding)
1537 {
1538 	char *sfd_pl;
1539 	u8 num_rec;
1540 	int err;
1541 
1542 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1543 	if (!sfd_pl)
1544 		return -ENOMEM;
1545 
1546 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1547 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1548 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1549 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1550 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1551 	if (err)
1552 		goto out;
1553 
1554 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1555 		err = -EBUSY;
1556 
1557 out:
1558 	kfree(sfd_pl);
1559 	return err;
1560 }
1561 
1562 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1563 					 long *ports_bitmap,
1564 					 bool set_router_port)
1565 {
1566 	char *smid2_pl;
1567 	int err, i;
1568 
1569 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
1570 	if (!smid2_pl)
1571 		return -ENOMEM;
1572 
1573 	mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
1574 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1575 		if (mlxsw_sp->ports[i])
1576 			mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
1577 	}
1578 
1579 	mlxsw_reg_smid2_port_mask_set(smid2_pl,
1580 				      mlxsw_sp_router_port(mlxsw_sp), 1);
1581 
1582 	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1583 		mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
1584 
1585 	mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
1586 				 set_router_port);
1587 
1588 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
1589 	kfree(smid2_pl);
1590 	return err;
1591 }
1592 
1593 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1594 				  u16 mid_idx, bool add)
1595 {
1596 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1597 	char *smid2_pl;
1598 	int err;
1599 
1600 	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
1601 	if (!smid2_pl)
1602 		return -ENOMEM;
1603 
1604 	mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
1605 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
1606 	kfree(smid2_pl);
1607 	return err;
1608 }
1609 
1610 static struct
1611 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1612 				const unsigned char *addr,
1613 				u16 fid)
1614 {
1615 	struct mlxsw_sp_mid *mid;
1616 
1617 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1618 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1619 			return mid;
1620 	}
1621 	return NULL;
1622 }
1623 
1624 static void
1625 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1626 				      struct mlxsw_sp_bridge_port *bridge_port,
1627 				      unsigned long *ports_bitmap)
1628 {
1629 	struct mlxsw_sp_port *mlxsw_sp_port;
1630 	u64 max_lag_members, i;
1631 	int lag_id;
1632 
1633 	if (!bridge_port->lagged) {
1634 		set_bit(bridge_port->system_port, ports_bitmap);
1635 	} else {
1636 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1637 						     MAX_LAG_MEMBERS);
1638 		lag_id = bridge_port->lag_id;
1639 		for (i = 0; i < max_lag_members; i++) {
1640 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1641 								 lag_id, i);
1642 			if (mlxsw_sp_port)
1643 				set_bit(mlxsw_sp_port->local_port,
1644 					ports_bitmap);
1645 		}
1646 	}
1647 }
1648 
1649 static void
1650 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1651 				struct mlxsw_sp_bridge_device *bridge_device,
1652 				struct mlxsw_sp *mlxsw_sp)
1653 {
1654 	struct mlxsw_sp_bridge_port *bridge_port;
1655 
1656 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1657 		if (bridge_port->mrouter) {
1658 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1659 							      bridge_port,
1660 							      flood_bitmap);
1661 		}
1662 	}
1663 }
1664 
1665 static bool
1666 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1667 			    struct mlxsw_sp_mid *mid,
1668 			    struct mlxsw_sp_bridge_device *bridge_device)
1669 {
1670 	long *flood_bitmap;
1671 	int num_of_ports;
1672 	u16 mid_idx;
1673 	int err;
1674 
1675 	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1676 				      MLXSW_SP_MID_MAX);
1677 	if (mid_idx == MLXSW_SP_MID_MAX)
1678 		return false;
1679 
1680 	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1681 	flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
1682 	if (!flood_bitmap)
1683 		return false;
1684 
1685 	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1686 	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1687 
1688 	mid->mid = mid_idx;
1689 	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1690 					    bridge_device->mrouter);
1691 	bitmap_free(flood_bitmap);
1692 	if (err)
1693 		return false;
1694 
1695 	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1696 				   true);
1697 	if (err)
1698 		return false;
1699 
1700 	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1701 	mid->in_hw = true;
1702 	return true;
1703 }
1704 
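/* Programming an MDB entry in mlxsw_sp_mc_write_mdb_entry() above is a
 * three-step sequence: pick a free MID index from the bitmap, program the
 * member-port list (the entry's ports plus any mrouter ports and,
 * optionally, the router port) through SMID2, and only then bind the
 * {MAC, FID} pair to that MID index with an SFD multicast record.  The
 * bitmap bit is reserved only after both register writes succeed, so a
 * failed attempt never leaks a MID index.
 */
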
1705 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1706 					struct mlxsw_sp_mid *mid)
1707 {
1708 	if (!mid->in_hw)
1709 		return 0;
1710 
1711 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1712 	mid->in_hw = false;
1713 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1714 				    false);
1715 }
1716 
1717 static struct
1718 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1719 				  struct mlxsw_sp_bridge_device *bridge_device,
1720 				  const unsigned char *addr,
1721 				  u16 fid)
1722 {
1723 	struct mlxsw_sp_mid *mid;
1724 
1725 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1726 	if (!mid)
1727 		return NULL;
1728 
1729 	mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
1730 					  GFP_KERNEL);
1731 	if (!mid->ports_in_mid)
1732 		goto err_ports_in_mid_alloc;
1733 
1734 	ether_addr_copy(mid->addr, addr);
1735 	mid->fid = fid;
1736 	mid->in_hw = false;
1737 
1738 	if (!bridge_device->multicast_enabled)
1739 		goto out;
1740 
1741 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1742 		goto err_write_mdb_entry;
1743 
1744 out:
1745 	list_add_tail(&mid->list, &bridge_device->mids_list);
1746 	return mid;
1747 
1748 err_write_mdb_entry:
1749 	bitmap_free(mid->ports_in_mid);
1750 err_ports_in_mid_alloc:
1751 	kfree(mid);
1752 	return NULL;
1753 }
1754 
1755 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1756 					 struct mlxsw_sp_mid *mid)
1757 {
1758 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1759 	int err = 0;
1760 
1761 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1762 	if (bitmap_empty(mid->ports_in_mid,
1763 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1764 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1765 		list_del(&mid->list);
1766 		bitmap_free(mid->ports_in_mid);
1767 		kfree(mid);
1768 	}
1769 	return err;
1770 }
1771 
1772 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1773 				 const struct switchdev_obj_port_mdb *mdb)
1774 {
1775 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1776 	struct net_device *orig_dev = mdb->obj.orig_dev;
1777 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1778 	struct net_device *dev = mlxsw_sp_port->dev;
1779 	struct mlxsw_sp_bridge_device *bridge_device;
1780 	struct mlxsw_sp_bridge_port *bridge_port;
1781 	struct mlxsw_sp_mid *mid;
1782 	u16 fid_index;
1783 	int err = 0;
1784 
1785 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1786 	if (!bridge_port)
1787 		return 0;
1788 
1789 	bridge_device = bridge_port->bridge_device;
1790 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1791 							       bridge_device,
1792 							       mdb->vid);
1793 	if (!mlxsw_sp_port_vlan)
1794 		return 0;
1795 
1796 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1797 
1798 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1799 	if (!mid) {
1800 		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1801 					  fid_index);
1802 		if (!mid) {
1803 			netdev_err(dev, "Unable to allocate MC group\n");
1804 			return -ENOMEM;
1805 		}
1806 	}
1807 	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1808 
1809 	if (!bridge_device->multicast_enabled)
1810 		return 0;
1811 
1812 	if (bridge_port->mrouter)
1813 		return 0;
1814 
1815 	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1816 	if (err) {
1817 		netdev_err(dev, "Unable to set SMID\n");
1818 		goto err_out;
1819 	}
1820 
1821 	return 0;
1822 
1823 err_out:
1824 	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1825 	return err;
1826 }
1827 
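/* Synchronize the device with a change in the bridge's multicast state: write
 * all known MDB entries to the device when 'multicast_enabled' is set and
 * remove them from the device when it is cleared.
 */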
1828 static void
1829 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1830 				   struct mlxsw_sp_bridge_device
1831 				   *bridge_device)
1832 {
1833 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1834 	struct mlxsw_sp_mid *mid;
1835 	bool mc_enabled;
1836 
1837 	mc_enabled = bridge_device->multicast_enabled;
1838 
1839 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1840 		if (mc_enabled)
1841 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1842 						    bridge_device);
1843 		else
1844 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1845 	}
1846 }
1847 
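/* When the port's multicast router state changes, add it to (or remove it
 * from) the SMID of every MID in the bridge it is not already a member of,
 * according to 'add'.
 */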
1848 static void
1849 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1850 				 struct mlxsw_sp_bridge_port *bridge_port,
1851 				 bool add)
1852 {
1853 	struct mlxsw_sp_bridge_device *bridge_device;
1854 	struct mlxsw_sp_mid *mid;
1855 
1856 	bridge_device = bridge_port->bridge_device;
1857 
1858 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1859 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1860 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1861 	}
1862 }
1863 
1864 static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
1865 				 const struct switchdev_obj *obj,
1866 				 struct netlink_ext_ack *extack)
1867 {
1868 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1869 	const struct switchdev_obj_port_vlan *vlan;
1870 	int err = 0;
1871 
1872 	switch (obj->id) {
1873 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1874 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1875 
1876 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
1877 
1878 		/* The event is emitted before the changes are actually
1879 		 * applied to the bridge. Therefore schedule the respin
1880 		 * call for later, so that the respin logic sees the
1881 		 * updated bridge state.
1882 		 */
1883 		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
1884 		break;
1885 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1886 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1887 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1888 		break;
1889 	default:
1890 		err = -EOPNOTSUPP;
1891 		break;
1892 	}
1893 
1894 	return err;
1895 }
1896 
1897 static void
1898 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1899 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1900 {
1901 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1902 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1903 	u16 proto;
1904 
1905 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1906 	if (WARN_ON(!mlxsw_sp_port_vlan))
1907 		return;
1908 
1909 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1910 	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
1911 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
1912 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1913 	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1914 }
1915 
1916 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1917 				   const struct switchdev_obj_port_vlan *vlan)
1918 {
1919 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1920 	struct net_device *orig_dev = vlan->obj.orig_dev;
1921 	struct mlxsw_sp_bridge_port *bridge_port;
1922 
1923 	if (netif_is_bridge_master(orig_dev))
1924 		return -EOPNOTSUPP;
1925 
1926 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1927 	if (WARN_ON(!bridge_port))
1928 		return -EINVAL;
1929 
1930 	if (!bridge_port->bridge_device->vlan_enabled)
1931 		return 0;
1932 
1933 	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
1934 
1935 	return 0;
1936 }
1937 
1938 static int
1939 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1940 			struct mlxsw_sp_bridge_port *bridge_port,
1941 			struct mlxsw_sp_mid *mid)
1942 {
1943 	struct net_device *dev = mlxsw_sp_port->dev;
1944 	int err;
1945 
1946 	if (bridge_port->bridge_device->multicast_enabled &&
1947 	    !bridge_port->mrouter) {
1948 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1949 		if (err)
1950 			netdev_err(dev, "Unable to remove port from SMID\n");
1951 	}
1952 
1953 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1954 	if (err)
1955 		netdev_err(dev, "Unable to remove MC SFD\n");
1956 
1957 	return err;
1958 }
1959 
1960 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1961 				 const struct switchdev_obj_port_mdb *mdb)
1962 {
1963 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1964 	struct net_device *orig_dev = mdb->obj.orig_dev;
1965 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1966 	struct mlxsw_sp_bridge_device *bridge_device;
1967 	struct net_device *dev = mlxsw_sp_port->dev;
1968 	struct mlxsw_sp_bridge_port *bridge_port;
1969 	struct mlxsw_sp_mid *mid;
1970 	u16 fid_index;
1971 
1972 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1973 	if (!bridge_port)
1974 		return 0;
1975 
1976 	bridge_device = bridge_port->bridge_device;
1977 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1978 							       bridge_device,
1979 							       mdb->vid);
1980 	if (!mlxsw_sp_port_vlan)
1981 		return 0;
1982 
1983 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1984 
1985 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1986 	if (!mid) {
1987 		netdev_err(dev, "Unable to remove port from MC DB\n");
1988 		return -EINVAL;
1989 	}
1990 
1991 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
1992 }
1993 
1994 static void
1995 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1996 			       struct mlxsw_sp_bridge_port *bridge_port)
1997 {
1998 	struct mlxsw_sp_bridge_device *bridge_device;
1999 	struct mlxsw_sp_mid *mid, *tmp;
2000 
2001 	bridge_device = bridge_port->bridge_device;
2002 
2003 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
2004 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
2005 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
2006 						mid);
2007 		} else if (bridge_device->multicast_enabled &&
2008 			   bridge_port->mrouter) {
2009 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
2010 		}
2011 	}
2012 }
2013 
2014 static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
2015 				 const struct switchdev_obj *obj)
2016 {
2017 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2018 	int err = 0;
2019 
2020 	switch (obj->id) {
2021 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
2022 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
2023 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
2024 		break;
2025 	case SWITCHDEV_OBJ_ID_PORT_MDB:
2026 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
2027 					    SWITCHDEV_OBJ_PORT_MDB(obj));
2028 		break;
2029 	default:
2030 		err = -EOPNOTSUPP;
2031 		break;
2032 	}
2033 
2034 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2035 
2036 	return err;
2037 }
2038 
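/* Return any member port of the given LAG to act as its representor, e.g.
 * when processing FDB notifications that are reported per LAG ID.
 */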
2039 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
2040 						   u16 lag_id)
2041 {
2042 	struct mlxsw_sp_port *mlxsw_sp_port;
2043 	u64 max_lag_members;
2044 	int i;
2045 
2046 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
2047 					     MAX_LAG_MEMBERS);
2048 	for (i = 0; i < max_lag_members; i++) {
2049 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2050 		if (mlxsw_sp_port)
2051 			return mlxsw_sp_port;
2052 	}
2053 	return NULL;
2054 }
2055 
2056 static int
2057 mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
2058 				     struct mlxsw_sp_port *mlxsw_sp_port,
2059 				     struct netlink_ext_ack *extack)
2060 {
2061 	if (is_vlan_dev(bridge_port->dev)) {
2062 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
2063 		return -EINVAL;
2064 	}
2065 
2066 	/* Port is no longer usable as a router interface */
2067 	if (mlxsw_sp_port->default_vlan->fid)
2068 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
2069 
2070 	return 0;
2071 }
2072 
2073 static int
2074 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2075 				struct mlxsw_sp_bridge_port *bridge_port,
2076 				struct mlxsw_sp_port *mlxsw_sp_port,
2077 				struct netlink_ext_ack *extack)
2078 {
2079 	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
2080 						    extack);
2081 }
2082 
2083 static void
2084 mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
2085 {
2086 	/* Make sure untagged frames are allowed to ingress */
2087 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
2088 			       ETH_P_8021Q);
2089 }
2090 
2091 static void
2092 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2093 				 struct mlxsw_sp_bridge_port *bridge_port,
2094 				 struct mlxsw_sp_port *mlxsw_sp_port)
2095 {
2096 	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
2097 }
2098 
2099 static int
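/* Map a VXLAN device to the 802.1Q FID of the VLAN it is mapped to (either
 * the passed VLAN or the PVID / egress untagged VLAN on its bridge port) and
 * enable NVE on that FID. Fails if the FID already has a VNI associated with
 * it.
 */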
2100 mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2101 				      const struct net_device *vxlan_dev,
2102 				      u16 vid, u16 ethertype,
2103 				      struct netlink_ext_ack *extack)
2104 {
2105 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2106 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2107 	struct mlxsw_sp_nve_params params = {
2108 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2109 		.vni = vxlan->cfg.vni,
2110 		.dev = vxlan_dev,
2111 		.ethertype = ethertype,
2112 	};
2113 	struct mlxsw_sp_fid *fid;
2114 	int err;
2115 
2116 	/* If the VLAN is 0, we need to find the VLAN that is configured as
2117 	 * PVID and egress untagged on the bridge port of the VxLAN device.
2118 	 * It is possible that no such VLAN exists.
2119 	 */
2120 	if (!vid) {
2121 		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
2122 		if (err || !vid)
2123 			return err;
2124 	}
2125 
2126 	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2127 	if (IS_ERR(fid)) {
2128 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
2129 		return PTR_ERR(fid);
2130 	}
2131 
2132 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2133 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2134 		err = -EINVAL;
2135 		goto err_vni_exists;
2136 	}
2137 
2138 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2139 	if (err)
2140 		goto err_nve_fid_enable;
2141 
2142 	return 0;
2143 
2144 err_nve_fid_enable:
2145 err_vni_exists:
2146 	mlxsw_sp_fid_put(fid);
2147 	return err;
2148 }
2149 
2150 static int
2151 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2152 				 const struct net_device *vxlan_dev, u16 vid,
2153 				 struct netlink_ext_ack *extack)
2154 {
2155 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2156 						     vid, ETH_P_8021Q, extack);
2157 }
2158 
2159 static struct net_device *
2160 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2161 {
2162 	struct net_device *dev;
2163 	struct list_head *iter;
2164 
2165 	netdev_for_each_lower_dev(br_dev, dev, iter) {
2166 		u16 pvid;
2167 		int err;
2168 
2169 		if (!netif_is_vxlan(dev))
2170 			continue;
2171 
2172 		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2173 		if (err || pvid != vid)
2174 			continue;
2175 
2176 		return dev;
2177 	}
2178 
2179 	return NULL;
2180 }
2181 
2182 static struct mlxsw_sp_fid *
2183 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2184 			      u16 vid, struct netlink_ext_ack *extack)
2185 {
2186 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2187 
2188 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2189 }
2190 
2191 static struct mlxsw_sp_fid *
2192 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2193 				 u16 vid)
2194 {
2195 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2196 
2197 	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
2198 }
2199 
2200 static u16
2201 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2202 			      const struct mlxsw_sp_fid *fid)
2203 {
2204 	return mlxsw_sp_fid_8021q_vid(fid);
2205 }
2206 
2207 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
2208 	.port_join	= mlxsw_sp_bridge_8021q_port_join,
2209 	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
2210 	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
2211 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2212 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2213 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2214 };
2215 
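/* Check whether any of the port's {Port, VID} pairs is already enslaved to
 * 'br_dev'. Used by the 802.1D join path to reject bridging VLAN uppers of
 * the same port to the same VLAN-unaware bridge.
 */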
2216 static bool
2217 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2218 			   const struct net_device *br_dev)
2219 {
2220 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2221 
2222 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2223 			    list) {
2224 		if (mlxsw_sp_port_vlan->bridge_port &&
2225 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2226 		    br_dev)
2227 			return true;
2228 	}
2229 
2230 	return false;
2231 }
2232 
2233 static int
2234 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2235 				struct mlxsw_sp_bridge_port *bridge_port,
2236 				struct mlxsw_sp_port *mlxsw_sp_port,
2237 				struct netlink_ext_ack *extack)
2238 {
2239 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2240 	struct net_device *dev = bridge_port->dev;
2241 	u16 vid;
2242 
2243 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2244 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2245 	if (WARN_ON(!mlxsw_sp_port_vlan))
2246 		return -EINVAL;
2247 
2248 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2249 		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2250 		return -EINVAL;
2251 	}
2252 
2253 	/* Port is no longer usable as a router interface */
2254 	if (mlxsw_sp_port_vlan->fid)
2255 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2256 
2257 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
2258 					      extack);
2259 }
2260 
2261 static void
2262 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2263 				 struct mlxsw_sp_bridge_port *bridge_port,
2264 				 struct mlxsw_sp_port *mlxsw_sp_port)
2265 {
2266 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2267 	struct net_device *dev = bridge_port->dev;
2268 	u16 vid;
2269 
2270 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2271 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2272 	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2273 		return;
2274 
2275 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2276 }
2277 
2278 static int
2279 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2280 				 const struct net_device *vxlan_dev, u16 vid,
2281 				 struct netlink_ext_ack *extack)
2282 {
2283 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2284 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2285 	struct mlxsw_sp_nve_params params = {
2286 		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2287 		.vni = vxlan->cfg.vni,
2288 		.dev = vxlan_dev,
2289 		.ethertype = ETH_P_8021Q,
2290 	};
2291 	struct mlxsw_sp_fid *fid;
2292 	int err;
2293 
2294 	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2295 	if (IS_ERR(fid)) {
2296 		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2297 		return -EINVAL;
2298 	}
2299 
2300 	if (mlxsw_sp_fid_vni_is_set(fid)) {
2301 		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2302 		err = -EINVAL;
2303 		goto err_vni_exists;
2304 	}
2305 
2306 	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2307 	if (err)
2308 		goto err_nve_fid_enable;
2309 
2310 	return 0;
2311 
2312 err_nve_fid_enable:
2313 err_vni_exists:
2314 	mlxsw_sp_fid_put(fid);
2315 	return err;
2316 }
2317 
2318 static struct mlxsw_sp_fid *
2319 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2320 			      u16 vid, struct netlink_ext_ack *extack)
2321 {
2322 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2323 
2324 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2325 }
2326 
2327 static struct mlxsw_sp_fid *
2328 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2329 				 u16 vid)
2330 {
2331 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2332 
2333 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2334 	if (vid)
2335 		return NULL;
2336 
2337 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2338 }
2339 
2340 static u16
2341 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2342 			      const struct mlxsw_sp_fid *fid)
2343 {
2344 	return 0;
2345 }
2346 
2347 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2348 	.port_join	= mlxsw_sp_bridge_8021d_port_join,
2349 	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
2350 	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
2351 	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
2352 	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
2353 	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
2354 };
2355 
2356 static int
2357 mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2358 				 struct mlxsw_sp_bridge_port *bridge_port,
2359 				 struct mlxsw_sp_port *mlxsw_sp_port,
2360 				 struct netlink_ext_ack *extack)
2361 {
2362 	int err;
2363 
2364 	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
2365 	if (err)
2366 		return err;
2367 
2368 	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
2369 						   extack);
2370 	if (err)
2371 		goto err_bridge_vlan_aware_port_join;
2372 
2373 	return 0;
2374 
2375 err_bridge_vlan_aware_port_join:
2376 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2377 	return err;
2378 }
2379 
2380 static void
2381 mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2382 				  struct mlxsw_sp_bridge_port *bridge_port,
2383 				  struct mlxsw_sp_port *mlxsw_sp_port)
2384 {
2385 	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
2386 	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
2387 }
2388 
2389 static int
2390 mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2391 				  const struct net_device *vxlan_dev, u16 vid,
2392 				  struct netlink_ext_ack *extack)
2393 {
2394 	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
2395 						     vid, ETH_P_8021AD, extack);
2396 }
2397 
2398 static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
2399 	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
2400 	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
2401 	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
2402 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2403 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2404 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2405 };
2406 
2407 static int
2408 mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2409 				  struct mlxsw_sp_bridge_port *bridge_port,
2410 				  struct mlxsw_sp_port *mlxsw_sp_port,
2411 				  struct netlink_ext_ack *extack)
2412 {
2413 	int err;
2414 
2415 	/* The EtherType of decapsulated packets is determined at the egress
2416 	 * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
2417 	 * co-exist.
2418 	 */
2419 	err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
2420 	if (err)
2421 		return err;
2422 
2423 	err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
2424 					       mlxsw_sp_port, extack);
2425 	if (err)
2426 		goto err_bridge_8021ad_port_join;
2427 
2428 	return 0;
2429 
2430 err_bridge_8021ad_port_join:
2431 	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
2432 	return err;
2433 }
2434 
2435 static void
2436 mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2437 				   struct mlxsw_sp_bridge_port *bridge_port,
2438 				   struct mlxsw_sp_port *mlxsw_sp_port)
2439 {
2440 	mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
2441 					  mlxsw_sp_port);
2442 	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
2443 }
2444 
2445 static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
2446 	.port_join	= mlxsw_sp2_bridge_8021ad_port_join,
2447 	.port_leave	= mlxsw_sp2_bridge_8021ad_port_leave,
2448 	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
2449 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
2450 	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
2451 	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
2452 };
2453 
2454 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2455 			      struct net_device *brport_dev,
2456 			      struct net_device *br_dev,
2457 			      struct netlink_ext_ack *extack)
2458 {
2459 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2460 	struct mlxsw_sp_bridge_device *bridge_device;
2461 	struct mlxsw_sp_bridge_port *bridge_port;
2462 	int err;
2463 
2464 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
2465 					       extack);
2466 	if (IS_ERR(bridge_port))
2467 		return PTR_ERR(bridge_port);
2468 	bridge_device = bridge_port->bridge_device;
2469 
2470 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2471 					    mlxsw_sp_port, extack);
2472 	if (err)
2473 		goto err_port_join;
2474 
2475 	return 0;
2476 
2477 err_port_join:
2478 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2479 	return err;
2480 }
2481 
2482 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2483 				struct net_device *brport_dev,
2484 				struct net_device *br_dev)
2485 {
2486 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2487 	struct mlxsw_sp_bridge_device *bridge_device;
2488 	struct mlxsw_sp_bridge_port *bridge_port;
2489 
2490 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2491 	if (!bridge_device)
2492 		return;
2493 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2494 	if (!bridge_port)
2495 		return;
2496 
2497 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2498 				       mlxsw_sp_port);
2499 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2500 }
2501 
2502 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2503 			       const struct net_device *br_dev,
2504 			       const struct net_device *vxlan_dev, u16 vid,
2505 			       struct netlink_ext_ack *extack)
2506 {
2507 	struct mlxsw_sp_bridge_device *bridge_device;
2508 
2509 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2510 	if (WARN_ON(!bridge_device))
2511 		return -EINVAL;
2512 
2513 	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
2514 					      extack);
2515 }
2516 
2517 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
2518 				 const struct net_device *vxlan_dev)
2519 {
2520 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2521 	struct mlxsw_sp_fid *fid;
2522 
2523 	/* If the VxLAN device is down, then the FID does not have a VNI */
2524 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
2525 	if (!fid)
2526 		return;
2527 
2528 	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2529 	/* Drop both the reference we just took during lookup and the reference
2530 	 * the VXLAN device took.
2531 	 */
2532 	mlxsw_sp_fid_put(fid);
2533 	mlxsw_sp_fid_put(fid);
2534 }
2535 
2536 static void
2537 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2538 				      enum mlxsw_sp_l3proto *proto,
2539 				      union mlxsw_sp_l3addr *addr)
2540 {
2541 	if (vxlan_addr->sa.sa_family == AF_INET) {
2542 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2543 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2544 	} else {
2545 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2546 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2547 	}
2548 }
2549 
2550 static void
2551 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2552 				      const union mlxsw_sp_l3addr *addr,
2553 				      union vxlan_addr *vxlan_addr)
2554 {
2555 	switch (proto) {
2556 	case MLXSW_SP_L3_PROTO_IPV4:
2557 		vxlan_addr->sa.sa_family = AF_INET;
2558 		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2559 		break;
2560 	case MLXSW_SP_L3_PROTO_IPV6:
2561 		vxlan_addr->sa.sa_family = AF_INET6;
2562 		vxlan_addr->sin6.sin6_addr = addr->addr6;
2563 		break;
2564 	}
2565 }
2566 
2567 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2568 					      const char *mac,
2569 					      enum mlxsw_sp_l3proto proto,
2570 					      union mlxsw_sp_l3addr *addr,
2571 					      __be32 vni, bool adding)
2572 {
2573 	struct switchdev_notifier_vxlan_fdb_info info;
2574 	struct vxlan_dev *vxlan = netdev_priv(dev);
2575 	enum switchdev_notifier_type type;
2576 
2577 	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
2578 			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
2579 	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
2580 	info.remote_port = vxlan->cfg.dst_port;
2581 	info.remote_vni = vni;
2582 	info.remote_ifindex = 0;
2583 	ether_addr_copy(info.eth_addr, mac);
2584 	info.vni = vni;
2585 	info.offloaded = adding;
2586 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2587 }
2588 
2589 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2590 					    const char *mac,
2591 					    enum mlxsw_sp_l3proto proto,
2592 					    union mlxsw_sp_l3addr *addr,
2593 					    __be32 vni,
2594 					    bool adding)
2595 {
2596 	if (netif_is_vxlan(dev))
2597 		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2598 						  adding);
2599 }
2600 
2601 static void
2602 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2603 			    const char *mac, u16 vid,
2604 			    struct net_device *dev, bool offloaded)
2605 {
2606 	struct switchdev_notifier_fdb_info info = {};
2607 
2608 	info.addr = mac;
2609 	info.vid = vid;
2610 	info.offloaded = offloaded;
2611 	call_switchdev_notifiers(type, dev, &info.info, NULL);
2612 }
2613 
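/* Process a single learned / aged-out MAC record from the SFN register:
 * program the FDB entry to the device and notify the bridge driver. Records
 * that cannot be matched to an offloaded {Port, VID} are removed from the
 * device without emitting a notification.
 */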
2614 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2615 					    char *sfn_pl, int rec_index,
2616 					    bool adding)
2617 {
2618 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2619 	struct mlxsw_sp_bridge_device *bridge_device;
2620 	struct mlxsw_sp_bridge_port *bridge_port;
2621 	struct mlxsw_sp_port *mlxsw_sp_port;
2622 	enum switchdev_notifier_type type;
2623 	char mac[ETH_ALEN];
2624 	u16 local_port;
2625 	u16 vid, fid;
2626 	bool do_notification = true;
2627 	int err;
2628 
2629 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2630 
2631 	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2632 		return;
2633 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2634 	if (!mlxsw_sp_port) {
2635 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2636 		goto just_remove;
2637 	}
2638 
2639 	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2640 		goto just_remove;
2641 
2642 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2643 	if (!mlxsw_sp_port_vlan) {
2644 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2645 		goto just_remove;
2646 	}
2647 
2648 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2649 	if (!bridge_port) {
2650 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2651 		goto just_remove;
2652 	}
2653 
2654 	bridge_device = bridge_port->bridge_device;
2655 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2656 
2657 do_fdb_op:
2658 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2659 				      adding, true);
2660 	if (err) {
2661 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2662 		return;
2663 	}
2664 
2665 	if (!do_notification)
2666 		return;
2667 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2668 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2669 
2670 	return;
2671 
2672 just_remove:
2673 	adding = false;
2674 	do_notification = false;
2675 	goto do_fdb_op;
2676 }
2677 
2678 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2679 						char *sfn_pl, int rec_index,
2680 						bool adding)
2681 {
2682 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2683 	struct mlxsw_sp_bridge_device *bridge_device;
2684 	struct mlxsw_sp_bridge_port *bridge_port;
2685 	struct mlxsw_sp_port *mlxsw_sp_port;
2686 	enum switchdev_notifier_type type;
2687 	char mac[ETH_ALEN];
2688 	u16 lag_vid = 0;
2689 	u16 lag_id;
2690 	u16 vid, fid;
2691 	bool do_notification = true;
2692 	int err;
2693 
2694 	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2695 	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2696 	if (!mlxsw_sp_port) {
2697 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2698 		goto just_remove;
2699 	}
2700 
2701 	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2702 		goto just_remove;
2703 
2704 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2705 	if (!mlxsw_sp_port_vlan) {
2706 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2707 		goto just_remove;
2708 	}
2709 
2710 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2711 	if (!bridge_port) {
2712 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2713 		goto just_remove;
2714 	}
2715 
2716 	bridge_device = bridge_port->bridge_device;
2717 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2718 	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
2719 		  mlxsw_sp_port_vlan->vid : 0;
2720 
2721 do_fdb_op:
2722 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2723 					  adding, true);
2724 	if (err) {
2725 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2726 		return;
2727 	}
2728 
2729 	if (!do_notification)
2730 		return;
2731 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2732 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2733 
2734 	return;
2735 
2736 just_remove:
2737 	adding = false;
2738 	do_notification = false;
2739 	goto do_fdb_op;
2740 }
2741 
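/* Resolve the NVE device, VLAN and VNI behind a learned / aged-out tunnel FDB
 * record. The device must be up and, when a new entry is learned, learning
 * must be enabled on its bridge port and (for VXLAN devices) VXLAN_F_LEARN
 * must be set; its bridge master must also be offloaded.
 */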
2742 static int
2743 __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2744 					    const struct mlxsw_sp_fid *fid,
2745 					    bool adding,
2746 					    struct net_device **nve_dev,
2747 					    u16 *p_vid, __be32 *p_vni)
2748 {
2749 	struct mlxsw_sp_bridge_device *bridge_device;
2750 	struct net_device *br_dev, *dev;
2751 	int nve_ifindex;
2752 	int err;
2753 
2754 	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
2755 	if (err)
2756 		return err;
2757 
2758 	err = mlxsw_sp_fid_vni(fid, p_vni);
2759 	if (err)
2760 		return err;
2761 
2762 	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
2763 	if (!dev)
2764 		return -EINVAL;
2765 	*nve_dev = dev;
2766 
2767 	if (!netif_running(dev))
2768 		return -EINVAL;
2769 
2770 	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
2771 		return -EINVAL;
2772 
2773 	if (adding && netif_is_vxlan(dev)) {
2774 		struct vxlan_dev *vxlan = netdev_priv(dev);
2775 
2776 		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
2777 			return -EINVAL;
2778 	}
2779 
2780 	br_dev = netdev_master_upper_dev_get(dev);
2781 	if (!br_dev)
2782 		return -EINVAL;
2783 
2784 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2785 	if (!bridge_device)
2786 		return -EINVAL;
2787 
2788 	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
2789 
2790 	return 0;
2791 }
2792 
2793 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2794 						      char *sfn_pl,
2795 						      int rec_index,
2796 						      bool adding)
2797 {
2798 	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2799 	enum switchdev_notifier_type type;
2800 	struct net_device *nve_dev;
2801 	union mlxsw_sp_l3addr addr;
2802 	struct mlxsw_sp_fid *fid;
2803 	char mac[ETH_ALEN];
2804 	u16 fid_index, vid;
2805 	__be32 vni;
2806 	u32 uip;
2807 	int err;
2808 
2809 	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2810 				       &uip, &sfn_proto);
2811 
2812 	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2813 	if (!fid)
2814 		goto err_fid_lookup;
2815 
2816 	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2817 					      (enum mlxsw_sp_l3proto) sfn_proto,
2818 					      &addr);
2819 	if (err)
2820 		goto err_ip_resolve;
2821 
2822 	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2823 							  &nve_dev, &vid, &vni);
2824 	if (err)
2825 		goto err_fdb_process;
2826 
2827 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2828 					     (enum mlxsw_sp_l3proto) sfn_proto,
2829 					     &addr, adding, true);
2830 	if (err)
2831 		goto err_fdb_op;
2832 
2833 	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2834 					(enum mlxsw_sp_l3proto) sfn_proto,
2835 					&addr, vni, adding);
2836 
2837 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2838 			SWITCHDEV_FDB_DEL_TO_BRIDGE;
2839 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2840 
2841 	mlxsw_sp_fid_put(fid);
2842 
2843 	return;
2844 
2845 err_fdb_op:
2846 err_fdb_process:
2847 err_ip_resolve:
2848 	mlxsw_sp_fid_put(fid);
2849 err_fid_lookup:
2850 	/* Remove an FDB entry in case we cannot process it. Otherwise the
2851 	 * device will keep sending the same notification over and over again.
2852 	 */
2853 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2854 				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2855 				       false, true);
2856 }
2857 
2858 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2859 					    char *sfn_pl, int rec_index)
2860 {
2861 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2862 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2863 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2864 						rec_index, true);
2865 		break;
2866 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2867 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2868 						rec_index, false);
2869 		break;
2870 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2871 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2872 						    rec_index, true);
2873 		break;
2874 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2875 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2876 						    rec_index, false);
2877 		break;
2878 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2879 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2880 							  rec_index, true);
2881 		break;
2882 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2883 		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2884 							  rec_index, false);
2885 		break;
2886 	}
2887 }
2888 
2889 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
2890 					      bool no_delay)
2891 {
2892 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2893 	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
2894 
2895 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2896 			       msecs_to_jiffies(interval));
2897 }
2898 
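/* Upper bound on the number of SFN queries processed in one invocation of the
 * FDB notification work. If all queries returned a full set of records, the
 * work is rescheduled without delay, since more records are likely pending.
 */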
2899 #define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
2900 
2901 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2902 {
2903 	struct mlxsw_sp_bridge *bridge;
2904 	struct mlxsw_sp *mlxsw_sp;
2905 	char *sfn_pl;
2906 	int queries;
2907 	u8 num_rec;
2908 	int i;
2909 	int err;
2910 
2911 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2912 	if (!sfn_pl)
2913 		return;
2914 
2915 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2916 	mlxsw_sp = bridge->mlxsw_sp;
2917 
2918 	rtnl_lock();
2919 	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
2920 	while (queries > 0) {
2921 		mlxsw_reg_sfn_pack(sfn_pl);
2922 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2923 		if (err) {
2924 			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2925 			goto out;
2926 		}
2927 		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2928 		for (i = 0; i < num_rec; i++)
2929 			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2930 		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
2931 			goto out;
2932 		queries--;
2933 	}
2934 
2935 out:
2936 	rtnl_unlock();
2937 	kfree(sfn_pl);
2938 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
2939 }
2940 
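/* Deferred work item used to process switchdev FDB events outside of the
 * atomic notifier context. It carries a copy of the bridge or VXLAN FDB info
 * and holds a reference on 'dev' until the work completes.
 */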
2941 struct mlxsw_sp_switchdev_event_work {
2942 	struct work_struct work;
2943 	union {
2944 		struct switchdev_notifier_fdb_info fdb_info;
2945 		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2946 	};
2947 	struct net_device *dev;
2948 	unsigned long event;
2949 };
2950 
2951 static void
2952 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2953 					  struct mlxsw_sp_switchdev_event_work *
2954 					  switchdev_work,
2955 					  struct mlxsw_sp_fid *fid, __be32 vni)
2956 {
2957 	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2958 	struct switchdev_notifier_fdb_info *fdb_info;
2959 	struct net_device *dev = switchdev_work->dev;
2960 	enum mlxsw_sp_l3proto proto;
2961 	union mlxsw_sp_l3addr addr;
2962 	int err;
2963 
2964 	fdb_info = &switchdev_work->fdb_info;
2965 	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2966 	if (err)
2967 		return;
2968 
2969 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2970 					      &proto, &addr);
2971 
2972 	switch (switchdev_work->event) {
2973 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2974 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2975 						     vxlan_fdb_info.eth_addr,
2976 						     mlxsw_sp_fid_index(fid),
2977 						     proto, &addr, true, false);
2978 		if (err)
2979 			return;
2980 		vxlan_fdb_info.offloaded = true;
2981 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2982 					 &vxlan_fdb_info.info, NULL);
2983 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2984 					    vxlan_fdb_info.eth_addr,
2985 					    fdb_info->vid, dev, true);
2986 		break;
2987 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2988 		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2989 						     vxlan_fdb_info.eth_addr,
2990 						     mlxsw_sp_fid_index(fid),
2991 						     proto, &addr, false,
2992 						     false);
2993 		vxlan_fdb_info.offloaded = false;
2994 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2995 					 &vxlan_fdb_info.info, NULL);
2996 		break;
2997 	}
2998 }
2999 
3000 static void
3001 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
3002 					switchdev_work)
3003 {
3004 	struct mlxsw_sp_bridge_device *bridge_device;
3005 	struct net_device *dev = switchdev_work->dev;
3006 	struct net_device *br_dev;
3007 	struct mlxsw_sp *mlxsw_sp;
3008 	struct mlxsw_sp_fid *fid;
3009 	__be32 vni;
3010 	int err;
3011 
3012 	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
3013 	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
3014 		return;
3015 
3016 	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
3017 	    (!switchdev_work->fdb_info.added_by_user ||
3018 	     switchdev_work->fdb_info.is_local))
3019 		return;
3020 
3021 	if (!netif_running(dev))
3022 		return;
3023 	br_dev = netdev_master_upper_dev_get(dev);
3024 	if (!br_dev)
3025 		return;
3026 	if (!netif_is_bridge_master(br_dev))
3027 		return;
3028 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3029 	if (!mlxsw_sp)
3030 		return;
3031 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3032 	if (!bridge_device)
3033 		return;
3034 
3035 	fid = bridge_device->ops->fid_lookup(bridge_device,
3036 					     switchdev_work->fdb_info.vid);
3037 	if (!fid)
3038 		return;
3039 
3040 	err = mlxsw_sp_fid_vni(fid, &vni);
3041 	if (err)
3042 		goto out;
3043 
3044 	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
3045 						  vni);
3046 
3047 out:
3048 	mlxsw_sp_fid_put(fid);
3049 }
3050 
3051 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
3052 {
3053 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
3054 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3055 	struct net_device *dev = switchdev_work->dev;
3056 	struct switchdev_notifier_fdb_info *fdb_info;
3057 	struct mlxsw_sp_port *mlxsw_sp_port;
3058 	int err;
3059 
3060 	rtnl_lock();
3061 	if (netif_is_vxlan(dev)) {
3062 		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
3063 		goto out;
3064 	}
3065 
3066 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3067 	if (!mlxsw_sp_port)
3068 		goto out;
3069 
3070 	switch (switchdev_work->event) {
3071 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3072 		fdb_info = &switchdev_work->fdb_info;
3073 		if (!fdb_info->added_by_user || fdb_info->is_local)
3074 			break;
3075 		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
3076 		if (err)
3077 			break;
3078 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3079 					    fdb_info->addr,
3080 					    fdb_info->vid, dev, true);
3081 		break;
3082 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3083 		fdb_info = &switchdev_work->fdb_info;
3084 		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
3085 		break;
3086 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3087 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3088 		/* These events are only used to potentially update an existing
3089 		 * SPAN mirror.
3090 		 */
3091 		break;
3092 	}
3093 
3094 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
3095 
3096 out:
3097 	rtnl_unlock();
3098 	kfree(switchdev_work->fdb_info.addr);
3099 	kfree(switchdev_work);
3100 	dev_put(dev);
3101 }
3102 
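/* Add a VXLAN FDB entry to the device. An all-zeros MAC is programmed as a
 * flood entry (remote VTEP IP) for the FID, whereas any other MAC is
 * programmed as a unicast tunnel FDB entry, but only if the bridge FDB points
 * the MAC at the VXLAN device.
 */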
3103 static void
3104 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
3105 				 struct mlxsw_sp_switchdev_event_work *
3106 				 switchdev_work)
3107 {
3108 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3109 	struct mlxsw_sp_bridge_device *bridge_device;
3110 	struct net_device *dev = switchdev_work->dev;
3111 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
3112 	enum mlxsw_sp_l3proto proto;
3113 	union mlxsw_sp_l3addr addr;
3114 	struct net_device *br_dev;
3115 	struct mlxsw_sp_fid *fid;
3116 	u16 vid;
3117 	int err;
3118 
3119 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3120 	br_dev = netdev_master_upper_dev_get(dev);
3121 
3122 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3123 	if (!bridge_device)
3124 		return;
3125 
3126 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3127 	if (!fid)
3128 		return;
3129 
3130 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3131 					      &proto, &addr);
3132 
3133 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3134 		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
3135 		if (err) {
3136 			mlxsw_sp_fid_put(fid);
3137 			return;
3138 		}
3139 		vxlan_fdb_info->offloaded = true;
3140 		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3141 					 &vxlan_fdb_info->info, NULL);
3142 		mlxsw_sp_fid_put(fid);
3143 		return;
3144 	}
3145 
3146 	/* The device has a single FDB table, whereas Linux has two - one
3147 	 * in the bridge driver and another in the VxLAN driver. We only
3148 	 * program an entry into the device if the MAC points to the VxLAN
3149 	 * device in the bridge's FDB table.
3150 	 */
3151 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3152 	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
3153 		goto err_br_fdb_find;
3154 
3155 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3156 					     mlxsw_sp_fid_index(fid), proto,
3157 					     &addr, true, false);
3158 	if (err)
3159 		goto err_fdb_tunnel_uc_op;
3160 	vxlan_fdb_info->offloaded = true;
3161 	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3162 				 &vxlan_fdb_info->info, NULL);
3163 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3164 				    vxlan_fdb_info->eth_addr, vid, dev, true);
3165 
3166 	mlxsw_sp_fid_put(fid);
3167 
3168 	return;
3169 
3170 err_fdb_tunnel_uc_op:
3171 err_br_fdb_find:
3172 	mlxsw_sp_fid_put(fid);
3173 }
3174 
3175 static void
3176 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
3177 				 struct mlxsw_sp_switchdev_event_work *
3178 				 switchdev_work)
3179 {
3180 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3181 	struct mlxsw_sp_bridge_device *bridge_device;
3182 	struct net_device *dev = switchdev_work->dev;
3183 	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3184 	u8 all_zeros_mac[ETH_ALEN] = { 0 };
3185 	enum mlxsw_sp_l3proto proto;
3186 	union mlxsw_sp_l3addr addr;
3187 	struct mlxsw_sp_fid *fid;
3188 	u16 vid;
3189 
3190 	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3191 
3192 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3193 	if (!bridge_device)
3194 		return;
3195 
3196 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3197 	if (!fid)
3198 		return;
3199 
3200 	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3201 					      &proto, &addr);
3202 
3203 	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3204 		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3205 		mlxsw_sp_fid_put(fid);
3206 		return;
3207 	}
3208 
3209 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3210 				       mlxsw_sp_fid_index(fid), proto, &addr,
3211 				       false, false);
3212 	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3213 	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3214 				    vxlan_fdb_info->eth_addr, vid, dev, false);
3215 
3216 	mlxsw_sp_fid_put(fid);
3217 }
3218 
3219 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3220 {
3221 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
3222 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3223 	struct net_device *dev = switchdev_work->dev;
3224 	struct mlxsw_sp *mlxsw_sp;
3225 	struct net_device *br_dev;
3226 
3227 	rtnl_lock();
3228 
3229 	if (!netif_running(dev))
3230 		goto out;
3231 	br_dev = netdev_master_upper_dev_get(dev);
3232 	if (!br_dev)
3233 		goto out;
3234 	if (!netif_is_bridge_master(br_dev))
3235 		goto out;
3236 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3237 	if (!mlxsw_sp)
3238 		goto out;
3239 
3240 	switch (switchdev_work->event) {
3241 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3242 		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3243 		break;
3244 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3245 		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3246 		break;
3247 	}
3248 
3249 out:
3250 	rtnl_unlock();
3251 	kfree(switchdev_work);
3252 	dev_put(dev);
3253 }
3254 
3255 static int
3256 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3257 				      switchdev_work,
3258 				      struct switchdev_notifier_info *info)
3259 {
3260 	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3261 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3262 	struct vxlan_config *cfg = &vxlan->cfg;
3263 	struct netlink_ext_ack *extack;
3264 
3265 	extack = switchdev_notifier_info_to_extack(info);
3266 	vxlan_fdb_info = container_of(info,
3267 				      struct switchdev_notifier_vxlan_fdb_info,
3268 				      info);
3269 
3270 	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3271 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3272 		return -EOPNOTSUPP;
3273 	}
3274 	if (vxlan_fdb_info->remote_vni != cfg->vni ||
3275 	    vxlan_fdb_info->vni != cfg->vni) {
3276 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3277 		return -EOPNOTSUPP;
3278 	}
3279 	if (vxlan_fdb_info->remote_ifindex) {
3280 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3281 		return -EOPNOTSUPP;
3282 	}
3283 	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3284 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3285 		return -EOPNOTSUPP;
3286 	}
3287 	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3288 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
3289 		return -EOPNOTSUPP;
3290 	}
3291 
3292 	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3293 
3294 	return 0;
3295 }
3296 
3297 /* Called under rcu_read_lock() */
3298 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3299 				    unsigned long event, void *ptr)
3300 {
3301 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3302 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
3303 	struct switchdev_notifier_fdb_info *fdb_info;
3304 	struct switchdev_notifier_info *info = ptr;
3305 	struct net_device *br_dev;
3306 	int err;
3307 
3308 	if (event == SWITCHDEV_PORT_ATTR_SET) {
3309 		err = switchdev_handle_port_attr_set(dev, ptr,
3310 						     mlxsw_sp_port_dev_check,
3311 						     mlxsw_sp_port_attr_set);
3312 		return notifier_from_errno(err);
3313 	}
3314 
3315 	/* Tunnel devices are not our uppers, so check their master instead */
3316 	br_dev = netdev_master_upper_dev_get_rcu(dev);
3317 	if (!br_dev)
3318 		return NOTIFY_DONE;
3319 	if (!netif_is_bridge_master(br_dev))
3320 		return NOTIFY_DONE;
3321 	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
3322 		return NOTIFY_DONE;
3323 
3324 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3325 	if (!switchdev_work)
3326 		return NOTIFY_BAD;
3327 
3328 	switchdev_work->dev = dev;
3329 	switchdev_work->event = event;
3330 
3331 	switch (event) {
3332 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
3333 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
3334 	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3335 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3336 		fdb_info = container_of(info,
3337 					struct switchdev_notifier_fdb_info,
3338 					info);
3339 		INIT_WORK(&switchdev_work->work,
3340 			  mlxsw_sp_switchdev_bridge_fdb_event_work);
3341 		memcpy(&switchdev_work->fdb_info, ptr,
3342 		       sizeof(switchdev_work->fdb_info));
3343 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3344 		if (!switchdev_work->fdb_info.addr)
3345 			goto err_addr_alloc;
3346 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3347 				fdb_info->addr);
3348 		/* Take a reference on the device. This can be either an
3349 		 * upper device containing mlxsw_sp_port or just a
3350 		 * mlxsw_sp_port.
3351 		 */
3352 		dev_hold(dev);
3353 		break;
3354 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3355 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3356 		INIT_WORK(&switchdev_work->work,
3357 			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
3358 		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3359 							    info);
3360 		if (err)
3361 			goto err_vxlan_work_prepare;
3362 		dev_hold(dev);
3363 		break;
3364 	default:
3365 		kfree(switchdev_work);
3366 		return NOTIFY_DONE;
3367 	}
3368 
3369 	mlxsw_core_schedule_work(&switchdev_work->work);
3370 
3371 	return NOTIFY_DONE;
3372 
3373 err_vxlan_work_prepare:
3374 err_addr_alloc:
3375 	kfree(switchdev_work);
3376 	return NOTIFY_BAD;
3377 }
3378 
3379 struct notifier_block mlxsw_sp_switchdev_notifier = {
3380 	.notifier_call = mlxsw_sp_switchdev_event,
3381 };
3382 
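/* Handle the addition of a VLAN on the bridge port of a VXLAN device enslaved
 * to a VLAN-aware bridge. Depending on the PVID / egress untagged flags and
 * on the VLAN currently mapped to the VNI, the VNI might need to be mapped to
 * the new VLAN or unmapped entirely (see the numbered cases below).
 */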
3383 static int
3384 mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3385 				  struct mlxsw_sp_bridge_device *bridge_device,
3386 				  const struct net_device *vxlan_dev, u16 vid,
3387 				  bool flag_untagged, bool flag_pvid,
3388 				  struct netlink_ext_ack *extack)
3389 {
3390 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3391 	__be32 vni = vxlan->cfg.vni;
3392 	struct mlxsw_sp_fid *fid;
3393 	u16 old_vid;
3394 	int err;
3395 
3396 	/* We cannot have the same VLAN as PVID and egress untagged on multiple
3397 	 * VxLAN devices. Note that we get this notification before the VLAN is
3398 	 * actually added to the bridge's database, so it is not possible for
3399 	 * the lookup function to return 'vxlan_dev'
3400 	 */
3401 	if (flag_untagged && flag_pvid &&
3402 	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
3403 		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
3404 		return -EINVAL;
3405 	}
3406 
3407 	if (!netif_running(vxlan_dev))
3408 		return 0;
3409 
3410 	/* First case: FID is not associated with this VNI, but the new VLAN
3411 	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
3412 	 * it exists
3413 	 */
3414 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3415 	if (!fid) {
3416 		if (!flag_untagged || !flag_pvid)
3417 			return 0;
3418 		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
3419 						      vid, extack);
3420 	}
3421 
3422 	/* Second case: FID is associated with the VNI and the VLAN associated
3423 	 * with the FID is the same as the notified VLAN. This means the flags
3424 	 * (PVID / egress untagged) were toggled and that NVE should be
3425 	 * disabled on the FID
3426 	 */
3427 	old_vid = mlxsw_sp_fid_8021q_vid(fid);
3428 	if (vid == old_vid) {
3429 		if (WARN_ON(flag_untagged && flag_pvid)) {
3430 			mlxsw_sp_fid_put(fid);
3431 			return -EINVAL;
3432 		}
3433 		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3434 		mlxsw_sp_fid_put(fid);
3435 		return 0;
3436 	}
3437 
3438 	/* Third case: A new VLAN was configured on the VxLAN device, but this
3439 	 * VLAN is not PVID, so there is nothing to do.
3440 	 */
3441 	if (!flag_pvid) {
3442 		mlxsw_sp_fid_put(fid);
3443 		return 0;
3444 	}
3445 
3446 	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
3447 	 * mapped to the VNI should be unmapped.
3448 	 */
3449 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3450 	mlxsw_sp_fid_put(fid);
3451 
3452 	/* Fifth case: The new VLAN is also egress untagged, which means the
3453 	 * VLAN needs to be mapped to the VNI
3454 	 */
3455 	if (!flag_untagged)
3456 		return 0;
3457 
3458 	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
3459 	if (err)
3460 		goto err_vxlan_join;
3461 
3462 	return 0;
3463 
3464 err_vxlan_join:
3465 	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
3466 	return err;
3467 }
3468 
3469 static void
3470 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3471 				  struct mlxsw_sp_bridge_device *bridge_device,
3472 				  const struct net_device *vxlan_dev, u16 vid)
3473 {
3474 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3475 	__be32 vni = vxlan->cfg.vni;
3476 	struct mlxsw_sp_fid *fid;
3477 
3478 	if (!netif_running(vxlan_dev))
3479 		return;
3480 
3481 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3482 	if (!fid)
3483 		return;
3484 
3485 	/* A different VLAN than the one mapped to the VNI is deleted */
3486 	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3487 		goto out;
3488 
3489 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3490 
3491 out:
3492 	mlxsw_sp_fid_put(fid);
3493 }
3494 
3495 static int
3496 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3497 				   struct switchdev_notifier_port_obj_info *
3498 				   port_obj_info)
3499 {
3500 	struct switchdev_obj_port_vlan *vlan =
3501 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3502 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3503 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3504 	struct mlxsw_sp_bridge_device *bridge_device;
3505 	struct netlink_ext_ack *extack;
3506 	struct mlxsw_sp *mlxsw_sp;
3507 	struct net_device *br_dev;
3508 
3509 	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3510 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3511 	if (!br_dev)
3512 		return 0;
3513 
3514 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3515 	if (!mlxsw_sp)
3516 		return 0;
3517 
3518 	port_obj_info->handled = true;
3519 
3520 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3521 	if (!bridge_device)
3522 		return -EINVAL;
3523 
3524 	if (!bridge_device->vlan_enabled)
3525 		return 0;
3526 
3527 	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3528 						 vxlan_dev, vlan->vid,
3529 						 flag_untagged,
3530 						 flag_pvid, extack);
3531 }
3532 
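/* Handle SWITCHDEV_OBJ_ID_PORT_VLAN deletion on a VxLAN device enslaved to
 * an offloaded, VLAN-aware bridge.
 */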
3533 static void
3534 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3535 				   struct switchdev_notifier_port_obj_info *
3536 				   port_obj_info)
3537 {
3538 	struct switchdev_obj_port_vlan *vlan =
3539 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3540 	struct mlxsw_sp_bridge_device *bridge_device;
3541 	struct mlxsw_sp *mlxsw_sp;
3542 	struct net_device *br_dev;
3543 
3544 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3545 	if (!br_dev)
3546 		return;
3547 
3548 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3549 	if (!mlxsw_sp)
3550 		return;
3551 
3552 	port_obj_info->handled = true;
3553 
3554 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3555 	if (!bridge_device)
3556 		return;
3557 
3558 	if (!bridge_device->vlan_enabled)
3559 		return;
3560 
3561 	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
3562 					  vlan->vid);
3563 }
3564 
3565 static int
3566 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3567 					struct switchdev_notifier_port_obj_info *
3568 					port_obj_info)
3569 {
3570 	int err = 0;
3571 
3572 	switch (port_obj_info->obj->id) {
3573 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3574 		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3575 							 port_obj_info);
3576 		break;
3577 	default:
3578 		break;
3579 	}
3580 
3581 	return err;
3582 }
3583 
3584 static void
3585 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3586 					struct switchdev_notifier_port_obj_info *
3587 					port_obj_info)
3588 {
3589 	switch (port_obj_info->obj->id) {
3590 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3591 		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3592 		break;
3593 	default:
3594 		break;
3595 	}
3596 }
3597 
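/* Blocking switchdev notifier: object addition and deletion on VxLAN
 * devices is handled locally, while objects and attributes for mlxsw ports
 * (or devices stacked on top of them) are dispatched through the generic
 * switchdev helpers with mlxsw_sp_port_dev_check() as the filter.
 */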
3598 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3599 					     unsigned long event, void *ptr)
3600 {
3601 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3602 	int err = 0;
3603 
3604 	switch (event) {
3605 	case SWITCHDEV_PORT_OBJ_ADD:
3606 		if (netif_is_vxlan(dev))
3607 			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3608 		else
3609 			err = switchdev_handle_port_obj_add(dev, ptr,
3610 							mlxsw_sp_port_dev_check,
3611 							mlxsw_sp_port_obj_add);
3612 		return notifier_from_errno(err);
3613 	case SWITCHDEV_PORT_OBJ_DEL:
3614 		if (netif_is_vxlan(dev))
3615 			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3616 		else
3617 			err = switchdev_handle_port_obj_del(dev, ptr,
3618 							mlxsw_sp_port_dev_check,
3619 							mlxsw_sp_port_obj_del);
3620 		return notifier_from_errno(err);
3621 	case SWITCHDEV_PORT_ATTR_SET:
3622 		err = switchdev_handle_port_attr_set(dev, ptr,
3623 						     mlxsw_sp_port_dev_check,
3624 						     mlxsw_sp_port_attr_set);
3625 		return notifier_from_errno(err);
3626 	}
3627 
3628 	return NOTIFY_DONE;
3629 }
3630 
3631 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
3632 	.notifier_call = mlxsw_sp_switchdev_blocking_event,
3633 };
3634 
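/* Return the STP state cached for the bridge port. */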
3635 u8
3636 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
3637 {
3638 	return bridge_port->stp_state;
3639 }
3640 
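/* FDB offload initialization: program the default ageing time, register
 * the switchdev notifiers and kick off the periodic FDB notification work
 * at the default learning interval.
 */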
3641 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3642 {
3643 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3644 	struct notifier_block *nb;
3645 	int err;
3646 
3647 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3648 	if (err) {
3649 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3650 		return err;
3651 	}
3652 
3653 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3654 	if (err) {
3655 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3656 		return err;
3657 	}
3658 
3659 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3660 	err = register_switchdev_blocking_notifier(nb);
3661 	if (err) {
3662 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3663 		goto err_register_switchdev_blocking_notifier;
3664 	}
3665 
3666 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3667 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3668 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
3669 	return 0;
3670 
3671 err_register_switchdev_blocking_notifier:
3672 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3673 	return err;
3674 }
3675 
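/* Tear down FDB offload in reverse order: stop the notification work and
 * unregister both switchdev notifiers.
 */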
3676 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3677 {
3678 	struct notifier_block *nb;
3679 
3680 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3681 
3682 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3683 	unregister_switchdev_blocking_notifier(nb);
3684 
3685 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3686 }
3687 
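/* Per-ASIC switchdev hooks. The only difference between the Spectrum-1 and
 * Spectrum-2 variants below is the set of 802.1ad bridge ops they install.
 */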
3688 static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3689 {
3690 	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
3691 }
3692 
3693 const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
3694 	.init	= mlxsw_sp1_switchdev_init,
3695 };
3696 
3697 static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3698 {
3699 	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
3700 }
3701 
3702 const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
3703 	.init	= mlxsw_sp2_switchdev_init,
3704 };
3705 
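/* Allocate the per-ASIC bridge state, install the 802.1Q / 802.1D bridge
 * ops and the ASIC-specific 802.1ad ops, and initialize FDB offload.
 */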
3706 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3707 {
3708 	struct mlxsw_sp_bridge *bridge;
3709 
3710 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
3711 	if (!bridge)
3712 		return -ENOMEM;
3713 	mlxsw_sp->bridge = bridge;
3714 	bridge->mlxsw_sp = mlxsw_sp;
3715 
3716 	INIT_LIST_HEAD(&bridge->bridges_list);
3717 
3718 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3719 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3720 
3721 	mlxsw_sp->switchdev_ops->init(mlxsw_sp);
3722 
3723 	return mlxsw_sp_fdb_init(mlxsw_sp);
3724 }
3725 
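/* Reverse of mlxsw_sp_switchdev_init(). All offloaded bridges are expected
 * to be gone by the time this is called.
 */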
3726 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3727 {
3728 	mlxsw_sp_fdb_fini(mlxsw_sp);
3729 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3730 	kfree(mlxsw_sp->bridge);
3731 }
3732 
3733