xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c (revision 02ff58dcf70ad7d11b01523dc404166ed11021da)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19 
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
/* Per-ASIC switchdev state: the FDB notification polling work, the FDB
 * ageing time and the list of bridge devices offloaded to this device.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	bool vlan_enabled_exists;	/* at most one VLAN-aware bridge */
	struct list_head bridges_list;	/* struct mlxsw_sp_bridge_device */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
45 
/* An offloaded bridge netdevice, together with its enslaved ports and
 * its multicast (MDB) entries.
 */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;		/* member of bridge->bridges_list */
	struct list_head ports_list;	/* struct mlxsw_sp_bridge_port */
	struct list_head mids_list;	/* multicast group entries */
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q or 802.1D ops */
};
56 
/* A netdevice (port, LAG or VLAN upper) enslaved to an offloaded
 * bridge. Reference counted via mlxsw_sp_bridge_port_get() / _put().
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* member of bridge_device->ports_list */
	struct list_head vlans_list;	/* struct mlxsw_sp_bridge_vlan */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* BR_LEARNING, BR_FLOOD, ... */
	bool mrouter;
	bool lagged;
	union {
		u16 lag_id;		/* valid when lagged */
		u16 system_port;	/* valid when !lagged */
	};
};
72 
/* A VLAN configured on a bridge port; collects the port VLANs of the
 * local ports that are members of this VLAN on the bridge port.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* member of bridge_port->vlans_list */
	struct list_head port_vlan_list; /* struct mlxsw_sp_port_vlan */
	u16 vid;
};
78 
/* Operations that differ between bridge flavors (selected at bridge
 * device creation): port enslavement, VXLAN join and FID management.
 */
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
99 
/* Forward declarations for helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port);

static void
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct mlxsw_sp_bridge_device
				   *bridge_device);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
118 
119 static struct mlxsw_sp_bridge_device *
120 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
121 			    const struct net_device *br_dev)
122 {
123 	struct mlxsw_sp_bridge_device *bridge_device;
124 
125 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
126 		if (bridge_device->dev == br_dev)
127 			return bridge_device;
128 
129 	return NULL;
130 }
131 
132 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
133 					 const struct net_device *br_dev)
134 {
135 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
136 }
137 
/* Callback for netdev_walk_all_upper_dev_rcu(): destroy the router
 * interface (RIF) configured on top of the upper device, if any.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);

	return 0;
}
146 
147 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
148 						struct net_device *dev)
149 {
150 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
151 	netdev_walk_all_upper_dev_rcu(dev,
152 				      mlxsw_sp_bridge_device_upper_rif_destroy,
153 				      mlxsw_sp);
154 }
155 
156 static struct mlxsw_sp_bridge_device *
157 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
158 			      struct net_device *br_dev)
159 {
160 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
161 	struct mlxsw_sp_bridge_device *bridge_device;
162 	bool vlan_enabled = br_vlan_enabled(br_dev);
163 
164 	if (vlan_enabled && bridge->vlan_enabled_exists) {
165 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
166 		return ERR_PTR(-EINVAL);
167 	}
168 
169 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
170 	if (!bridge_device)
171 		return ERR_PTR(-ENOMEM);
172 
173 	bridge_device->dev = br_dev;
174 	bridge_device->vlan_enabled = vlan_enabled;
175 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
176 	bridge_device->mrouter = br_multicast_router(br_dev);
177 	INIT_LIST_HEAD(&bridge_device->ports_list);
178 	if (vlan_enabled) {
179 		bridge->vlan_enabled_exists = true;
180 		bridge_device->ops = bridge->bridge_8021q_ops;
181 	} else {
182 		bridge_device->ops = bridge->bridge_8021d_ops;
183 	}
184 	INIT_LIST_HEAD(&bridge_device->mids_list);
185 	list_add(&bridge_device->list, &bridge->bridges_list);
186 
187 	return bridge_device;
188 }
189 
/* Tear down a bridge device once its last port left. RIFs configured on
 * top of the bridge must be destroyed before the state is freed.
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	/* All ports and MDB entries should be gone by now. */
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}
203 
/* Reuse the existing bridge device for this netdevice, or create one. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	return bridge_device ? bridge_device :
	       mlxsw_sp_bridge_device_create(bridge, br_dev);
}
216 
217 static void
218 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
219 			   struct mlxsw_sp_bridge_device *bridge_device)
220 {
221 	if (list_empty(&bridge_device->ports_list))
222 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
223 }
224 
225 static struct mlxsw_sp_bridge_port *
226 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
227 			    const struct net_device *brport_dev)
228 {
229 	struct mlxsw_sp_bridge_port *bridge_port;
230 
231 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
232 		if (bridge_port->dev == brport_dev)
233 			return bridge_port;
234 	}
235 
236 	return NULL;
237 }
238 
239 struct mlxsw_sp_bridge_port *
240 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
241 			  struct net_device *brport_dev)
242 {
243 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
244 	struct mlxsw_sp_bridge_device *bridge_device;
245 
246 	if (!br_dev)
247 		return NULL;
248 
249 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
250 	if (!bridge_device)
251 		return NULL;
252 
253 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
254 }
255 
256 static struct mlxsw_sp_bridge_port *
257 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
258 			    struct net_device *brport_dev)
259 {
260 	struct mlxsw_sp_bridge_port *bridge_port;
261 	struct mlxsw_sp_port *mlxsw_sp_port;
262 
263 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
264 	if (!bridge_port)
265 		return NULL;
266 
267 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
268 	bridge_port->lagged = mlxsw_sp_port->lagged;
269 	if (bridge_port->lagged)
270 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
271 	else
272 		bridge_port->system_port = mlxsw_sp_port->local_port;
273 	bridge_port->dev = brport_dev;
274 	bridge_port->bridge_device = bridge_device;
275 	bridge_port->stp_state = BR_STATE_DISABLED;
276 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
277 			     BR_MCAST_FLOOD;
278 	INIT_LIST_HEAD(&bridge_port->vlans_list);
279 	list_add(&bridge_port->list, &bridge_device->ports_list);
280 	bridge_port->ref_count = 1;
281 
282 	return bridge_port;
283 }
284 
285 static void
286 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
287 {
288 	list_del(&bridge_port->list);
289 	WARN_ON(!list_empty(&bridge_port->vlans_list));
290 	kfree(bridge_port);
291 }
292 
293 static bool
294 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
295 				    bridge_port)
296 {
297 	struct net_device *dev = bridge_port->dev;
298 	struct mlxsw_sp *mlxsw_sp;
299 
300 	if (is_vlan_dev(dev))
301 		mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
302 	else
303 		mlxsw_sp = mlxsw_sp_lower_get(dev);
304 
305 	/* In case ports were pulled from out of a bridged LAG, then
306 	 * it's possible the reference count isn't zero, yet the bridge
307 	 * port should be destroyed, as it's no longer an upper of ours.
308 	 */
309 	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
310 		return true;
311 	else if (bridge_port->ref_count == 0)
312 		return true;
313 	else
314 		return false;
315 }
316 
317 static struct mlxsw_sp_bridge_port *
318 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
319 			 struct net_device *brport_dev)
320 {
321 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
322 	struct mlxsw_sp_bridge_device *bridge_device;
323 	struct mlxsw_sp_bridge_port *bridge_port;
324 	int err;
325 
326 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
327 	if (bridge_port) {
328 		bridge_port->ref_count++;
329 		return bridge_port;
330 	}
331 
332 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
333 	if (IS_ERR(bridge_device))
334 		return ERR_CAST(bridge_device);
335 
336 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
337 	if (!bridge_port) {
338 		err = -ENOMEM;
339 		goto err_bridge_port_create;
340 	}
341 
342 	return bridge_port;
343 
344 err_bridge_port_create:
345 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
346 	return ERR_PTR(err);
347 }
348 
349 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
350 				     struct mlxsw_sp_bridge_port *bridge_port)
351 {
352 	struct mlxsw_sp_bridge_device *bridge_device;
353 
354 	bridge_port->ref_count--;
355 	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
356 		return;
357 	bridge_device = bridge_port->bridge_device;
358 	mlxsw_sp_bridge_port_destroy(bridge_port);
359 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
360 }
361 
362 static struct mlxsw_sp_port_vlan *
363 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
364 				  const struct mlxsw_sp_bridge_device *
365 				  bridge_device,
366 				  u16 vid)
367 {
368 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
369 
370 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
371 			    list) {
372 		if (!mlxsw_sp_port_vlan->bridge_port)
373 			continue;
374 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
375 		    bridge_device)
376 			continue;
377 		if (bridge_device->vlan_enabled &&
378 		    mlxsw_sp_port_vlan->vid != vid)
379 			continue;
380 		return mlxsw_sp_port_vlan;
381 	}
382 
383 	return NULL;
384 }
385 
386 static struct mlxsw_sp_port_vlan*
387 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
388 			       u16 fid_index)
389 {
390 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
391 
392 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
393 			    list) {
394 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
395 
396 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
397 			return mlxsw_sp_port_vlan;
398 	}
399 
400 	return NULL;
401 }
402 
403 static struct mlxsw_sp_bridge_vlan *
404 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
405 			  u16 vid)
406 {
407 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
408 
409 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
410 		if (bridge_vlan->vid == vid)
411 			return bridge_vlan;
412 	}
413 
414 	return NULL;
415 }
416 
417 static struct mlxsw_sp_bridge_vlan *
418 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
419 {
420 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
421 
422 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
423 	if (!bridge_vlan)
424 		return NULL;
425 
426 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
427 	bridge_vlan->vid = vid;
428 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
429 
430 	return bridge_vlan;
431 }
432 
433 static void
434 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
435 {
436 	list_del(&bridge_vlan->list);
437 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
438 	kfree(bridge_vlan);
439 }
440 
441 static struct mlxsw_sp_bridge_vlan *
442 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
443 {
444 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
445 
446 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
447 	if (bridge_vlan)
448 		return bridge_vlan;
449 
450 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
451 }
452 
453 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
454 {
455 	if (list_empty(&bridge_vlan->port_vlan_list))
456 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
457 }
458 
459 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
460 					   struct net_device *dev,
461 					   unsigned long *brport_flags)
462 {
463 	struct mlxsw_sp_bridge_port *bridge_port;
464 
465 	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
466 	if (WARN_ON(!bridge_port))
467 		return;
468 
469 	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
470 }
471 
472 static int mlxsw_sp_port_attr_get(struct net_device *dev,
473 				  struct switchdev_attr *attr)
474 {
475 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
476 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
477 
478 	switch (attr->id) {
479 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
480 		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
481 		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
482 		       attr->u.ppid.id_len);
483 		break;
484 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
485 		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
486 					       &attr->u.brport_flags);
487 		break;
488 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
489 		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
490 					       BR_MCAST_FLOOD;
491 		break;
492 	default:
493 		return -EOPNOTSUPP;
494 	}
495 
496 	return 0;
497 }
498 
499 static int
500 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
501 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
502 				  u8 state)
503 {
504 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
505 
506 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
507 			    bridge_vlan_node) {
508 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
509 			continue;
510 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
511 						 bridge_vlan->vid, state);
512 	}
513 
514 	return 0;
515 }
516 
/* Set the STP state on all VLANs of the bridge port, restoring the
 * previous state on the already-processed VLANs if any update fails.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	/* Cache the new state only after all VLANs were updated. */
	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Roll back the VLANs processed so far to the cached state. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
555 
556 static int
557 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
558 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
559 				    enum mlxsw_sp_flood_type packet_type,
560 				    bool member)
561 {
562 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
563 
564 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
565 			    bridge_vlan_node) {
566 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
567 			continue;
568 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
569 					      packet_type,
570 					      mlxsw_sp_port->local_port,
571 					      member);
572 	}
573 
574 	return 0;
575 }
576 
/* Set flood table membership of @packet_type for the port on all VLANs
 * of @bridge_port; on failure the already-updated VLANs are reverted to
 * the opposite membership.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Roll back the VLANs processed so far. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
604 
605 static int
606 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
607 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
608 				       bool set)
609 {
610 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
611 	u16 vid = bridge_vlan->vid;
612 
613 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
614 			    bridge_vlan_node) {
615 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
616 			continue;
617 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
618 	}
619 
620 	return 0;
621 }
622 
/* Toggle learning for the port on all VLANs of @bridge_port; on failure
 * the already-updated VLANs are reverted to the opposite setting.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Roll back the VLANs processed so far. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
647 
648 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
649 					   struct switchdev_trans *trans,
650 					   struct net_device *orig_dev,
651 					   unsigned long brport_flags)
652 {
653 	struct mlxsw_sp_bridge_port *bridge_port;
654 	int err;
655 
656 	if (switchdev_trans_ph_prepare(trans))
657 		return 0;
658 
659 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
660 						orig_dev);
661 	if (!bridge_port)
662 		return 0;
663 
664 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
665 						   MLXSW_SP_FLOOD_TYPE_UC,
666 						   brport_flags & BR_FLOOD);
667 	if (err)
668 		return err;
669 
670 	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
671 						brport_flags & BR_LEARNING);
672 	if (err)
673 		return err;
674 
675 	if (bridge_port->bridge_device->multicast_enabled)
676 		goto out;
677 
678 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
679 						   MLXSW_SP_FLOOD_TYPE_MC,
680 						   brport_flags &
681 						   BR_MCAST_FLOOD);
682 	if (err)
683 		return err;
684 
685 out:
686 	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
687 	return 0;
688 }
689 
690 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
691 {
692 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
693 	int err;
694 
695 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
696 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
697 	if (err)
698 		return err;
699 	mlxsw_sp->bridge->ageing_time = ageing_time;
700 	return 0;
701 }
702 
703 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
704 					    struct switchdev_trans *trans,
705 					    unsigned long ageing_clock_t)
706 {
707 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
708 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
709 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
710 
711 	if (switchdev_trans_ph_prepare(trans)) {
712 		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
713 		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
714 			return -ERANGE;
715 		else
716 			return 0;
717 	}
718 
719 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
720 }
721 
722 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
723 					  struct switchdev_trans *trans,
724 					  struct net_device *orig_dev,
725 					  bool vlan_enabled)
726 {
727 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
728 	struct mlxsw_sp_bridge_device *bridge_device;
729 
730 	if (!switchdev_trans_ph_prepare(trans))
731 		return 0;
732 
733 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
734 	if (WARN_ON(!bridge_device))
735 		return -EINVAL;
736 
737 	if (bridge_device->vlan_enabled == vlan_enabled)
738 		return 0;
739 
740 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
741 	return -EINVAL;
742 }
743 
744 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
745 					  struct switchdev_trans *trans,
746 					  struct net_device *orig_dev,
747 					  bool is_port_mrouter)
748 {
749 	struct mlxsw_sp_bridge_port *bridge_port;
750 	int err;
751 
752 	if (switchdev_trans_ph_prepare(trans))
753 		return 0;
754 
755 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
756 						orig_dev);
757 	if (!bridge_port)
758 		return 0;
759 
760 	if (!bridge_port->bridge_device->multicast_enabled)
761 		goto out;
762 
763 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
764 						   MLXSW_SP_FLOOD_TYPE_MC,
765 						   is_port_mrouter);
766 	if (err)
767 		return err;
768 
769 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
770 					 is_port_mrouter);
771 out:
772 	bridge_port->mrouter = is_port_mrouter;
773 	return 0;
774 }
775 
776 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
777 {
778 	const struct mlxsw_sp_bridge_device *bridge_device;
779 
780 	bridge_device = bridge_port->bridge_device;
781 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
782 					bridge_port->flags & BR_MCAST_FLOOD;
783 }
784 
/* Toggle multicast awareness on the bridge and update MC flooding of
 * all its ports accordingly.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->multicast_enabled != !mc_disabled) {
		/* Flip the state before syncing so the MDB entries are
		 * programmed according to the new state.
		 */
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	/* Re-evaluate MC flood membership for every port of the bridge.
	 * NOTE(review): a mid-loop failure leaves the earlier ports
	 * updated with no rollback.
	 */
	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	/* No-op when the state already changed above; kept for the case
	 * where it did not.
	 */
	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}
826 
827 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
828 					 u16 mid_idx, bool add)
829 {
830 	char *smid_pl;
831 	int err;
832 
833 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
834 	if (!smid_pl)
835 		return -ENOMEM;
836 
837 	mlxsw_reg_smid_pack(smid_pl, mid_idx,
838 			    mlxsw_sp_router_port(mlxsw_sp), add);
839 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
840 	kfree(smid_pl);
841 	return err;
842 }
843 
844 static void
845 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
846 				   struct mlxsw_sp_bridge_device *bridge_device,
847 				   bool add)
848 {
849 	struct mlxsw_sp_mid *mid;
850 
851 	list_for_each_entry(mid, &bridge_device->mids_list, list)
852 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
853 }
854 
855 static int
856 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
857 				  struct switchdev_trans *trans,
858 				  struct net_device *orig_dev,
859 				  bool is_mrouter)
860 {
861 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
862 	struct mlxsw_sp_bridge_device *bridge_device;
863 
864 	if (switchdev_trans_ph_prepare(trans))
865 		return 0;
866 
867 	/* It's possible we failed to enslave the port, yet this
868 	 * operation is executed due to it being deferred.
869 	 */
870 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
871 	if (!bridge_device)
872 		return 0;
873 
874 	if (bridge_device->mrouter != is_mrouter)
875 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
876 						   is_mrouter);
877 	bridge_device->mrouter = is_mrouter;
878 	return 0;
879 }
880 
/* switchdev attribute setter: dispatch to the per-attribute handlers.
 * Handlers are mostly no-ops in the prepare phase; the ageing-time and
 * VLAN-filtering handlers validate in prepare instead.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Attribute changes may affect mirroring sessions; re-resolve
	 * SPAN in the commit phase.
	 */
	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
933 
/* Join the port VLAN to the FID provided by the bridge device's ops:
 * configure UC/MC/BC flooding for the port and map {port, VID} to the
 * FID. Failures are unwound in reverse order and the FID reference is
 * dropped.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* UC flooding follows BR_FLOOD, MC flooding the mrouter state or
	 * BR_MCAST_FLOOD (see mlxsw_sp_mc_flood()), BC always floods.
	 */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
983 
/* Reverse of mlxsw_sp_port_vlan_fid_join(): unmap {port, VID} from the
 * FID, clear flood membership and drop the FID reference, in reverse
 * order of the join.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Clear the pointer first so the port VLAN no longer appears as
	 * member in a FID (see mlxsw_sp_port_vlan_find_by_fid()).
	 */
	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
999 
1000 static u16
1001 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1002 			     u16 vid, bool is_pvid)
1003 {
1004 	if (is_pvid)
1005 		return vid;
1006 	else if (mlxsw_sp_port->pvid == vid)
1007 		return 0;	/* Dis-allow untagged packets */
1008 	else
1009 		return mlxsw_sp_port->pvid;
1010 }
1011 
/* Attach a port VLAN to a bridge port: join the FID, apply the bridge
 * port's learning and STP settings on the VID, link the port VLAN into
 * the matching bridge VLAN and take a bridge port reference. Errors
 * are unwound in reverse order.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port) {
		/* NOTE(review): presumably drops a reference taken by the
		 * caller for this repeated join — confirm against callers.
		 */
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
		return 0;
	}

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Keep the bridge port alive for as long as the VLAN is member. */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1064 
/* Inverse of mlxsw_sp_port_vlan_bridge_join(): unlink the port-VLAN from
 * the bridge VLAN, revert STP and learning state, flush FDB / MDB entries
 * when this was the last user, leave the FID and drop the bridge port
 * reference.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	/* Sample "last" state before unlinking: last_vlan - this is the
	 * bridge port's only VLAN; last_port - this port is the bridge
	 * VLAN's only member.
	 */
	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1100 
/* Add a single VLAN to a bridge port: make the port a (possibly untagged)
 * member of the VLAN, update the PVID and join the port-VLAN to the
 * bridge. On failure the previous PVID and membership are restored.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
1138 
1139 static int
1140 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1141 				const struct net_device *br_dev,
1142 				const struct switchdev_obj_port_vlan *vlan)
1143 {
1144 	struct mlxsw_sp_rif *rif;
1145 	struct mlxsw_sp_fid *fid;
1146 	u16 pvid;
1147 	u16 vid;
1148 
1149 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1150 	if (!rif)
1151 		return 0;
1152 	fid = mlxsw_sp_rif_fid(rif);
1153 	pvid = mlxsw_sp_fid_8021q_vid(fid);
1154 
1155 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1156 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1157 			if (vid != pvid) {
1158 				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1159 				return -EBUSY;
1160 			}
1161 		} else {
1162 			if (vid == pvid) {
1163 				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1164 				return -EBUSY;
1165 			}
1166 		}
1167 	}
1168 
1169 	return 0;
1170 }
1171 
/* SWITCHDEV_OBJ_ID_PORT_VLAN addition handler. Joins every VID in the
 * range to the bridge port during the commit phase.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		/* In the prepare phase, veto PVID changes on a VLAN-aware
		 * bridge that has a router interface bound to it.
		 */
		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		/* VLAN entries on the bridge device itself are otherwise
		 * not offloaded.
		 */
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* No per-VLAN state is kept for VLAN-unaware bridges */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}
1218 
1219 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1220 {
1221 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1222 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1223 }
1224 
1225 static int
1226 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1227 			       struct mlxsw_sp_bridge_port *bridge_port,
1228 			       u16 fid_index)
1229 {
1230 	bool lagged = bridge_port->lagged;
1231 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1232 	u16 system_port;
1233 
1234 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1235 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1236 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1237 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1238 
1239 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1240 }
1241 
1242 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1243 {
1244 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1245 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1246 }
1247 
1248 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1249 {
1250 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1251 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1252 }
1253 
/* Add or remove a unicast FDB record pointing at an NVE tunnel (SFD
 * unicast-tunnel record). Only an IPv4 underlay is currently supported.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
{
	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		uip = be32_to_cpu(addr->addr4);
		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
				     sfd_proto);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A changed record count after the write indicates the device did
	 * not process our record; report busy.
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1298 
/* Write a single unicast FDB record keyed by {MAC, FID} and pointing at
 * 'local_port'. 'action' selects the record action (e.g. forward or trap
 * to the router), 'dynamic' selects the record ageing policy.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A changed record count after the write indicates the device did
	 * not process our record; report busy.
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1327 
1328 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1329 				   const char *mac, u16 fid, bool adding,
1330 				   bool dynamic)
1331 {
1332 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1333 					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
1334 }
1335 
1336 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1337 			bool adding)
1338 {
1339 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1340 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1341 					 false);
1342 }
1343 
/* LAG variant of the unicast FDB write: the record is keyed by
 * {MAC, FID} and points at a LAG ID instead of a local port, with
 * 'lag_vid' carried in the record.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A changed record count after the write indicates the device did
	 * not process our record; report busy.
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1372 
1373 static int
1374 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1375 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1376 {
1377 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1378 	struct net_device *orig_dev = fdb_info->info.dev;
1379 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1380 	struct mlxsw_sp_bridge_device *bridge_device;
1381 	struct mlxsw_sp_bridge_port *bridge_port;
1382 	u16 fid_index, vid;
1383 
1384 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1385 	if (!bridge_port)
1386 		return -EINVAL;
1387 
1388 	bridge_device = bridge_port->bridge_device;
1389 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1390 							       bridge_device,
1391 							       fdb_info->vid);
1392 	if (!mlxsw_sp_port_vlan)
1393 		return 0;
1394 
1395 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1396 	vid = mlxsw_sp_port_vlan->vid;
1397 
1398 	if (!bridge_port->lagged)
1399 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1400 					       bridge_port->system_port,
1401 					       fdb_info->addr, fid_index,
1402 					       adding, false);
1403 	else
1404 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1405 						   bridge_port->lag_id,
1406 						   fdb_info->addr, fid_index,
1407 						   vid, adding, false);
1408 }
1409 
/* Add or remove a multicast FDB record binding {MAC, FID} to the MID
 * index 'mid_idx' (whose member ports are programmed via SMID).
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A changed record count after the write indicates the device did
	 * not process our record; report busy.
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1436 
/* Write a complete SMID entry for MID 'mid_idx': egress is enabled for
 * every port in 'ports_bitmap' and, optionally, for the router port.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	/* Set the mask bit of every existing port (and the router port) so
	 * that all the per-port egress bits written below take effect.
	 */
	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}

	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	/* Enable egress for the MID's member (and mrouter) ports */
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
1467 
1468 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1469 				  u16 mid_idx, bool add)
1470 {
1471 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1472 	char *smid_pl;
1473 	int err;
1474 
1475 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1476 	if (!smid_pl)
1477 		return -ENOMEM;
1478 
1479 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1480 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1481 	kfree(smid_pl);
1482 	return err;
1483 }
1484 
1485 static struct
1486 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1487 				const unsigned char *addr,
1488 				u16 fid)
1489 {
1490 	struct mlxsw_sp_mid *mid;
1491 
1492 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1493 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1494 			return mid;
1495 	}
1496 	return NULL;
1497 }
1498 
1499 static void
1500 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1501 				      struct mlxsw_sp_bridge_port *bridge_port,
1502 				      unsigned long *ports_bitmap)
1503 {
1504 	struct mlxsw_sp_port *mlxsw_sp_port;
1505 	u64 max_lag_members, i;
1506 	int lag_id;
1507 
1508 	if (!bridge_port->lagged) {
1509 		set_bit(bridge_port->system_port, ports_bitmap);
1510 	} else {
1511 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1512 						     MAX_LAG_MEMBERS);
1513 		lag_id = bridge_port->lag_id;
1514 		for (i = 0; i < max_lag_members; i++) {
1515 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1516 								 lag_id, i);
1517 			if (mlxsw_sp_port)
1518 				set_bit(mlxsw_sp_port->local_port,
1519 					ports_bitmap);
1520 		}
1521 	}
1522 }
1523 
1524 static void
1525 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1526 				struct mlxsw_sp_bridge_device *bridge_device,
1527 				struct mlxsw_sp *mlxsw_sp)
1528 {
1529 	struct mlxsw_sp_bridge_port *bridge_port;
1530 
1531 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1532 		if (bridge_port->mrouter) {
1533 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1534 							      bridge_port,
1535 							      flood_bitmap);
1536 		}
1537 	}
1538 }
1539 
/* Install a MID entry in the device: allocate a free MID index, program
 * its SMID with the member ports plus all mrouter ports (and optionally
 * the router port), then bind {MAC, FID} to the MID with an MC FDB
 * record. Returns true on success.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	/* Flood to the MID's member ports and to all mrouter ports */
	bitmap_copy(flood_bitmap,  mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	/* Only reserve the MID index once everything is programmed */
	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
1581 
/* Remove a MID entry from the device, if present: release its MID index
 * and delete the MC FDB record. No-op for software-only entries.
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
{
	if (!mid->in_hw)
		return 0;

	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    false);
}
1593 
/* Allocate a MID entry for {addr, fid} and link it into the bridge's
 * list. The entry is written to the device only when multicast snooping
 * is enabled; otherwise it stays software-only until
 * mlxsw_sp_bridge_mdb_mc_enable_sync() installs it.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	/* Member-port bitmap sized to the maximum number of local ports */
	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
1634 
1635 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1636 					 struct mlxsw_sp_mid *mid)
1637 {
1638 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1639 	int err = 0;
1640 
1641 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1642 	if (bitmap_empty(mid->ports_in_mid,
1643 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1644 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1645 		list_del(&mid->list);
1646 		kfree(mid->ports_in_mid);
1647 		kfree(mid);
1648 	}
1649 	return err;
1650 }
1651 
/* SWITCHDEV_OBJ_ID_PORT_MDB addition handler (commit phase only): join
 * this port to the {MAC, FID} MID, creating the MID entry on first use.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	/* Nothing to offload if the port is not a member of the VLAN */
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	/* With snooping disabled the entry stays software-only */
	if (!bridge_device->multicast_enabled)
		return 0;

	/* An mrouter port is already part of the MID's flood bitmap set by
	 * mlxsw_sp_mc_write_mdb_entry(); no per-port SMID bit is needed.
	 */
	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
1711 
1712 static void
1713 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1714 				   struct mlxsw_sp_bridge_device
1715 				   *bridge_device)
1716 {
1717 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1718 	struct mlxsw_sp_mid *mid;
1719 	bool mc_enabled;
1720 
1721 	mc_enabled = bridge_device->multicast_enabled;
1722 
1723 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1724 		if (mc_enabled)
1725 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1726 						    bridge_device);
1727 		else
1728 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1729 	}
1730 }
1731 
1732 static void
1733 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1734 				 struct mlxsw_sp_bridge_port *bridge_port,
1735 				 bool add)
1736 {
1737 	struct mlxsw_sp_bridge_device *bridge_device;
1738 	struct mlxsw_sp_mid *mid;
1739 
1740 	bridge_device = bridge_port->bridge_device;
1741 
1742 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1743 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1744 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1745 	}
1746 }
1747 
/* Deferred SPAN respin request: carries the mlxsw_sp instance into
 * workqueue context.
 */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
};
1752 
1753 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1754 {
1755 	struct mlxsw_sp_span_respin_work *respin_work =
1756 		container_of(work, struct mlxsw_sp_span_respin_work, work);
1757 
1758 	rtnl_lock();
1759 	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
1760 	rtnl_unlock();
1761 	kfree(respin_work);
1762 }
1763 
/* Schedule a deferred SPAN respin. Allocates with GFP_ATOMIC so it can be
 * called from contexts that must not sleep; on allocation failure the
 * respin is simply skipped (best effort).
 */
static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_span_respin_work *respin_work;

	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
	if (!respin_work)
		return;

	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
	respin_work->mlxsw_sp = mlxsw_sp;

	mlxsw_core_schedule_work(&respin_work->work);
}
1777 
/* switchdev object addition entry point for mlxsw_sp ports: dispatches
 * VLAN and MDB objects, everything else is not offloaded.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
1812 
1813 static void
1814 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1815 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1816 {
1817 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
1818 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1819 
1820 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1821 	if (WARN_ON(!mlxsw_sp_port_vlan))
1822 		return;
1823 
1824 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1825 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1826 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1827 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1828 }
1829 
/* SWITCHDEV_OBJ_ID_PORT_VLAN deletion handler: remove every VID in the
 * range from the bridge port.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	/* VLAN entries on the bridge device itself are not offloaded */
	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* No per-VLAN state is kept for VLAN-unaware bridges */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);

	return 0;
}
1853 
1854 static int
1855 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1856 			struct mlxsw_sp_bridge_port *bridge_port,
1857 			struct mlxsw_sp_mid *mid)
1858 {
1859 	struct net_device *dev = mlxsw_sp_port->dev;
1860 	int err;
1861 
1862 	if (bridge_port->bridge_device->multicast_enabled &&
1863 	    !bridge_port->mrouter) {
1864 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1865 		if (err)
1866 			netdev_err(dev, "Unable to remove port from SMID\n");
1867 	}
1868 
1869 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1870 	if (err)
1871 		netdev_err(dev, "Unable to remove MC SFD\n");
1872 
1873 	return err;
1874 }
1875 
/* SWITCHDEV_OBJ_ID_PORT_MDB deletion handler: remove this port from the
 * {MAC, FID} MID entry.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	/* Nothing was offloaded if the port is not a member of the VLAN */
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}
1909 
1910 static void
1911 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1912 			       struct mlxsw_sp_bridge_port *bridge_port)
1913 {
1914 	struct mlxsw_sp_bridge_device *bridge_device;
1915 	struct mlxsw_sp_mid *mid, *tmp;
1916 
1917 	bridge_device = bridge_port->bridge_device;
1918 
1919 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1920 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1921 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1922 						mid);
1923 		} else if (bridge_device->multicast_enabled &&
1924 			   bridge_port->mrouter) {
1925 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1926 		}
1927 	}
1928 }
1929 
1930 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1931 				 const struct switchdev_obj *obj)
1932 {
1933 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1934 	int err = 0;
1935 
1936 	switch (obj->id) {
1937 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1938 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1939 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1940 		break;
1941 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1942 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1943 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1944 		break;
1945 	default:
1946 		err = -EOPNOTSUPP;
1947 		break;
1948 	}
1949 
1950 	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1951 
1952 	return err;
1953 }
1954 
1955 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1956 						   u16 lag_id)
1957 {
1958 	struct mlxsw_sp_port *mlxsw_sp_port;
1959 	u64 max_lag_members;
1960 	int i;
1961 
1962 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1963 					     MAX_LAG_MEMBERS);
1964 	for (i = 0; i < max_lag_members; i++) {
1965 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1966 		if (mlxsw_sp_port)
1967 			return mlxsw_sp_port;
1968 	}
1969 	return NULL;
1970 }
1971 
/* switchdev attribute ops registered on every mlxsw_sp port netdev */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
};
1976 
1977 static int
1978 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1979 				struct mlxsw_sp_bridge_port *bridge_port,
1980 				struct mlxsw_sp_port *mlxsw_sp_port,
1981 				struct netlink_ext_ack *extack)
1982 {
1983 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1984 
1985 	if (is_vlan_dev(bridge_port->dev)) {
1986 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1987 		return -EINVAL;
1988 	}
1989 
1990 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
1991 	if (WARN_ON(!mlxsw_sp_port_vlan))
1992 		return -EINVAL;
1993 
1994 	/* Let VLAN-aware bridge take care of its own VLANs */
1995 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1996 
1997 	return 0;
1998 }
1999 
/* Undo mlxsw_sp_bridge_8021q_port_join(): re-acquire the default VLAN
 * (VID 1) reference and restore it as PVID.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
2009 
/* Enable NVE (VxLAN) on the 802.1Q FID that corresponds to 'vid' on a
 * VLAN-aware bridge. Returns 0 when there is nothing to do yet (no
 * mapped VLAN or no FID).
 */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	/* If no other port is member in the VLAN, then the FID does not exist.
	 * NVE will be enabled on the FID once a port joins the VLAN
	 */
	fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
	if (!fid)
		return 0;

	/* Only a single VNI can be mapped to a FID at a time */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	/* The tunnel port does not hold a reference on the FID. Only
	 * local ports and the router port
	 */
	mlxsw_sp_fid_put(fid);

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2063 
/* Return the VxLAN device under @br_dev whose bridge port is configured
 * with @vid as PVID and egress untagged, or NULL if none matches.
 */
static struct net_device *
mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		/* Skip devices whose mapped VLAN cannot be resolved or
		 * does not match the requested one.
		 */
		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || pvid != vid)
			continue;

		return dev;
	}

	return NULL;
}
2086 
/* Get (or create) the 802.1Q FID for @vid, taking a reference that the
 * caller must put. If a running VxLAN device is mapped to this VLAN and
 * the FID has no VNI yet, NVE is enabled on the FID as a side effect.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid))
		return fid;

	/* A VNI is already mapped - nothing more to do */
	if (mlxsw_sp_fid_vni_is_set(fid))
		return fid;

	/* Find the VxLAN device that has the specified VLAN configured as
	 * PVID and egress untagged. There can be at most one such device
	 */
	vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
							 vid);
	if (!vxlan_dev)
		return fid;

	if (!netif_running(vxlan_dev))
		return fid;

	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
					       NULL);
	if (err)
		goto err_vxlan_join;

	return fid;

err_vxlan_join:
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);
}
2125 
2126 static struct mlxsw_sp_fid *
2127 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2128 				 u16 vid)
2129 {
2130 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2131 
2132 	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
2133 }
2134 
/* Map a FID back to its bridge VLAN: for a VLAN-aware bridge this is
 * the VID the 802.1Q FID was created for.
 */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2141 
/* Operations for VLAN-aware (802.1Q) bridge devices */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2150 
2151 static bool
2152 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2153 			   const struct net_device *br_dev)
2154 {
2155 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2156 
2157 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2158 			    list) {
2159 		if (mlxsw_sp_port_vlan->bridge_port &&
2160 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2161 		    br_dev)
2162 			return true;
2163 	}
2164 
2165 	return false;
2166 }
2167 
/* Enslave a port or one of its VLAN uppers to a VLAN-unaware (802.1D)
 * bridge. The matching {Port, VID} is detached from its router use, if
 * any, and joined to the bridge port.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	/* A VLAN upper maps to its VID; the port itself maps to VID 1 */
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* The same port may not be a member of the bridge twice via
	 * different VLAN uppers.
	 */
	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}
2194 
/* Undo mlxsw_sp_bridge_8021d_port_join(). Returns silently if the
 * {Port, VID} entry no longer exists.
 */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	/* A VLAN upper maps to its VID; the port itself maps to VID 1 */
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
2211 
/* Enable VxLAN tunneling (NVE) on the FID of a VLAN-unaware bridge.
 * Unlike the 802.1Q variant, the FID is keyed by the bridge's ifindex
 * and must already exist; @vid is not used here.
 */
static int
mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
	if (!fid)
		return -EINVAL;

	/* Only one VNI may be mapped to a given FID */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	/* The tunnel port does not hold a reference on the FID. Only
	 * local ports and the router port
	 */
	mlxsw_sp_fid_put(fid);

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2252 
/* Get (or create) the 802.1D FID of this bridge, taking a reference the
 * caller must put. If a running VxLAN device is enslaved to the bridge
 * and the FID has no VNI yet, NVE is enabled on the FID as a side
 * effect.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
	if (IS_ERR(fid))
		return fid;

	/* A VNI is already mapped - nothing more to do */
	if (mlxsw_sp_fid_vni_is_set(fid))
		return fid;

	vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
	if (!vxlan_dev)
		return fid;

	if (!netif_running(vxlan_dev))
		return fid;

	err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
					       NULL);
	if (err)
		goto err_vxlan_join;

	return fid;

err_vxlan_join:
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);
}
2287 
2288 static struct mlxsw_sp_fid *
2289 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2290 				 u16 vid)
2291 {
2292 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2293 
2294 	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2295 	if (vid)
2296 		return NULL;
2297 
2298 	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2299 }
2300 
/* A VLAN-unaware bridge has no VLANs; its FDB entries use VID 0 */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2307 
/* Operations for VLAN-unaware (802.1D) bridge devices */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2316 
/* Bind @brport_dev (a port netdev or one of its uppers) to bridge
 * @br_dev. On success a reference is held on the bridge port; it is
 * dropped by mlxsw_sp_port_bridge_leave().
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	/* Delegate to the VLAN-aware / VLAN-unaware specific join */
	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	return 0;

err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
2343 
/* Undo mlxsw_sp_port_bridge_join() and drop the bridge port reference.
 * Returns silently if the bridge device or bridge port is no longer
 * known to the driver.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
2363 
/* Dispatch a VxLAN join to the bridge-type specific handler. The caller
 * is expected to only invoke this for bridges already known to the
 * driver, hence the WARN_ON.
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}
2378 
/* Disable NVE on the FID mapped to the VxLAN device's VNI, if any */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	mlxsw_sp_fid_put(fid);
}
2393 
2394 static void
2395 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2396 				      enum mlxsw_sp_l3proto *proto,
2397 				      union mlxsw_sp_l3addr *addr)
2398 {
2399 	if (vxlan_addr->sa.sa_family == AF_INET) {
2400 		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2401 		*proto = MLXSW_SP_L3_PROTO_IPV4;
2402 	} else {
2403 		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2404 		*proto = MLXSW_SP_L3_PROTO_IPV6;
2405 	}
2406 }
2407 
/* Inverse of mlxsw_sp_switchdev_vxlan_addr_convert(): build a VxLAN
 * socket address from the driver's L3 proto/addr pair.
 */
static void
mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
				      const union mlxsw_sp_l3addr *addr,
				      union vxlan_addr *vxlan_addr)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		vxlan_addr->sa.sa_family = AF_INET;
		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		vxlan_addr->sa.sa_family = AF_INET6;
		vxlan_addr->sin6.sin6_addr = addr->addr6;
		break;
	}
}
2424 
/* Send a SWITCHDEV_VXLAN_FDB_{ADD,DEL}_TO_BRIDGE notification so the
 * VxLAN driver's FDB follows entries learned / aged-out by the device.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
					      const char *mac,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	enum switchdev_notifier_type type;

	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
	info.remote_port = vxlan->cfg.dst_port;
	info.remote_vni = vni;
	info.remote_ifindex = 0;
	ether_addr_copy(info.eth_addr, mac);
	info.vni = vni;
	info.offloaded = adding;
	call_switchdev_notifiers(type, dev, &info.info);
}
2446 
2447 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2448 					    const char *mac,
2449 					    enum mlxsw_sp_l3proto proto,
2450 					    union mlxsw_sp_l3addr *addr,
2451 					    __be32 vni,
2452 					    bool adding)
2453 {
2454 	if (netif_is_vxlan(dev))
2455 		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2456 						  adding);
2457 }
2458 
2459 static void
2460 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2461 			    const char *mac, u16 vid,
2462 			    struct net_device *dev, bool offloaded)
2463 {
2464 	struct switchdev_notifier_fdb_info info;
2465 
2466 	info.addr = mac;
2467 	info.vid = vid;
2468 	info.offloaded = offloaded;
2469 	call_switchdev_notifiers(type, dev, &info.info);
2470 }
2471 
/* Process a learned / aged-out FDB record for a local (non-LAG) port:
 * program the entry back into the device's FDB and notify the bridge
 * driver. If the record cannot be matched to a bridged {Port, VID},
 * the entry is removed from the device without notification
 * (just_remove path), presumably so the device stops re-sending it.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Remove the entry from the device without telling the bridge */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2529 
/* LAG counterpart of mlxsw_sp_fdb_notify_mac_process(): the record
 * carries a LAG ID instead of a local port, so the entry is confirmed
 * via the LAG FDB op and resolved through a LAG port representor.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	/* Only pass a VID to the LAG FDB op when the FID type requires it */
	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
		  mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Remove the entry from the device without telling the bridge */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2590 
/* Resolve the NVE device, bridge VID and VNI for a unicast tunnel FDB
 * record, validating that the record should be reflected to the bridge.
 * Returns -EINVAL when it should not (device down, learning disabled,
 * or no offloaded bridge above the NVE device).
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(&init_net, nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Do not reflect learned entries if learning is disabled on the
	 * bridge port of the NVE device.
	 */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	/* Likewise when the VxLAN device itself has learning disabled */
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
2641 
2642 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2643 						      char *sfn_pl,
2644 						      int rec_index,
2645 						      bool adding)
2646 {
2647 	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2648 	enum switchdev_notifier_type type;
2649 	struct net_device *nve_dev;
2650 	union mlxsw_sp_l3addr addr;
2651 	struct mlxsw_sp_fid *fid;
2652 	char mac[ETH_ALEN];
2653 	u16 fid_index, vid;
2654 	__be32 vni;
2655 	u32 uip;
2656 	int err;
2657 
2658 	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2659 				       &uip, &sfn_proto);
2660 
2661 	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2662 	if (!fid)
2663 		goto err_fid_lookup;
2664 
2665 	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2666 					      (enum mlxsw_sp_l3proto) sfn_proto,
2667 					      &addr);
2668 	if (err)
2669 		goto err_ip_resolve;
2670 
2671 	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2672 							  &nve_dev, &vid, &vni);
2673 	if (err)
2674 		goto err_fdb_process;
2675 
2676 	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2677 					     (enum mlxsw_sp_l3proto) sfn_proto,
2678 					     &addr, adding, true);
2679 	if (err)
2680 		goto err_fdb_op;
2681 
2682 	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2683 					(enum mlxsw_sp_l3proto) sfn_proto,
2684 					&addr, vni, adding);
2685 
2686 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2687 			SWITCHDEV_FDB_DEL_TO_BRIDGE;
2688 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2689 
2690 	mlxsw_sp_fid_put(fid);
2691 
2692 	return;
2693 
2694 err_fdb_op:
2695 err_fdb_process:
2696 err_ip_resolve:
2697 	mlxsw_sp_fid_put(fid);
2698 err_fid_lookup:
2699 	/* Remove an FDB entry in case we cannot process it. Otherwise the
2700 	 * device will keep sending the same notification over and over again.
2701 	 */
2702 	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2703 				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2704 				       false, true);
2705 }
2706 
/* Demultiplex a single SFN record to the matching handler. Record types
 * not listed here are silently ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, false);
		break;
	}
}
2737 
2738 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2739 {
2740 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2741 
2742 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2743 			       msecs_to_jiffies(bridge->fdb_notify.interval));
2744 }
2745 
/* Delayed work that polls the device for FDB notifications via the SFN
 * register, processes every returned record under RTNL and re-arms
 * itself.
 *
 * NOTE(review): if the allocation below fails, the function returns
 * without re-arming, so FDB polling stops - confirm this is intended.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
2778 
/* Deferred-work context for a switchdev FDB event. The union holds
 * either a bridge FDB entry or a VxLAN FDB entry, depending on the
 * netdev type the event was generated on.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;	/* device the event was generated on */
	unsigned long event;	/* SWITCHDEV_FDB_* event type */
};
2788 
/* Handle a bridge FDB add/del that targets a VxLAN device: look up the
 * corresponding entry in the VxLAN driver's FDB, program or remove the
 * unicast tunnel entry in the device, and report the resulting offload
 * state back to the VxLAN and bridge drivers.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* NOTE(review): removal errors are ignored here - confirm
		 * best-effort removal is intended.
		 */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info);
		break;
	}
}
2837 
/* Validate that a bridge FDB event on an NVE (VxLAN) device is relevant
 * (device running, enslaved to an offloaded bridge, FID has a VNI) and
 * forward it to the VxLAN FDB handler.
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only user-added entries are programmed; learned ones come from
	 * the device itself.
	 */
	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    !switchdev_work->fdb_info.added_by_user)
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}
2887 
/* Work item handling SWITCHDEV_FDB_* events targeting bridged devices:
 * VxLAN devices are delegated to the NVE handler, local ports get
 * static FDB entries programmed / removed, and SPAN mirrors are
 * respun. Frees the MAC copy and drops the device reference taken by
 * whoever queued this work (outside this chunk - TODO confirm).
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-added entries are programmed; learned ones come
		 * from the device itself.
		 */
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
2939 
/* Handle a VxLAN FDB add event: an all-zeros MAC programs an NVE flood
 * (remote VTEP) IP; a unicast MAC is programmed only if the bridge FDB
 * also points it at this VxLAN device, since the device has a single
 * FDB table.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* All-zeros MAC: default destination, i.e. a flood (remote VTEP)
	 * IP rather than a unicast FDB entry.
	 */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
3011 
/* Handle a VxLAN FDB del event: an all-zeros MAC removes an NVE flood
 * (remote VTEP) IP; a unicast MAC is removed from the device and the
 * bridge is told the entry is no longer offloaded.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* All-zeros MAC: flood (remote VTEP) IP rather than a unicast entry */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false);

	mlxsw_sp_fid_put(fid);
}
3055 
3056 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3057 {
3058 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
3059 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3060 	struct net_device *dev = switchdev_work->dev;
3061 	struct mlxsw_sp *mlxsw_sp;
3062 	struct net_device *br_dev;
3063 
3064 	rtnl_lock();
3065 
3066 	if (!netif_running(dev))
3067 		goto out;
3068 	br_dev = netdev_master_upper_dev_get(dev);
3069 	if (!br_dev)
3070 		goto out;
3071 	if (!netif_is_bridge_master(br_dev))
3072 		goto out;
3073 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3074 	if (!mlxsw_sp)
3075 		goto out;
3076 
3077 	switch (switchdev_work->event) {
3078 	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3079 		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3080 		break;
3081 	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3082 		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3083 		break;
3084 	}
3085 
3086 out:
3087 	rtnl_unlock();
3088 	kfree(switchdev_work);
3089 	dev_put(dev);
3090 }
3091 
3092 static int
3093 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3094 				      switchdev_work,
3095 				      struct switchdev_notifier_info *info)
3096 {
3097 	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3098 	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3099 	struct vxlan_config *cfg = &vxlan->cfg;
3100 
3101 	vxlan_fdb_info = container_of(info,
3102 				      struct switchdev_notifier_vxlan_fdb_info,
3103 				      info);
3104 
3105 	if (vxlan_fdb_info->remote_port != cfg->dst_port)
3106 		return -EOPNOTSUPP;
3107 	if (vxlan_fdb_info->remote_vni != cfg->vni)
3108 		return -EOPNOTSUPP;
3109 	if (vxlan_fdb_info->vni != cfg->vni)
3110 		return -EOPNOTSUPP;
3111 	if (vxlan_fdb_info->remote_ifindex)
3112 		return -EOPNOTSUPP;
3113 	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
3114 		return -EOPNOTSUPP;
3115 	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
3116 		return -EOPNOTSUPP;
3117 
3118 	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3119 
3120 	return 0;
3121 }
3122 
/* Called under rcu_read_lock() */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	/* Atomic notifier context: defer the actual processing to process
	 * context via a work item, copying everything it needs.
	 */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The notifier's addr pointer is only valid during the call;
		 * deep-copy the MAC for the deferred work.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containing mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		/* Released by the work item (dev_put). */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3197 
/* Atomic switchdev notifier for FDB events; registered in
 * mlxsw_sp_fdb_init().
 */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3201 
/* Handle the addition of a VLAN to a VxLAN device enslaved to a
 * VLAN-aware bridge. Depending on the current FID <-> VNI mapping and on
 * the VLAN's PVID / egress-untagged flags, NVE is enabled or disabled on
 * the corresponding FID. Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct switchdev_trans *trans)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
							vxlan_dev, vid, NULL);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
					       NULL);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Roll back to the previous VNI mapping on failure. */
	mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
					 NULL);
	return err;
}
3290 
3291 static void
3292 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3293 				  struct mlxsw_sp_bridge_device *bridge_device,
3294 				  const struct net_device *vxlan_dev, u16 vid)
3295 {
3296 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3297 	__be32 vni = vxlan->cfg.vni;
3298 	struct mlxsw_sp_fid *fid;
3299 
3300 	if (!netif_running(vxlan_dev))
3301 		return;
3302 
3303 	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3304 	if (!fid)
3305 		return;
3306 
3307 	/* A different VLAN than the one mapped to the VNI is deleted */
3308 	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3309 		goto out;
3310 
3311 	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3312 
3313 out:
3314 	mlxsw_sp_fid_put(fid);
3315 }
3316 
3317 static int
3318 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3319 				   struct switchdev_notifier_port_obj_info *
3320 				   port_obj_info)
3321 {
3322 	struct switchdev_obj_port_vlan *vlan =
3323 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3324 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3325 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3326 	struct switchdev_trans *trans = port_obj_info->trans;
3327 	struct mlxsw_sp_bridge_device *bridge_device;
3328 	struct mlxsw_sp *mlxsw_sp;
3329 	struct net_device *br_dev;
3330 	u16 vid;
3331 
3332 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3333 	if (!br_dev)
3334 		return 0;
3335 
3336 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3337 	if (!mlxsw_sp)
3338 		return 0;
3339 
3340 	port_obj_info->handled = true;
3341 
3342 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3343 	if (!bridge_device)
3344 		return -EINVAL;
3345 
3346 	if (!bridge_device->vlan_enabled)
3347 		return 0;
3348 
3349 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
3350 		int err;
3351 
3352 		err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3353 							vxlan_dev, vid,
3354 							flag_untagged,
3355 							flag_pvid, trans);
3356 		if (err)
3357 			return err;
3358 	}
3359 
3360 	return 0;
3361 }
3362 
3363 static void
3364 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3365 				   struct switchdev_notifier_port_obj_info *
3366 				   port_obj_info)
3367 {
3368 	struct switchdev_obj_port_vlan *vlan =
3369 		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3370 	struct mlxsw_sp_bridge_device *bridge_device;
3371 	struct mlxsw_sp *mlxsw_sp;
3372 	struct net_device *br_dev;
3373 	u16 vid;
3374 
3375 	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3376 	if (!br_dev)
3377 		return;
3378 
3379 	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3380 	if (!mlxsw_sp)
3381 		return;
3382 
3383 	port_obj_info->handled = true;
3384 
3385 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3386 	if (!bridge_device)
3387 		return;
3388 
3389 	if (!bridge_device->vlan_enabled)
3390 		return;
3391 
3392 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
3393 		mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
3394 						  vxlan_dev, vid);
3395 }
3396 
3397 static int
3398 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3399 					struct switchdev_notifier_port_obj_info *
3400 					port_obj_info)
3401 {
3402 	int err = 0;
3403 
3404 	switch (port_obj_info->obj->id) {
3405 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3406 		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3407 							 port_obj_info);
3408 		break;
3409 	default:
3410 		break;
3411 	}
3412 
3413 	return err;
3414 }
3415 
3416 static void
3417 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3418 					struct switchdev_notifier_port_obj_info *
3419 					port_obj_info)
3420 {
3421 	switch (port_obj_info->obj->id) {
3422 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3423 		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3424 		break;
3425 	default:
3426 		break;
3427 	}
3428 }
3429 
3430 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3431 					     unsigned long event, void *ptr)
3432 {
3433 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3434 	int err = 0;
3435 
3436 	switch (event) {
3437 	case SWITCHDEV_PORT_OBJ_ADD:
3438 		if (netif_is_vxlan(dev))
3439 			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3440 		else
3441 			err = switchdev_handle_port_obj_add(dev, ptr,
3442 							mlxsw_sp_port_dev_check,
3443 							mlxsw_sp_port_obj_add);
3444 		return notifier_from_errno(err);
3445 	case SWITCHDEV_PORT_OBJ_DEL:
3446 		if (netif_is_vxlan(dev))
3447 			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3448 		else
3449 			err = switchdev_handle_port_obj_del(dev, ptr,
3450 							mlxsw_sp_port_dev_check,
3451 							mlxsw_sp_port_obj_del);
3452 		return notifier_from_errno(err);
3453 	}
3454 
3455 	return NOTIFY_DONE;
3456 }
3457 
/* Blocking switchdev notifier for port object add / delete events;
 * registered in mlxsw_sp_fdb_init().
 */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
3461 
/* Return the cached STP state of the given bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
3467 
3468 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3469 {
3470 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3471 	struct notifier_block *nb;
3472 	int err;
3473 
3474 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3475 	if (err) {
3476 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3477 		return err;
3478 	}
3479 
3480 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3481 	if (err) {
3482 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3483 		return err;
3484 	}
3485 
3486 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3487 	err = register_switchdev_blocking_notifier(nb);
3488 	if (err) {
3489 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3490 		goto err_register_switchdev_blocking_notifier;
3491 	}
3492 
3493 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3494 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3495 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
3496 	return 0;
3497 
3498 err_register_switchdev_blocking_notifier:
3499 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3500 	return err;
3501 }
3502 
3503 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3504 {
3505 	struct notifier_block *nb;
3506 
3507 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3508 
3509 	nb = &mlxsw_sp_switchdev_blocking_notifier;
3510 	unregister_switchdev_blocking_notifier(nb);
3511 
3512 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3513 }
3514 
3515 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3516 {
3517 	struct mlxsw_sp_bridge *bridge;
3518 
3519 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3520 	if (!bridge)
3521 		return -ENOMEM;
3522 	mlxsw_sp->bridge = bridge;
3523 	bridge->mlxsw_sp = mlxsw_sp;
3524 
3525 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3526 
3527 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3528 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3529 
3530 	return mlxsw_sp_fdb_init(mlxsw_sp);
3531 }
3532 
3533 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3534 {
3535 	mlxsw_sp_fdb_fini(mlxsw_sp);
3536 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3537 	kfree(mlxsw_sp->bridge);
3538 }
3539 
/* Attach the driver's switchdev ops to the port's netdev. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
3544 
/* Counterpart of mlxsw_sp_port_switchdev_init(); nothing to undo. */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}
3548