xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c (revision fe8ecccc10b3adc071de05ca7af728ca1a4ac9aa)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 
19 #include "spectrum_span.h"
20 #include "spectrum_switchdev.h"
21 #include "spectrum.h"
22 #include "core.h"
23 #include "reg.h"
24 
25 struct mlxsw_sp_bridge_ops;
26 
/* Driver-wide bridge offload state: tracks every offloaded bridge device
 * plus the FDB notification and ageing machinery shared between them.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;	/* periodic FDB notification work */
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;	/* seconds (see mlxsw_sp_port_attr_br_ageing_set()) */
	bool vlan_enabled_exists;	/* a VLAN-aware bridge is offloaded;
					 * only one such bridge is supported
					 */
	struct list_head bridges_list;	/* of struct mlxsw_sp_bridge_device */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);	/* MDB index allocator */
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;	/* VLAN-aware */
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;	/* VLAN-unaware */
};
44 
/* Per-bridge-netdev offload state. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;		/* the bridge netdev */
	struct list_head list;		/* node in mlxsw_sp_bridge::bridges_list */
	struct list_head ports_list;	/* of struct mlxsw_sp_bridge_port */
	struct list_head mids_list;	/* of struct mlxsw_sp_mid (MDB entries) */
	u8 vlan_enabled:1,		/* bridge is VLAN-aware (802.1Q) */
	   multicast_enabled:1,		/* multicast snooping enabled */
	   mrouter:1;			/* bridge acts as a multicast router port */
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q or 802.1D ops */
};
55 
/* Per-bridge-port offload state. A bridge port may be a physical port or
 * a LAG, hence the union below.
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;		/* the bridge port netdev */
	struct mlxsw_sp_bridge_device *bridge_device;	/* owning bridge */
	struct list_head list;		/* node in bridge_device::ports_list */
	struct list_head vlans_list;	/* of struct mlxsw_sp_bridge_vlan */
	unsigned int ref_count;		/* see mlxsw_sp_bridge_port_{get,put}() */
	u8 stp_state;			/* BR_STATE_* */
	unsigned long flags;		/* BR_LEARNING, BR_FLOOD, ... */
	bool mrouter;			/* port is a multicast router port */
	bool lagged;			/* selects which union member is valid */
	union {
		u16 lag_id;
		u16 system_port;
	};
};
71 
/* A VLAN configured on a bridge port, linking together the {Port, VLAN}
 * entries that are members of it.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* node in bridge_port::vlans_list */
	struct list_head port_vlan_list; /* of struct mlxsw_sp_port_vlan */
	u16 vid;
};
77 
/* Operations that differ between VLAN-aware (802.1Q) and VLAN-unaware
 * (802.1D) bridges.
 */
struct mlxsw_sp_bridge_ops {
	/* Called when a port joins the bridge. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	/* Called when a port leaves the bridge. */
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Resolve the FID backing @vid on this bridge. */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
};
90 
91 static int
92 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
93 			       struct mlxsw_sp_bridge_port *bridge_port,
94 			       u16 fid_index);
95 
96 static void
97 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
98 			       struct mlxsw_sp_bridge_port *bridge_port);
99 
100 static void
101 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
102 				   struct mlxsw_sp_bridge_device
103 				   *bridge_device);
104 
105 static void
106 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
107 				 struct mlxsw_sp_bridge_port *bridge_port,
108 				 bool add);
109 
110 static struct mlxsw_sp_bridge_device *
111 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
112 			    const struct net_device *br_dev)
113 {
114 	struct mlxsw_sp_bridge_device *bridge_device;
115 
116 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
117 		if (bridge_device->dev == br_dev)
118 			return bridge_device;
119 
120 	return NULL;
121 }
122 
123 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
124 					 const struct net_device *br_dev)
125 {
126 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
127 }
128 
/* netdev_walk_all_upper_dev_rcu() callback: destroy the router interface
 * (RIF), if any, backing one upper device of the bridge. Always returns
 * 0 so the walk visits every upper.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
137 
/* Destroy the RIFs associated with bridge netdev @dev itself and with
 * all of its upper devices.
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      mlxsw_sp);
}
146 
147 static struct mlxsw_sp_bridge_device *
148 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
149 			      struct net_device *br_dev)
150 {
151 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
152 	struct mlxsw_sp_bridge_device *bridge_device;
153 	bool vlan_enabled = br_vlan_enabled(br_dev);
154 
155 	if (vlan_enabled && bridge->vlan_enabled_exists) {
156 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
157 		return ERR_PTR(-EINVAL);
158 	}
159 
160 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
161 	if (!bridge_device)
162 		return ERR_PTR(-ENOMEM);
163 
164 	bridge_device->dev = br_dev;
165 	bridge_device->vlan_enabled = vlan_enabled;
166 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
167 	bridge_device->mrouter = br_multicast_router(br_dev);
168 	INIT_LIST_HEAD(&bridge_device->ports_list);
169 	if (vlan_enabled) {
170 		bridge->vlan_enabled_exists = true;
171 		bridge_device->ops = bridge->bridge_8021q_ops;
172 	} else {
173 		bridge_device->ops = bridge->bridge_8021d_ops;
174 	}
175 	INIT_LIST_HEAD(&bridge_device->mids_list);
176 	list_add(&bridge_device->list, &bridge->bridges_list);
177 
178 	return bridge_device;
179 }
180 
/* Unregister and free the offload state of a bridge device. Must only be
 * called once no ports reference it (see mlxsw_sp_bridge_device_put()).
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	/* Destroy the RIFs of the bridge device and its uppers. */
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	/* Allow another VLAN-aware bridge to be offloaded. */
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}
194 
/* Find the offload state of @br_dev, creating it on first use.
 * Returns an ERR_PTR() on creation failure.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		bridge_device = mlxsw_sp_bridge_device_create(bridge, br_dev);

	return bridge_device;
}
207 
208 static void
209 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
210 			   struct mlxsw_sp_bridge_device *bridge_device)
211 {
212 	if (list_empty(&bridge_device->ports_list))
213 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
214 }
215 
216 static struct mlxsw_sp_bridge_port *
217 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
218 			    const struct net_device *brport_dev)
219 {
220 	struct mlxsw_sp_bridge_port *bridge_port;
221 
222 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
223 		if (bridge_port->dev == brport_dev)
224 			return bridge_port;
225 	}
226 
227 	return NULL;
228 }
229 
230 struct mlxsw_sp_bridge_port *
231 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
232 			  struct net_device *brport_dev)
233 {
234 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
235 	struct mlxsw_sp_bridge_device *bridge_device;
236 
237 	if (!br_dev)
238 		return NULL;
239 
240 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
241 	if (!bridge_device)
242 		return NULL;
243 
244 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
245 }
246 
247 static struct mlxsw_sp_bridge_port *
248 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
249 			    struct net_device *brport_dev)
250 {
251 	struct mlxsw_sp_bridge_port *bridge_port;
252 	struct mlxsw_sp_port *mlxsw_sp_port;
253 
254 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
255 	if (!bridge_port)
256 		return NULL;
257 
258 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
259 	bridge_port->lagged = mlxsw_sp_port->lagged;
260 	if (bridge_port->lagged)
261 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
262 	else
263 		bridge_port->system_port = mlxsw_sp_port->local_port;
264 	bridge_port->dev = brport_dev;
265 	bridge_port->bridge_device = bridge_device;
266 	bridge_port->stp_state = BR_STATE_DISABLED;
267 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
268 			     BR_MCAST_FLOOD;
269 	INIT_LIST_HEAD(&bridge_port->vlans_list);
270 	list_add(&bridge_port->list, &bridge_device->ports_list);
271 	bridge_port->ref_count = 1;
272 
273 	return bridge_port;
274 }
275 
276 static void
277 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
278 {
279 	list_del(&bridge_port->list);
280 	WARN_ON(!list_empty(&bridge_port->vlans_list));
281 	kfree(bridge_port);
282 }
283 
284 static bool
285 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
286 				    bridge_port)
287 {
288 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
289 
290 	/* In case ports were pulled from out of a bridged LAG, then
291 	 * it's possible the reference count isn't zero, yet the bridge
292 	 * port should be destroyed, as it's no longer an upper of ours.
293 	 */
294 	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
295 		return true;
296 	else if (bridge_port->ref_count == 0)
297 		return true;
298 	else
299 		return false;
300 }
301 
302 static struct mlxsw_sp_bridge_port *
303 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
304 			 struct net_device *brport_dev)
305 {
306 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
307 	struct mlxsw_sp_bridge_device *bridge_device;
308 	struct mlxsw_sp_bridge_port *bridge_port;
309 	int err;
310 
311 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
312 	if (bridge_port) {
313 		bridge_port->ref_count++;
314 		return bridge_port;
315 	}
316 
317 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
318 	if (IS_ERR(bridge_device))
319 		return ERR_CAST(bridge_device);
320 
321 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
322 	if (!bridge_port) {
323 		err = -ENOMEM;
324 		goto err_bridge_port_create;
325 	}
326 
327 	return bridge_port;
328 
329 err_bridge_port_create:
330 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
331 	return ERR_PTR(err);
332 }
333 
334 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
335 				     struct mlxsw_sp_bridge_port *bridge_port)
336 {
337 	struct mlxsw_sp_bridge_device *bridge_device;
338 
339 	bridge_port->ref_count--;
340 	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
341 		return;
342 	bridge_device = bridge_port->bridge_device;
343 	mlxsw_sp_bridge_port_destroy(bridge_port);
344 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
345 }
346 
347 static struct mlxsw_sp_port_vlan *
348 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
349 				  const struct mlxsw_sp_bridge_device *
350 				  bridge_device,
351 				  u16 vid)
352 {
353 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
354 
355 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
356 			    list) {
357 		if (!mlxsw_sp_port_vlan->bridge_port)
358 			continue;
359 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
360 		    bridge_device)
361 			continue;
362 		if (bridge_device->vlan_enabled &&
363 		    mlxsw_sp_port_vlan->vid != vid)
364 			continue;
365 		return mlxsw_sp_port_vlan;
366 	}
367 
368 	return NULL;
369 }
370 
371 static struct mlxsw_sp_port_vlan*
372 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
373 			       u16 fid_index)
374 {
375 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
376 
377 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
378 			    list) {
379 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
380 
381 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
382 			return mlxsw_sp_port_vlan;
383 	}
384 
385 	return NULL;
386 }
387 
388 static struct mlxsw_sp_bridge_vlan *
389 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
390 			  u16 vid)
391 {
392 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
393 
394 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
395 		if (bridge_vlan->vid == vid)
396 			return bridge_vlan;
397 	}
398 
399 	return NULL;
400 }
401 
402 static struct mlxsw_sp_bridge_vlan *
403 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
404 {
405 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
406 
407 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
408 	if (!bridge_vlan)
409 		return NULL;
410 
411 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
412 	bridge_vlan->vid = vid;
413 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
414 
415 	return bridge_vlan;
416 }
417 
418 static void
419 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
420 {
421 	list_del(&bridge_vlan->list);
422 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
423 	kfree(bridge_vlan);
424 }
425 
426 static struct mlxsw_sp_bridge_vlan *
427 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
428 {
429 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
430 
431 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
432 	if (bridge_vlan)
433 		return bridge_vlan;
434 
435 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
436 }
437 
438 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
439 {
440 	if (list_empty(&bridge_vlan->port_vlan_list))
441 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
442 }
443 
444 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
445 					   struct net_device *dev,
446 					   unsigned long *brport_flags)
447 {
448 	struct mlxsw_sp_bridge_port *bridge_port;
449 
450 	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
451 	if (WARN_ON(!bridge_port))
452 		return;
453 
454 	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
455 }
456 
457 static int mlxsw_sp_port_attr_get(struct net_device *dev,
458 				  struct switchdev_attr *attr)
459 {
460 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
461 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
462 
463 	switch (attr->id) {
464 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
465 		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
466 		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
467 		       attr->u.ppid.id_len);
468 		break;
469 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
470 		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
471 					       &attr->u.brport_flags);
472 		break;
473 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
474 		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
475 					       BR_MCAST_FLOOD;
476 		break;
477 	default:
478 		return -EOPNOTSUPP;
479 	}
480 
481 	return 0;
482 }
483 
484 static int
485 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
486 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
487 				  u8 state)
488 {
489 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
490 
491 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
492 			    bridge_vlan_node) {
493 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
494 			continue;
495 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
496 						 bridge_vlan->vid, state);
497 	}
498 
499 	return 0;
500 }
501 
/* Commit-phase handler for SWITCHDEV_ATTR_ID_PORT_STP_STATE: program the
 * new STP state on every VLAN of the bridge port and cache it. On
 * failure, already-programmed VLANs are rolled back to the old state.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	/* Cache only after all VLANs were successfully programmed. */
	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the previously cached state on the VLANs done so far. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
540 
541 static int
542 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
543 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
544 				    enum mlxsw_sp_flood_type packet_type,
545 				    bool member)
546 {
547 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
548 
549 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
550 			    bridge_vlan_node) {
551 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
552 			continue;
553 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
554 					      packet_type,
555 					      mlxsw_sp_port->local_port,
556 					      member);
557 	}
558 
559 	return 0;
560 }
561 
/* Set flood membership of @packet_type for @mlxsw_sp_port across all
 * VLANs of @bridge_port. On failure, VLANs already changed are reverted
 * to the opposite membership.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Undo: flip membership back on the VLANs done so far. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
589 
590 static int
591 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
592 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
593 				       bool set)
594 {
595 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
596 	u16 vid = bridge_vlan->vid;
597 
598 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
599 			    bridge_vlan_node) {
600 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
601 			continue;
602 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
603 	}
604 
605 	return 0;
606 }
607 
/* Set learning for @mlxsw_sp_port across all VLANs of @bridge_port. On
 * failure, VLANs already changed are reverted to the opposite setting.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Undo: flip learning back on the VLANs done so far. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
632 
/* Commit-phase handler for SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: program
 * unicast flood, learning and (when multicast snooping is disabled)
 * multicast flood according to @brport_flags, then cache the flags.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* The port may not be a bridge port anymore (deferred op). */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_UC,
						   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	/* With snooping enabled, MC flood is governed by mrouter state
	 * (see mlxsw_sp_mc_flood()), not by BR_MCAST_FLOOD.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}
674 
675 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
676 {
677 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
678 	int err;
679 
680 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
681 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
682 	if (err)
683 		return err;
684 	mlxsw_sp->bridge->ageing_time = ageing_time;
685 	return 0;
686 }
687 
688 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
689 					    struct switchdev_trans *trans,
690 					    unsigned long ageing_clock_t)
691 {
692 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
693 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
694 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
695 
696 	if (switchdev_trans_ph_prepare(trans)) {
697 		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
698 		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
699 			return -ERANGE;
700 		else
701 			return 0;
702 	}
703 
704 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
705 }
706 
/* Prepare-phase handler for SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
 * toggling VLAN filtering on an existing offloaded bridge is not
 * supported; only a no-op request succeeds.
 */
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	/* Veto in prepare phase; nothing to do in commit phase. */
	if (!switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	if (bridge_device->vlan_enabled == vlan_enabled)
		return 0;

	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
	return -EINVAL;
}
728 
/* Commit-phase handler for SWITCHDEV_ATTR_ID_PORT_MROUTER: update the
 * port's mrouter state. With multicast snooping enabled this also
 * controls MC flooding and MDB membership of the port; with snooping
 * disabled only the cached state is updated.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* The port may not be a bridge port anymore (deferred op). */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
760 
761 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
762 {
763 	const struct mlxsw_sp_bridge_device *bridge_device;
764 
765 	bridge_device = bridge_port->bridge_device;
766 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
767 					bridge_port->flags & BR_MCAST_FLOOD;
768 }
769 
770 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
771 					 struct switchdev_trans *trans,
772 					 struct net_device *orig_dev,
773 					 bool mc_disabled)
774 {
775 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
776 	struct mlxsw_sp_bridge_device *bridge_device;
777 	struct mlxsw_sp_bridge_port *bridge_port;
778 	int err;
779 
780 	if (switchdev_trans_ph_prepare(trans))
781 		return 0;
782 
783 	/* It's possible we failed to enslave the port, yet this
784 	 * operation is executed due to it being deferred.
785 	 */
786 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
787 	if (!bridge_device)
788 		return 0;
789 
790 	if (bridge_device->multicast_enabled != !mc_disabled) {
791 		bridge_device->multicast_enabled = !mc_disabled;
792 		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
793 						   bridge_device);
794 	}
795 
796 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
797 		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
798 		bool member = mlxsw_sp_mc_flood(bridge_port);
799 
800 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
801 							   bridge_port,
802 							   packet_type, member);
803 		if (err)
804 			return err;
805 	}
806 
807 	bridge_device->multicast_enabled = !mc_disabled;
808 
809 	return 0;
810 }
811 
812 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
813 					 u16 mid_idx, bool add)
814 {
815 	char *smid_pl;
816 	int err;
817 
818 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
819 	if (!smid_pl)
820 		return -ENOMEM;
821 
822 	mlxsw_reg_smid_pack(smid_pl, mid_idx,
823 			    mlxsw_sp_router_port(mlxsw_sp), add);
824 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
825 	kfree(smid_pl);
826 	return err;
827 }
828 
829 static void
830 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
831 				   struct mlxsw_sp_bridge_device *bridge_device,
832 				   bool add)
833 {
834 	struct mlxsw_sp_mid *mid;
835 
836 	list_for_each_entry(mid, &bridge_device->mids_list, list)
837 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
838 }
839 
840 static int
841 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
842 				  struct switchdev_trans *trans,
843 				  struct net_device *orig_dev,
844 				  bool is_mrouter)
845 {
846 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
847 	struct mlxsw_sp_bridge_device *bridge_device;
848 
849 	if (switchdev_trans_ph_prepare(trans))
850 		return 0;
851 
852 	/* It's possible we failed to enslave the port, yet this
853 	 * operation is executed due to it being deferred.
854 	 */
855 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
856 	if (!bridge_device)
857 		return 0;
858 
859 	if (bridge_device->mrouter != is_mrouter)
860 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
861 						   is_mrouter);
862 	bridge_device->mrouter = is_mrouter;
863 	return 0;
864 }
865 
/* switchdev attribute setter for mlxsw ports: dispatch each attribute
 * to its handler. Handlers themselves honour the prepare/commit phases
 * of @trans.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Topology may have changed; re-evaluate SPAN (mirroring) state. */
	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
918 
/* Bind a {Port, VLAN} to the FID backing its VID on the bridge: resolve
 * the FID, program UC/MC/BC flood membership and map the {Port, VLAN}
 * to the FID. On failure all steps done so far are unwound in reverse.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	/* 802.1Q vs. 802.1D bridges resolve the FID differently. */
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to bridge port members. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
968 
/* Unbind a {Port, VLAN} from its FID, undoing mlxsw_sp_port_vlan_fid_join()
 * in exact reverse order.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
984 
985 static u16
986 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
987 			     u16 vid, bool is_pvid)
988 {
989 	if (is_pvid)
990 		return vid;
991 	else if (mlxsw_sp_port->pvid == vid)
992 		return 0;	/* Dis-allow untagged packets */
993 	else
994 		return mlxsw_sp_port->pvid;
995 }
996 
/* Make a {Port, VLAN} member of a bridge port: join its FID, program
 * learning and STP state per the bridge port's cached configuration and
 * link it into the bridge VLAN. Takes a bridge port reference that is
 * released on leave. On failure all steps are unwound in reverse.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port) {
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
		return 0;
	}

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold the bridge port for as long as we are its member. */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1049 
/* Detach a {port, VLAN} from its bridge port: the inverse of
 * mlxsw_sp_port_vlan_bridge_join(). Also flushes the FDB/MDB entries that
 * become stale when the last user of the FID / bridge port goes away.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	/* Only bridged FID types are expected here. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	/* Sample "last user" conditions before unlinking anything. */
	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	/* Last {port, VLAN} on this FID: drop learned FDB records. */
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	/* Last VLAN on this bridge port: drop its MDB memberships. */
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	/* Release the reference taken in bridge_join. */
	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1085 
/* Offload one VLAN entry on a bridge port: make the port a (possibly
 * untagged) member of the VLAN, update the PVID and attach the
 * {port, VLAN} to the bridge. Rolls back in reverse order on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;	/* for rollback */
	int err;

	/* Creates the {port, VLAN} or takes a reference on an existing one;
	 * the reference is dropped on any error below.
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
1123 
1124 static int
1125 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1126 				const struct net_device *br_dev,
1127 				const struct switchdev_obj_port_vlan *vlan)
1128 {
1129 	struct mlxsw_sp_rif *rif;
1130 	struct mlxsw_sp_fid *fid;
1131 	u16 pvid;
1132 	u16 vid;
1133 
1134 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1135 	if (!rif)
1136 		return 0;
1137 	fid = mlxsw_sp_rif_fid(rif);
1138 	pvid = mlxsw_sp_fid_8021q_vid(fid);
1139 
1140 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1141 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1142 			if (vid != pvid) {
1143 				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1144 				return -EBUSY;
1145 			}
1146 		} else {
1147 			if (vid == pvid) {
1148 				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1149 				return -EBUSY;
1150 			}
1151 		}
1152 	}
1153 
1154 	return 0;
1155 }
1156 
/* SWITCHDEV_OBJ_ID_PORT_VLAN add handler. Called twice by switchdev:
 * once in the prepare phase (validation only) and once in the commit
 * phase (actual configuration).
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	/* VLAN entries on the bridge device itself are not offloaded, but
	 * during the prepare phase we still veto PVID changes that would
	 * break a router interface on the bridge. -EOPNOTSUPP (rather than
	 * 0) is returned so the bridge keeps handling the entry itself.
	 */
	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	/* Nothing to configure during the prepare phase. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLAN-unaware bridges take no per-VLAN configuration. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}
1203 
1204 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1205 {
1206 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1207 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1208 }
1209 
1210 static int
1211 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1212 			       struct mlxsw_sp_bridge_port *bridge_port,
1213 			       u16 fid_index)
1214 {
1215 	bool lagged = bridge_port->lagged;
1216 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1217 	u16 system_port;
1218 
1219 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1220 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1221 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1222 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1223 
1224 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1225 }
1226 
1227 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1228 {
1229 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1230 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1231 }
1232 
1233 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1234 {
1235 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1236 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1237 }
1238 
/* Add or remove a unicast FDB record for a single local port via the SFD
 * register. 'action' selects forwarding behavior (NOP for bridging,
 * FORWARD_IP_ROUTER for RIF MACs); 'dynamic' selects the record policy.
 * Returns 0 or a negative errno.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* Register payload is too large for the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	/* Snapshot the requested record count; the response rewrites this
	 * field with the number of records the device actually processed.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* Fewer processed than requested: record was not committed. */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1267 
/* Unicast FDB add/remove for bridging: plain forwarding (NOP action). */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}
1275 
/* Install/remove the static FDB record that traps a router interface's MAC
 * to the router (local_port 0, FORWARD_IP_ROUTER action). Exported for the
 * router code.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}
1283 
/* Add or remove a unicast FDB record pointing at a LAG. 'lag_vid' carries
 * the VLAN used when egressing through the LAG. Same SFD write-and-verify
 * protocol as __mlxsw_sp_port_fdb_uc_op().
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* Register payload is too large for the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	/* The response rewrites num_rec with the processed record count. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1312 
1313 static int
1314 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1315 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1316 {
1317 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1318 	struct net_device *orig_dev = fdb_info->info.dev;
1319 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1320 	struct mlxsw_sp_bridge_device *bridge_device;
1321 	struct mlxsw_sp_bridge_port *bridge_port;
1322 	u16 fid_index, vid;
1323 
1324 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1325 	if (!bridge_port)
1326 		return -EINVAL;
1327 
1328 	bridge_device = bridge_port->bridge_device;
1329 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1330 							       bridge_device,
1331 							       fdb_info->vid);
1332 	if (!mlxsw_sp_port_vlan)
1333 		return 0;
1334 
1335 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1336 	vid = mlxsw_sp_port_vlan->vid;
1337 
1338 	if (!bridge_port->lagged)
1339 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1340 					       bridge_port->system_port,
1341 					       fdb_info->addr, fid_index,
1342 					       adding, false);
1343 	else
1344 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1345 						   bridge_port->lag_id,
1346 						   fdb_info->addr, fid_index,
1347 						   vid, adding, false);
1348 }
1349 
/* Add or remove a multicast FDB record that maps {addr, fid} to the MID
 * (multicast distribution) entry at mid_idx. Same SFD write-and-verify
 * protocol as the unicast variants.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* Register payload is too large for the stack. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	/* The response rewrites num_rec with the processed record count. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1376 
/* Program a complete SMID entry: the set of local ports (and optionally
 * the router port) that packets hitting MID mid_idx are replicated to.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	/* Set the mask bit of every existing port (and the router port), so
	 * their membership bits below take effect — ports not in
	 * ports_bitmap are thereby explicitly cleared from the entry.
	 * NOTE(review): mask semantics inferred from the port_mask_set /
	 * port_set API split — confirm against the SMID register spec.
	 */
	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}

	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	/* Member ports from the requested flood set. */
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
1407 
/* Add or remove a single local port in the SMID entry at mid_idx, leaving
 * all other ports of the entry untouched.
 */
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 mid_idx, bool add)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
1424 
1425 static struct
1426 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1427 				const unsigned char *addr,
1428 				u16 fid)
1429 {
1430 	struct mlxsw_sp_mid *mid;
1431 
1432 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1433 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1434 			return mid;
1435 	}
1436 	return NULL;
1437 }
1438 
1439 static void
1440 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1441 				      struct mlxsw_sp_bridge_port *bridge_port,
1442 				      unsigned long *ports_bitmap)
1443 {
1444 	struct mlxsw_sp_port *mlxsw_sp_port;
1445 	u64 max_lag_members, i;
1446 	int lag_id;
1447 
1448 	if (!bridge_port->lagged) {
1449 		set_bit(bridge_port->system_port, ports_bitmap);
1450 	} else {
1451 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1452 						     MAX_LAG_MEMBERS);
1453 		lag_id = bridge_port->lag_id;
1454 		for (i = 0; i < max_lag_members; i++) {
1455 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1456 								 lag_id, i);
1457 			if (mlxsw_sp_port)
1458 				set_bit(mlxsw_sp_port->local_port,
1459 					ports_bitmap);
1460 		}
1461 	}
1462 }
1463 
1464 static void
1465 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1466 				struct mlxsw_sp_bridge_device *bridge_device,
1467 				struct mlxsw_sp *mlxsw_sp)
1468 {
1469 	struct mlxsw_sp_bridge_port *bridge_port;
1470 
1471 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1472 		if (bridge_port->mrouter) {
1473 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1474 							      bridge_port,
1475 							      flood_bitmap);
1476 		}
1477 	}
1478 }
1479 
/* Install a MID entry in hardware: reserve a free MID index, program the
 * SMID flood set (explicit members plus mrouter ports, optionally the
 * router port) and the multicast FDB record. Returns true on success,
 * false on any failure (allocation, no free index, register write).
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	/* Flood set = explicit members + all mrouter ports. */
	bitmap_copy(flood_bitmap,  mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	/* Only reserve the index once everything is programmed. */
	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
1521 
/* Withdraw a MID entry from hardware: release its index and remove the
 * multicast FDB record. No-op for entries that were never installed.
 */
static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_mid *mid)
{
	if (!mid->in_hw)
		return 0;

	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = false;
	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
				    false);
}
1533 
/* Allocate a new MID entry for {addr, fid} and link it to the bridge
 * device. The entry is programmed into hardware only if multicast
 * snooping is enabled; otherwise it is tracked in software only
 * (in_hw == false) until mlxsw_sp_bridge_mdb_mc_enable_sync() installs it.
 * Returns NULL on failure.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	/* Member bitmap sized by the maximum number of local ports. */
	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
1574 
1575 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1576 					 struct mlxsw_sp_mid *mid)
1577 {
1578 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1579 	int err = 0;
1580 
1581 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1582 	if (bitmap_empty(mid->ports_in_mid,
1583 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1584 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1585 		list_del(&mid->list);
1586 		kfree(mid->ports_in_mid);
1587 		kfree(mid);
1588 	}
1589 	return err;
1590 }
1591 
/* SWITCHDEV_OBJ_ID_PORT_MDB add handler: add the port to the MID entry
 * for {group MAC, FID}, allocating the entry on first use.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	/* Nothing to validate in the prepare phase. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	/* VLAN not offloaded on this port — nothing to program. */
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	/* Record software membership even when HW programming is skipped
	 * below (snooping disabled, or the port is already flooded to as
	 * an mrouter port).
	 */
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	if (!bridge_device->multicast_enabled)
		return 0;

	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	/* Also frees the MID entry if this port was its only member. */
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
1651 
1652 static void
1653 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1654 				   struct mlxsw_sp_bridge_device
1655 				   *bridge_device)
1656 {
1657 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1658 	struct mlxsw_sp_mid *mid;
1659 	bool mc_enabled;
1660 
1661 	mc_enabled = bridge_device->multicast_enabled;
1662 
1663 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1664 		if (mc_enabled)
1665 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1666 						    bridge_device);
1667 		else
1668 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1669 	}
1670 }
1671 
1672 static void
1673 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1674 				 struct mlxsw_sp_bridge_port *bridge_port,
1675 				 bool add)
1676 {
1677 	struct mlxsw_sp_bridge_device *bridge_device;
1678 	struct mlxsw_sp_mid *mid;
1679 
1680 	bridge_device = bridge_port->bridge_device;
1681 
1682 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1683 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1684 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1685 	}
1686 }
1687 
/* Deferred work used to re-resolve SPAN (mirroring) state after a bridge
 * configuration change; freed by the work function itself.
 */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;	/* ASIC instance to respin */
};
1692 
1693 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1694 {
1695 	struct mlxsw_sp_span_respin_work *respin_work =
1696 		container_of(work, struct mlxsw_sp_span_respin_work, work);
1697 
1698 	rtnl_lock();
1699 	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
1700 	rtnl_unlock();
1701 	kfree(respin_work);
1702 }
1703 
/* Schedule a deferred SPAN respin. Best effort: on allocation failure the
 * respin is silently skipped. GFP_ATOMIC — presumably callers may be in
 * atomic context; TODO confirm.
 */
static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_span_respin_work *respin_work;

	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
	if (!respin_work)
		return;

	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
	respin_work->mlxsw_sp = mlxsw_sp;

	/* Freed by mlxsw_sp_span_respin_work(). */
	mlxsw_core_schedule_work(&respin_work->work);
}
1717 
/* switchdev obj_add entry point: dispatch VLAN and MDB object additions
 * to the respective handlers.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
1752 
/* Un-offload one VLAN entry on a bridge port: the inverse of
 * mlxsw_sp_bridge_port_vlan_add().
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* NOTE(review): when the deleted VID is the current PVID, PVID is
	 * cleared (untagged traffic dis-allowed). But when it is NOT the
	 * current PVID, the deleted VID itself is passed to
	 * mlxsw_sp_port_pvid_set() — verify against that function's
	 * semantics that this does not retarget the PVID to the removed
	 * VLAN.
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	/* Drop the reference taken in mlxsw_sp_bridge_port_vlan_add(). */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
1769 
1770 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1771 				   const struct switchdev_obj_port_vlan *vlan)
1772 {
1773 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1774 	struct net_device *orig_dev = vlan->obj.orig_dev;
1775 	struct mlxsw_sp_bridge_port *bridge_port;
1776 	u16 vid;
1777 
1778 	if (netif_is_bridge_master(orig_dev))
1779 		return -EOPNOTSUPP;
1780 
1781 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1782 	if (WARN_ON(!bridge_port))
1783 		return -EINVAL;
1784 
1785 	if (!bridge_port->bridge_device->vlan_enabled)
1786 		return 0;
1787 
1788 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
1789 		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
1790 
1791 	return 0;
1792 }
1793 
/* Remove a port from a MID entry: clear the SMID bit (unless the port is
 * flooded to as an mrouter port anyway) and drop the software membership,
 * freeing the entry when this was its last member. Errors are logged but
 * teardown continues.
 */
static int
__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
			struct mlxsw_sp_bridge_port *bridge_port,
			struct mlxsw_sp_mid *mid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* An mrouter port keeps receiving the group via the mrouter flood
	 * set, so its SMID bit is left alone.
	 */
	if (bridge_port->bridge_device->multicast_enabled &&
	    !bridge_port->mrouter) {
		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		if (err)
			netdev_err(dev, "Unable to remove port from SMID\n");
	}

	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	if (err)
		netdev_err(dev, "Unable to remove MC SFD\n");

	return err;
}
1815 
/* SWITCHDEV_OBJ_ID_PORT_MDB delete handler: look up the MID entry for
 * {group MAC, FID} and remove this port from it.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	/* VLAN not offloaded on this port — nothing was programmed. */
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}
1849 
1850 static void
1851 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1852 			       struct mlxsw_sp_bridge_port *bridge_port)
1853 {
1854 	struct mlxsw_sp_bridge_device *bridge_device;
1855 	struct mlxsw_sp_mid *mid, *tmp;
1856 
1857 	bridge_device = bridge_port->bridge_device;
1858 
1859 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1860 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1861 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1862 						mid);
1863 		} else if (bridge_device->multicast_enabled &&
1864 			   bridge_port->mrouter) {
1865 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1866 		}
1867 	}
1868 }
1869 
1870 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1871 				 const struct switchdev_obj *obj)
1872 {
1873 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1874 	int err = 0;
1875 
1876 	switch (obj->id) {
1877 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1878 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1879 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1880 		break;
1881 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1882 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1883 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1884 		break;
1885 	default:
1886 		err = -EOPNOTSUPP;
1887 		break;
1888 	}
1889 
1890 	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1891 
1892 	return err;
1893 }
1894 
1895 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1896 						   u16 lag_id)
1897 {
1898 	struct mlxsw_sp_port *mlxsw_sp_port;
1899 	u64 max_lag_members;
1900 	int i;
1901 
1902 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1903 					     MAX_LAG_MEMBERS);
1904 	for (i = 0; i < max_lag_members; i++) {
1905 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1906 		if (mlxsw_sp_port)
1907 			return mlxsw_sp_port;
1908 	}
1909 	return NULL;
1910 }
1911 
/* switchdev callbacks registered on every port netdev. */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
};
1917 };
1918 
/* Join a port to a VLAN-aware (802.1Q) bridge. Per-VLAN configuration is
 * driven later by switchdev VLAN objects, so this only releases the
 * default VID-1 {port, VLAN}.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Let VLAN-aware bridge take care of its own VLANs */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
1941 
/* Leave a VLAN-aware bridge: re-create the default VID-1 {port, VLAN}
 * released on join and restore PVID 1.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
1951 
1952 static struct mlxsw_sp_fid *
1953 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
1954 			      u16 vid)
1955 {
1956 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1957 
1958 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
1959 }
1960 
/* Operations for VLAN-aware (802.1Q) bridge devices. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
};
1966 
1967 static bool
1968 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
1969 			   const struct net_device *br_dev)
1970 {
1971 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1972 
1973 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
1974 			    list) {
1975 		if (mlxsw_sp_port_vlan->bridge_port &&
1976 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
1977 		    br_dev)
1978 			return true;
1979 	}
1980 
1981 	return false;
1982 }
1983 
/* Join a port (or one of its VLAN uppers) to a VLAN-unaware (802.1D)
 * bridge: attach the corresponding {port, VLAN} to the bridge port.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	/* A VLAN upper is bridged via the VLAN it represents; the physical
	 * port itself via the default VID 1.
	 */
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Two VLAN uppers of one port in the same bridge would demand two
	 * FIDs for the same {port, VLAN}; reject it.
	 */
	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}
2010 
2011 static void
2012 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2013 				 struct mlxsw_sp_bridge_port *bridge_port,
2014 				 struct mlxsw_sp_port *mlxsw_sp_port)
2015 {
2016 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2017 	struct net_device *dev = bridge_port->dev;
2018 	u16 vid;
2019 
2020 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
2021 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2022 	if (WARN_ON(!mlxsw_sp_port_vlan))
2023 		return;
2024 
2025 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2026 }
2027 
2028 static struct mlxsw_sp_fid *
2029 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2030 			      u16 vid)
2031 {
2032 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2033 
2034 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2035 }
2036 
/* Bridge ops used when the bridge device is VLAN-unaware (802.1D). */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
};
2042 
2043 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2044 			      struct net_device *brport_dev,
2045 			      struct net_device *br_dev,
2046 			      struct netlink_ext_ack *extack)
2047 {
2048 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2049 	struct mlxsw_sp_bridge_device *bridge_device;
2050 	struct mlxsw_sp_bridge_port *bridge_port;
2051 	int err;
2052 
2053 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
2054 	if (IS_ERR(bridge_port))
2055 		return PTR_ERR(bridge_port);
2056 	bridge_device = bridge_port->bridge_device;
2057 
2058 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2059 					    mlxsw_sp_port, extack);
2060 	if (err)
2061 		goto err_port_join;
2062 
2063 	return 0;
2064 
2065 err_port_join:
2066 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2067 	return err;
2068 }
2069 
2070 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2071 				struct net_device *brport_dev,
2072 				struct net_device *br_dev)
2073 {
2074 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2075 	struct mlxsw_sp_bridge_device *bridge_device;
2076 	struct mlxsw_sp_bridge_port *bridge_port;
2077 
2078 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2079 	if (!bridge_device)
2080 		return;
2081 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2082 	if (!bridge_port)
2083 		return;
2084 
2085 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2086 				       mlxsw_sp_port);
2087 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2088 }
2089 
2090 static void
2091 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2092 			    const char *mac, u16 vid,
2093 			    struct net_device *dev)
2094 {
2095 	struct switchdev_notifier_fdb_info info;
2096 
2097 	info.addr = mac;
2098 	info.vid = vid;
2099 	call_switchdev_notifiers(type, dev, &info.info);
2100 }
2101 
/* Process one learned/aged-out unicast MAC record from the SFN register.
 *
 * Normal path: confirm the entry in the device FDB (or delete it when
 * @adding is false) and notify the bridge via switchdev. If the record
 * cannot be mapped to a known {Port, VID} it is deleted from the device
 * without notifying the bridge (just_remove path).
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	/* Write the entry back to the device; also serves as the delete
	 * operation on the just_remove path (adding == false).
	 */
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Unmappable record: delete it from the device and skip the
	 * bridge notification.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2159 
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the record refers
 * to a LAG ID rather than a local port, so resolve it through the LAG's
 * representor port. Same just_remove fallback for unmappable records.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are notified with VID 0. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	/* Write the entry back to the device; also serves as the delete
	 * operation on the just_remove path (adding == false).
	 */
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Unmappable record: delete it from the device and skip the
	 * bridge notification.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2219 
2220 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2221 					    char *sfn_pl, int rec_index)
2222 {
2223 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2224 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2225 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2226 						rec_index, true);
2227 		break;
2228 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2229 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2230 						rec_index, false);
2231 		break;
2232 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2233 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2234 						    rec_index, true);
2235 		break;
2236 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2237 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2238 						    rec_index, false);
2239 		break;
2240 	}
2241 }
2242 
2243 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2244 {
2245 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2246 
2247 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2248 			       msecs_to_jiffies(bridge->fdb_notify.interval));
2249 }
2250 
2251 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2252 {
2253 	struct mlxsw_sp_bridge *bridge;
2254 	struct mlxsw_sp *mlxsw_sp;
2255 	char *sfn_pl;
2256 	u8 num_rec;
2257 	int i;
2258 	int err;
2259 
2260 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2261 	if (!sfn_pl)
2262 		return;
2263 
2264 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2265 	mlxsw_sp = bridge->mlxsw_sp;
2266 
2267 	rtnl_lock();
2268 	mlxsw_reg_sfn_pack(sfn_pl);
2269 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2270 	if (err) {
2271 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2272 		goto out;
2273 	}
2274 	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2275 	for (i = 0; i < num_rec; i++)
2276 		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2277 
2278 out:
2279 	rtnl_unlock();
2280 	kfree(sfn_pl);
2281 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2282 }
2283 
/* Deferred-work context for an FDB switchdev event. Owns a deep copy of
 * the FDB info (fdb_info.addr is separately allocated) and a reference
 * on @dev taken via dev_hold(); both are released by the work function.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;	/* SWITCHDEV_FDB_{ADD,DEL}_TO_{DEVICE,BRIDGE} */
};
2290 
/* Work function for deferred FDB switchdev events: program user-added
 * FDB entries into the device, refresh SPAN mirrors and release the
 * resources taken in mlxsw_sp_switchdev_event().
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* @dev may be an upper; find the mlxsw port underneath it. */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-added (static) entries are programmed here;
		 * learned entries are handled via the SFN polling path.
		 */
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		/* Tell the bridge the entry is now offloaded. */
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	/* Release the deep copy and the device reference taken in
	 * mlxsw_sp_switchdev_event().
	 */
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
2339 
/* Called under rcu_read_lock() */
/* Switchdev notifier callback. Runs in atomic context, so it only
 * filters events relevant to this driver, deep-copies the FDB info
 * (GFP_ATOMIC) and defers the actual processing to a work item.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	/* Ignore bridges with no mlxsw port beneath them. */
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		/* Shallow copy of the notifier info; the MAC address it
		 * points to is duplicated below so the work item owns it.
		 */
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
2402 
/* Registered in mlxsw_sp_fdb_init(), unregistered in mlxsw_sp_fdb_fini(). */
static struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
2406 
/* Return the cached STP state (BR_STATE_*) of @bridge_port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
2412 
/* Set up FDB handling: default ageing time, switchdev notifier and the
 * periodic SFN polling work. Torn down by mlxsw_sp_fdb_fini().
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	/* Start polling the device for FDB notifications. */
	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
2435 
2436 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
2437 {
2438 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
2439 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2440 
2441 }
2442 
2443 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
2444 {
2445 	struct mlxsw_sp_bridge *bridge;
2446 
2447 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
2448 	if (!bridge)
2449 		return -ENOMEM;
2450 	mlxsw_sp->bridge = bridge;
2451 	bridge->mlxsw_sp = mlxsw_sp;
2452 
2453 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
2454 
2455 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
2456 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
2457 
2458 	return mlxsw_sp_fdb_init(mlxsw_sp);
2459 }
2460 
/* Counterpart of mlxsw_sp_switchdev_init(); all bridge devices must
 * have been destroyed by now, hence the empty-list warning.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
2467 
/* Attach the driver's switchdev ops to the port's net_device. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
2472 
/* Nothing to undo per port; kept for symmetry with the init call. */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}
2476