xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c (revision 28efb0046512e8a13ed9f9bdf0d68d10bbfbe9cf)
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <linux/device.h>
43 #include <linux/skbuff.h>
44 #include <linux/if_vlan.h>
45 #include <linux/if_bridge.h>
46 #include <linux/workqueue.h>
47 #include <linux/jiffies.h>
48 #include <linux/rtnetlink.h>
49 #include <net/switchdev.h>
50 
51 #include "spectrum.h"
52 #include "core.h"
53 #include "reg.h"
54 
55 struct mlxsw_sp_bridge_ops;
56 
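/* Software model of the offloaded bridge configuration. A single
 * mlxsw_sp_bridge instance exists per device and tracks every offloaded
 * Linux bridge (mlxsw_sp_bridge_device), each bridge's member ports
 * (mlxsw_sp_bridge_port) and the VLANs configured on those ports
 * (mlxsw_sp_bridge_vlan). Only one VLAN-aware bridge may be offloaded
 * at a time (vlan_enabled_exists); fdb_notify holds the delayed work
 * and polling interval used for FDB notification processing.
 */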
57 struct mlxsw_sp_bridge {
58 	struct mlxsw_sp *mlxsw_sp;
59 	struct {
60 		struct delayed_work dw;
61 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
62 		unsigned int interval; /* ms */
63 	} fdb_notify;
64 #define MLXSW_SP_MIN_AGEING_TIME 10
65 #define MLXSW_SP_MAX_AGEING_TIME 1000000
66 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
67 	u32 ageing_time;
68 	bool vlan_enabled_exists;
69 	struct list_head bridges_list;
70 	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
71 	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
72 	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
73 };
74 
75 struct mlxsw_sp_bridge_device {
76 	struct net_device *dev;
77 	struct list_head list;
78 	struct list_head ports_list;
79 	struct list_head mids_list;
80 	u8 vlan_enabled:1,
81 	   multicast_enabled:1;
82 	const struct mlxsw_sp_bridge_ops *ops;
83 };
84 
85 struct mlxsw_sp_bridge_port {
86 	struct net_device *dev;
87 	struct mlxsw_sp_bridge_device *bridge_device;
88 	struct list_head list;
89 	struct list_head vlans_list;
90 	unsigned int ref_count;
91 	u8 stp_state;
92 	unsigned long flags;
93 	bool mrouter;
94 	bool lagged;
95 	union {
96 		u16 lag_id;
97 		u16 system_port;
98 	};
99 };
100 
101 struct mlxsw_sp_bridge_vlan {
102 	struct list_head list;
103 	struct list_head port_vlan_list;
104 	u16 vid;
105 };
106 
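/* Bridge-type specific operations, bound when the bridge device is
 * created: bridge_8021q_ops for the VLAN-aware bridge and
 * bridge_8021d_ops for VLAN-unaware bridges. fid_get() resolves the
 * FID backing a given VID within the bridge.
 */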
107 struct mlxsw_sp_bridge_ops {
108 	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
109 			 struct mlxsw_sp_bridge_port *bridge_port,
110 			 struct mlxsw_sp_port *mlxsw_sp_port);
111 	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
112 			   struct mlxsw_sp_bridge_port *bridge_port,
113 			   struct mlxsw_sp_port *mlxsw_sp_port);
114 	struct mlxsw_sp_fid *
115 		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
116 			   u16 vid);
117 };
118 
119 static int
120 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
121 			       struct mlxsw_sp_bridge_port *bridge_port,
122 			       u16 fid_index);
123 
124 static void
125 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
126 			       struct mlxsw_sp_bridge_port *bridge_port);
127 
128 static void
129 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
130 				   struct mlxsw_sp_bridge_device
131 				   *bridge_device);
132 
133 static void
134 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
135 				 struct mlxsw_sp_bridge_port *bridge_port,
136 				 bool add);
137 
138 static struct mlxsw_sp_bridge_device *
139 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
140 			    const struct net_device *br_dev)
141 {
142 	struct mlxsw_sp_bridge_device *bridge_device;
143 
144 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
145 		if (bridge_device->dev == br_dev)
146 			return bridge_device;
147 
148 	return NULL;
149 }
150 
151 static struct mlxsw_sp_bridge_device *
152 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
153 			      struct net_device *br_dev)
154 {
155 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
156 	struct mlxsw_sp_bridge_device *bridge_device;
157 	bool vlan_enabled = br_vlan_enabled(br_dev);
158 
159 	if (vlan_enabled && bridge->vlan_enabled_exists) {
160 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
161 		return ERR_PTR(-EINVAL);
162 	}
163 
164 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
165 	if (!bridge_device)
166 		return ERR_PTR(-ENOMEM);
167 
168 	bridge_device->dev = br_dev;
169 	bridge_device->vlan_enabled = vlan_enabled;
170 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
171 	INIT_LIST_HEAD(&bridge_device->ports_list);
172 	if (vlan_enabled) {
173 		bridge->vlan_enabled_exists = true;
174 		bridge_device->ops = bridge->bridge_8021q_ops;
175 	} else {
176 		bridge_device->ops = bridge->bridge_8021d_ops;
177 	}
178 	INIT_LIST_HEAD(&bridge_device->mids_list);
179 	list_add(&bridge_device->list, &bridge->bridges_list);
180 
181 	return bridge_device;
182 }
183 
184 static void
185 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
186 			       struct mlxsw_sp_bridge_device *bridge_device)
187 {
188 	list_del(&bridge_device->list);
189 	if (bridge_device->vlan_enabled)
190 		bridge->vlan_enabled_exists = false;
191 	WARN_ON(!list_empty(&bridge_device->ports_list));
192 	WARN_ON(!list_empty(&bridge_device->mids_list));
193 	kfree(bridge_device);
194 }
195 
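/* Bridge devices are created on first use and destroyed once their
 * ports_list becomes empty; mlxsw_sp_bridge_device_get() and
 * mlxsw_sp_bridge_device_put() implement this lifetime.
 */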
196 static struct mlxsw_sp_bridge_device *
197 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
198 			   struct net_device *br_dev)
199 {
200 	struct mlxsw_sp_bridge_device *bridge_device;
201 
202 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
203 	if (bridge_device)
204 		return bridge_device;
205 
206 	return mlxsw_sp_bridge_device_create(bridge, br_dev);
207 }
208 
209 static void
210 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
211 			   struct mlxsw_sp_bridge_device *bridge_device)
212 {
213 	if (list_empty(&bridge_device->ports_list))
214 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
215 }
216 
217 static struct mlxsw_sp_bridge_port *
218 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
219 			    const struct net_device *brport_dev)
220 {
221 	struct mlxsw_sp_bridge_port *bridge_port;
222 
223 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
224 		if (bridge_port->dev == brport_dev)
225 			return bridge_port;
226 	}
227 
228 	return NULL;
229 }
230 
231 static struct mlxsw_sp_bridge_port *
232 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
233 			  struct net_device *brport_dev)
234 {
235 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
236 	struct mlxsw_sp_bridge_device *bridge_device;
237 
238 	if (!br_dev)
239 		return NULL;
240 
241 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
242 	if (!bridge_device)
243 		return NULL;
244 
245 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
246 }
247 
248 static struct mlxsw_sp_bridge_port *
249 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
250 			    struct net_device *brport_dev)
251 {
252 	struct mlxsw_sp_bridge_port *bridge_port;
253 	struct mlxsw_sp_port *mlxsw_sp_port;
254 
255 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
256 	if (!bridge_port)
257 		return NULL;
258 
259 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
260 	bridge_port->lagged = mlxsw_sp_port->lagged;
261 	if (bridge_port->lagged)
262 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
263 	else
264 		bridge_port->system_port = mlxsw_sp_port->local_port;
265 	bridge_port->dev = brport_dev;
266 	bridge_port->bridge_device = bridge_device;
267 	bridge_port->stp_state = BR_STATE_DISABLED;
268 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
269 			     BR_MCAST_FLOOD;
270 	INIT_LIST_HEAD(&bridge_port->vlans_list);
271 	list_add(&bridge_port->list, &bridge_device->ports_list);
272 	bridge_port->ref_count = 1;
273 
274 	return bridge_port;
275 }
276 
277 static void
278 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
279 {
280 	list_del(&bridge_port->list);
281 	WARN_ON(!list_empty(&bridge_port->vlans_list));
282 	kfree(bridge_port);
283 }
284 
285 static bool
286 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
287 				    bridge_port)
288 {
289 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
290 
291 	/* In case ports were pulled out of a bridged LAG, it's
292 	 * possible the reference count isn't zero, yet the bridge
293 	 * port should be destroyed, as it's no longer an upper of ours.
294 	 */
295 	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
296 		return true;
297 	else if (bridge_port->ref_count == 0)
298 		return true;
299 	else
300 		return false;
301 }
302 
303 static struct mlxsw_sp_bridge_port *
304 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
305 			 struct net_device *brport_dev)
306 {
307 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
308 	struct mlxsw_sp_bridge_device *bridge_device;
309 	struct mlxsw_sp_bridge_port *bridge_port;
310 	int err;
311 
312 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
313 	if (bridge_port) {
314 		bridge_port->ref_count++;
315 		return bridge_port;
316 	}
317 
318 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
319 	if (IS_ERR(bridge_device))
320 		return ERR_CAST(bridge_device);
321 
322 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
323 	if (!bridge_port) {
324 		err = -ENOMEM;
325 		goto err_bridge_port_create;
326 	}
327 
328 	return bridge_port;
329 
330 err_bridge_port_create:
331 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
332 	return ERR_PTR(err);
333 }
334 
335 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
336 				     struct mlxsw_sp_bridge_port *bridge_port)
337 {
338 	struct mlxsw_sp_bridge_device *bridge_device;
339 
340 	bridge_port->ref_count--;
341 	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
342 		return;
343 	bridge_device = bridge_port->bridge_device;
344 	mlxsw_sp_bridge_port_destroy(bridge_port);
345 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
346 }
347 
348 static struct mlxsw_sp_port_vlan *
349 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
350 				  const struct mlxsw_sp_bridge_device *
351 				  bridge_device,
352 				  u16 vid)
353 {
354 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
355 
356 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
357 			    list) {
358 		if (!mlxsw_sp_port_vlan->bridge_port)
359 			continue;
360 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
361 		    bridge_device)
362 			continue;
363 		if (bridge_device->vlan_enabled &&
364 		    mlxsw_sp_port_vlan->vid != vid)
365 			continue;
366 		return mlxsw_sp_port_vlan;
367 	}
368 
369 	return NULL;
370 }
371 
372 static struct mlxsw_sp_port_vlan *
373 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
374 			       u16 fid_index)
375 {
376 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
377 
378 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
379 			    list) {
380 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
381 
382 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
383 			return mlxsw_sp_port_vlan;
384 	}
385 
386 	return NULL;
387 }
388 
389 static struct mlxsw_sp_bridge_vlan *
390 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
391 			  u16 vid)
392 {
393 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
394 
395 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
396 		if (bridge_vlan->vid == vid)
397 			return bridge_vlan;
398 	}
399 
400 	return NULL;
401 }
402 
403 static struct mlxsw_sp_bridge_vlan *
404 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
405 {
406 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
407 
408 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
409 	if (!bridge_vlan)
410 		return NULL;
411 
412 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
413 	bridge_vlan->vid = vid;
414 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
415 
416 	return bridge_vlan;
417 }
418 
419 static void
420 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
421 {
422 	list_del(&bridge_vlan->list);
423 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
424 	kfree(bridge_vlan);
425 }
426 
427 static struct mlxsw_sp_bridge_vlan *
428 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
429 {
430 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
431 
432 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
433 	if (bridge_vlan)
434 		return bridge_vlan;
435 
436 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
437 }
438 
439 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
440 {
441 	if (list_empty(&bridge_vlan->port_vlan_list))
442 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
443 }
444 
445 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
446 					   struct net_device *dev,
447 					   unsigned long *brport_flags)
448 {
449 	struct mlxsw_sp_bridge_port *bridge_port;
450 
451 	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
452 	if (WARN_ON(!bridge_port))
453 		return;
454 
455 	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
456 }
457 
458 static int mlxsw_sp_port_attr_get(struct net_device *dev,
459 				  struct switchdev_attr *attr)
460 {
461 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
462 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
463 
464 	switch (attr->id) {
465 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
466 		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
467 		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
468 		       attr->u.ppid.id_len);
469 		break;
470 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
471 		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
472 					       &attr->u.brport_flags);
473 		break;
474 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
475 		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
476 					       BR_MCAST_FLOOD;
477 		break;
478 	default:
479 		return -EOPNOTSUPP;
480 	}
481 
482 	return 0;
483 }
484 
485 static int
486 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
487 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
488 				  u8 state)
489 {
490 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
491 
492 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
493 			    bridge_vlan_node) {
494 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
495 			continue;
496 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
497 						 bridge_vlan->vid, state);
498 	}
499 
500 	return 0;
501 }
502 
503 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
504 					    struct switchdev_trans *trans,
505 					    struct net_device *orig_dev,
506 					    u8 state)
507 {
508 	struct mlxsw_sp_bridge_port *bridge_port;
509 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
510 	int err;
511 
512 	if (switchdev_trans_ph_prepare(trans))
513 		return 0;
514 
515 	/* It's possible we failed to enslave the port, yet this
516 	 * operation is executed due to it being deferred.
517 	 */
518 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
519 						orig_dev);
520 	if (!bridge_port)
521 		return 0;
522 
523 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
524 		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
525 							bridge_vlan, state);
526 		if (err)
527 			goto err_port_bridge_vlan_stp_set;
528 	}
529 
530 	bridge_port->stp_state = state;
531 
532 	return 0;
533 
534 err_port_bridge_vlan_stp_set:
535 	list_for_each_entry_continue_reverse(bridge_vlan,
536 					     &bridge_port->vlans_list, list)
537 		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
538 						  bridge_port->stp_state);
539 	return err;
540 }
541 
542 static int
543 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
544 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
545 				    enum mlxsw_sp_flood_type packet_type,
546 				    bool member)
547 {
548 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
549 
550 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
551 			    bridge_vlan_node) {
552 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
553 			continue;
554 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
555 					      packet_type,
556 					      mlxsw_sp_port->local_port,
557 					      member);
558 	}
559 
560 	return 0;
561 }
562 
563 static int
564 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
565 				     struct mlxsw_sp_bridge_port *bridge_port,
566 				     enum mlxsw_sp_flood_type packet_type,
567 				     bool member)
568 {
569 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
570 	int err;
571 
572 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
573 		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
574 							  bridge_vlan,
575 							  packet_type,
576 							  member);
577 		if (err)
578 			goto err_port_bridge_vlan_flood_set;
579 	}
580 
581 	return 0;
582 
583 err_port_bridge_vlan_flood_set:
584 	list_for_each_entry_continue_reverse(bridge_vlan,
585 					     &bridge_port->vlans_list, list)
586 		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
587 						    packet_type, !member);
588 	return err;
589 }
590 
591 static int
592 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
593 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
594 				       bool set)
595 {
596 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
597 	u16 vid = bridge_vlan->vid;
598 
599 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
600 			    bridge_vlan_node) {
601 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
602 			continue;
603 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
604 	}
605 
606 	return 0;
607 }
608 
609 static int
610 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
611 				  struct mlxsw_sp_bridge_port *bridge_port,
612 				  bool set)
613 {
614 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
615 	int err;
616 
617 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
618 		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
619 							     bridge_vlan, set);
620 		if (err)
621 			goto err_port_bridge_vlan_learning_set;
622 	}
623 
624 	return 0;
625 
626 err_port_bridge_vlan_learning_set:
627 	list_for_each_entry_continue_reverse(bridge_vlan,
628 					     &bridge_port->vlans_list, list)
629 		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
630 						       bridge_vlan, !set);
631 	return err;
632 }
633 
634 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
635 					   struct switchdev_trans *trans,
636 					   struct net_device *orig_dev,
637 					   unsigned long brport_flags)
638 {
639 	struct mlxsw_sp_bridge_port *bridge_port;
640 	int err;
641 
642 	if (switchdev_trans_ph_prepare(trans))
643 		return 0;
644 
645 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
646 						orig_dev);
647 	if (!bridge_port)
648 		return 0;
649 
650 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
651 						   MLXSW_SP_FLOOD_TYPE_UC,
652 						   brport_flags & BR_FLOOD);
653 	if (err)
654 		return err;
655 
656 	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
657 						brport_flags & BR_LEARNING);
658 	if (err)
659 		return err;
660 
661 	if (bridge_port->bridge_device->multicast_enabled)
662 		goto out;
663 
664 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
665 						   MLXSW_SP_FLOOD_TYPE_MC,
666 						   brport_flags &
667 						   BR_MCAST_FLOOD);
668 	if (err)
669 		return err;
670 
671 out:
672 	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
673 	return 0;
674 }
675 
676 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
677 {
678 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
679 	int err;
680 
681 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
682 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
683 	if (err)
684 		return err;
685 	mlxsw_sp->bridge->ageing_time = ageing_time;
686 	return 0;
687 }
688 
689 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
690 					    struct switchdev_trans *trans,
691 					    unsigned long ageing_clock_t)
692 {
693 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
694 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
695 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
696 
697 	if (switchdev_trans_ph_prepare(trans)) {
698 		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
699 		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
700 			return -ERANGE;
701 		else
702 			return 0;
703 	}
704 
705 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
706 }
707 
708 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
709 					  struct switchdev_trans *trans,
710 					  struct net_device *orig_dev,
711 					  bool vlan_enabled)
712 {
713 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
714 	struct mlxsw_sp_bridge_device *bridge_device;
715 
716 	if (!switchdev_trans_ph_prepare(trans))
717 		return 0;
718 
719 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
720 	if (WARN_ON(!bridge_device))
721 		return -EINVAL;
722 
723 	if (bridge_device->vlan_enabled == vlan_enabled)
724 		return 0;
725 
726 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
727 	return -EINVAL;
728 }
729 
730 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
731 					  struct switchdev_trans *trans,
732 					  struct net_device *orig_dev,
733 					  bool is_port_mrouter)
734 {
735 	struct mlxsw_sp_bridge_port *bridge_port;
736 	int err;
737 
738 	if (switchdev_trans_ph_prepare(trans))
739 		return 0;
740 
741 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
742 						orig_dev);
743 	if (!bridge_port)
744 		return 0;
745 
746 	if (!bridge_port->bridge_device->multicast_enabled)
747 		goto out;
748 
749 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
750 						   MLXSW_SP_FLOOD_TYPE_MC,
751 						   is_port_mrouter);
752 	if (err)
753 		return err;
754 
755 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
756 					 is_port_mrouter);
757 out:
758 	bridge_port->mrouter = is_port_mrouter;
759 	return 0;
760 }
761 
762 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
763 {
764 	const struct mlxsw_sp_bridge_device *bridge_device;
765 
766 	bridge_device = bridge_port->bridge_device;
767 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
768 					bridge_port->flags & BR_MCAST_FLOOD;
769 }
770 
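/* Toggle multicast snooping on the bridge: synchronize the MDB with the
 * new state and update the multicast flood table of every bridge port.
 * With snooping disabled ports flood according to BR_MCAST_FLOOD, with
 * snooping enabled only mrouter ports do (see mlxsw_sp_mc_flood()).
 */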
771 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
772 					 struct switchdev_trans *trans,
773 					 struct net_device *orig_dev,
774 					 bool mc_disabled)
775 {
776 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
777 	struct mlxsw_sp_bridge_device *bridge_device;
778 	struct mlxsw_sp_bridge_port *bridge_port;
779 	int err;
780 
781 	if (switchdev_trans_ph_prepare(trans))
782 		return 0;
783 
784 	/* It's possible we failed to enslave the port, yet this
785 	 * operation is executed due to it being deferred.
786 	 */
787 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
788 	if (!bridge_device)
789 		return 0;
790 
791 	if (bridge_device->multicast_enabled != !mc_disabled) {
792 		bridge_device->multicast_enabled = !mc_disabled;
793 		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
794 						   bridge_device);
795 	}
796 
797 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
798 		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
799 		bool member = mlxsw_sp_mc_flood(bridge_port);
800 
801 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
802 							   bridge_port,
803 							   packet_type, member);
804 		if (err)
805 			return err;
806 	}
807 
808 	bridge_device->multicast_enabled = !mc_disabled;
809 
810 	return 0;
811 }
812 
813 static int mlxsw_sp_port_attr_set(struct net_device *dev,
814 				  const struct switchdev_attr *attr,
815 				  struct switchdev_trans *trans)
816 {
817 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
818 	int err;
819 
820 	switch (attr->id) {
821 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
822 		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
823 						       attr->orig_dev,
824 						       attr->u.stp_state);
825 		break;
826 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
827 		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
828 						      attr->orig_dev,
829 						      attr->u.brport_flags);
830 		break;
831 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
832 		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
833 						       attr->u.ageing_time);
834 		break;
835 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
836 		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
837 						     attr->orig_dev,
838 						     attr->u.vlan_filtering);
839 		break;
840 	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
841 		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
842 						     attr->orig_dev,
843 						     attr->u.mrouter);
844 		break;
845 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
846 		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
847 						    attr->orig_dev,
848 						    attr->u.mc_disabled);
849 		break;
850 	default:
851 		err = -EOPNOTSUPP;
852 		break;
853 	}
854 
855 	return err;
856 }
857 
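/* Bind a {port, VID} to the FID provided by the bridge ops: enable
 * unicast and multicast flooding according to the bridge port state,
 * always enable broadcast flooding and map the VID to the FID.
 */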
858 static int
859 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
860 			    struct mlxsw_sp_bridge_port *bridge_port)
861 {
862 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
863 	struct mlxsw_sp_bridge_device *bridge_device;
864 	u8 local_port = mlxsw_sp_port->local_port;
865 	u16 vid = mlxsw_sp_port_vlan->vid;
866 	struct mlxsw_sp_fid *fid;
867 	int err;
868 
869 	bridge_device = bridge_port->bridge_device;
870 	fid = bridge_device->ops->fid_get(bridge_device, vid);
871 	if (IS_ERR(fid))
872 		return PTR_ERR(fid);
873 
874 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
875 				     bridge_port->flags & BR_FLOOD);
876 	if (err)
877 		goto err_fid_uc_flood_set;
878 
879 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
880 				     mlxsw_sp_mc_flood(bridge_port));
881 	if (err)
882 		goto err_fid_mc_flood_set;
883 
884 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
885 				     true);
886 	if (err)
887 		goto err_fid_bc_flood_set;
888 
889 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
890 	if (err)
891 		goto err_fid_port_vid_map;
892 
893 	mlxsw_sp_port_vlan->fid = fid;
894 
895 	return 0;
896 
897 err_fid_port_vid_map:
898 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
899 err_fid_bc_flood_set:
900 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
901 err_fid_mc_flood_set:
902 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
903 err_fid_uc_flood_set:
904 	mlxsw_sp_fid_put(fid);
905 	return err;
906 }
907 
908 static void
909 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
910 {
911 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
912 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
913 	u8 local_port = mlxsw_sp_port->local_port;
914 	u16 vid = mlxsw_sp_port_vlan->vid;
915 
916 	mlxsw_sp_port_vlan->fid = NULL;
917 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
918 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
919 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
920 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
921 	mlxsw_sp_fid_put(fid);
922 }
923 
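/* Determine the PVID to program when 'vid' is added or changed: a VID
 * flagged as PVID becomes the new PVID, removing the flag from the
 * current PVID clears it (PVID 0 disallows untagged packets) and any
 * other VID leaves the PVID unchanged.
 */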
924 static u16
925 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
926 			     u16 vid, bool is_pvid)
927 {
928 	if (is_pvid)
929 		return vid;
930 	else if (mlxsw_sp_port->pvid == vid)
931 		return 0;	/* Disallow untagged packets */
932 	else
933 		return mlxsw_sp_port->pvid;
934 }
935 
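/* Attach a port VLAN to its bridge port: join the FID, apply the bridge
 * port's learning and STP state to the VID, link the port VLAN into the
 * matching bridge VLAN and take a reference on the bridge port.
 */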
936 static int
937 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
938 			       struct mlxsw_sp_bridge_port *bridge_port)
939 {
940 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
941 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
942 	u16 vid = mlxsw_sp_port_vlan->vid;
943 	int err;
944 
945 	/* No need to continue if only VLAN flags were changed */
946 	if (mlxsw_sp_port_vlan->bridge_port)
947 		return 0;
948 
949 	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
950 	if (err)
951 		return err;
952 
953 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
954 					     bridge_port->flags & BR_LEARNING);
955 	if (err)
956 		goto err_port_vid_learning_set;
957 
958 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
959 					bridge_port->stp_state);
960 	if (err)
961 		goto err_port_vid_stp_set;
962 
963 	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
964 	if (!bridge_vlan) {
965 		err = -ENOMEM;
966 		goto err_bridge_vlan_get;
967 	}
968 
969 	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
970 		 &bridge_vlan->port_vlan_list);
971 
972 	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
973 				 bridge_port->dev);
974 	mlxsw_sp_port_vlan->bridge_port = bridge_port;
975 
976 	return 0;
977 
978 err_bridge_vlan_get:
979 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
980 err_port_vid_stp_set:
981 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
982 err_port_vid_learning_set:
983 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
984 	return err;
985 }
986 
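/* Reverse of mlxsw_sp_port_vlan_bridge_join(). When this is the last
 * port using the bridge VLAN, the FDB entries of the FID are flushed;
 * when it is the last VLAN on the bridge port, the MDB is flushed too.
 */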
987 void
988 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
989 {
990 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
991 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
992 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
993 	struct mlxsw_sp_bridge_port *bridge_port;
994 	u16 vid = mlxsw_sp_port_vlan->vid;
995 	bool last_port, last_vlan;
996 
997 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
998 		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
999 		return;
1000 
1001 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
1002 	last_vlan = list_is_singular(&bridge_port->vlans_list);
1003 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1004 	last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1005 
1006 	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1007 	mlxsw_sp_bridge_vlan_put(bridge_vlan);
1008 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1009 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1010 	if (last_port)
1011 		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1012 					       bridge_port,
1013 					       mlxsw_sp_fid_index(fid));
1014 	if (last_vlan)
1015 		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1016 
1017 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1018 
1019 	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1020 	mlxsw_sp_port_vlan->bridge_port = NULL;
1021 }
1022 
1023 static int
1024 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1025 			      struct mlxsw_sp_bridge_port *bridge_port,
1026 			      u16 vid, bool is_untagged, bool is_pvid)
1027 {
1028 	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1029 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1030 	u16 old_pvid = mlxsw_sp_port->pvid;
1031 	int err;
1032 
1033 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
1034 	if (IS_ERR(mlxsw_sp_port_vlan))
1035 		return PTR_ERR(mlxsw_sp_port_vlan);
1036 
1037 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1038 				     is_untagged);
1039 	if (err)
1040 		goto err_port_vlan_set;
1041 
1042 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1043 	if (err)
1044 		goto err_port_pvid_set;
1045 
1046 	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
1047 	if (err)
1048 		goto err_port_vlan_bridge_join;
1049 
1050 	return 0;
1051 
1052 err_port_vlan_bridge_join:
1053 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
1054 err_port_pvid_set:
1055 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1056 err_port_vlan_set:
1057 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1058 	return err;
1059 }
1060 
1061 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1062 				   const struct switchdev_obj_port_vlan *vlan,
1063 				   struct switchdev_trans *trans)
1064 {
1065 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1066 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1067 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1068 	struct net_device *orig_dev = vlan->obj.orig_dev;
1069 	struct mlxsw_sp_bridge_port *bridge_port;
1070 	u16 vid;
1071 
1072 	if (switchdev_trans_ph_prepare(trans))
1073 		return 0;
1074 
1075 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1076 	if (WARN_ON(!bridge_port))
1077 		return -EINVAL;
1078 
1079 	if (!bridge_port->bridge_device->vlan_enabled)
1080 		return 0;
1081 
1082 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1083 		int err;
1084 
1085 		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1086 						    vid, flag_untagged,
1087 						    flag_pvid);
1088 		if (err)
1089 			return err;
1090 	}
1091 
1092 	return 0;
1093 }
1094 
1095 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1096 {
1097 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1098 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1099 }
1100 
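/* Flush the FDB entries learned on this bridge port (port or LAG) for
 * the given FID using the SFDF register.
 */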
1101 static int
1102 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1103 			       struct mlxsw_sp_bridge_port *bridge_port,
1104 			       u16 fid_index)
1105 {
1106 	bool lagged = bridge_port->lagged;
1107 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1108 	u16 system_port;
1109 
1110 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1111 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1112 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1113 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1114 
1115 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1116 }
1117 
1118 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1119 {
1120 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1121 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1122 }
1123 
1124 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1125 {
1126 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1127 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1128 }
1129 
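/* Program or remove a single unicast FDB record via the SFD register.
 * 'dynamic' selects the dynamic (ingress) record policy instead of a
 * static one; 'action' selects the record action, e.g. forwarding to
 * the IP router for router interface MACs (see mlxsw_sp_rif_fdb_op()).
 */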
1130 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1131 				     const char *mac, u16 fid, bool adding,
1132 				     enum mlxsw_reg_sfd_rec_action action,
1133 				     bool dynamic)
1134 {
1135 	char *sfd_pl;
1136 	int err;
1137 
1138 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1139 	if (!sfd_pl)
1140 		return -ENOMEM;
1141 
1142 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1143 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1144 			      mac, fid, action, local_port);
1145 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1146 	kfree(sfd_pl);
1147 
1148 	return err;
1149 }
1150 
1151 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1152 				   const char *mac, u16 fid, bool adding,
1153 				   bool dynamic)
1154 {
1155 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1156 					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
1157 }
1158 
1159 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1160 			bool adding)
1161 {
1162 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1163 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1164 					 false);
1165 }
1166 
1167 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1168 				       const char *mac, u16 fid, u16 lag_vid,
1169 				       bool adding, bool dynamic)
1170 {
1171 	char *sfd_pl;
1172 	int err;
1173 
1174 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1175 	if (!sfd_pl)
1176 		return -ENOMEM;
1177 
1178 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1179 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1180 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1181 				  lag_vid, lag_id);
1182 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1183 	kfree(sfd_pl);
1184 
1185 	return err;
1186 }
1187 
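/* Handle an FDB add/del notification on a bridge port: resolve the
 * backing FID from the {bridge, VID} pair and program the unicast FDB
 * record against the local port or its LAG.
 */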
1188 static int
1189 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1190 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1191 {
1192 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1193 	struct net_device *orig_dev = fdb_info->info.dev;
1194 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1195 	struct mlxsw_sp_bridge_device *bridge_device;
1196 	struct mlxsw_sp_bridge_port *bridge_port;
1197 	u16 fid_index, vid;
1198 
1199 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1200 	if (!bridge_port)
1201 		return -EINVAL;
1202 
1203 	bridge_device = bridge_port->bridge_device;
1204 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1205 							       bridge_device,
1206 							       fdb_info->vid);
1207 	if (!mlxsw_sp_port_vlan)
1208 		return 0;
1209 
1210 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1211 	vid = mlxsw_sp_port_vlan->vid;
1212 
1213 	if (!bridge_port->lagged)
1214 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1215 					       bridge_port->system_port,
1216 					       fdb_info->addr, fid_index,
1217 					       adding, false);
1218 	else
1219 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1220 						   bridge_port->lag_id,
1221 						   fdb_info->addr, fid_index,
1222 						   vid, adding, false);
1223 }
1224 
1225 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1226 				u16 fid, u16 mid_idx, bool adding)
1227 {
1228 	char *sfd_pl;
1229 	int err;
1230 
1231 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1232 	if (!sfd_pl)
1233 		return -ENOMEM;
1234 
1235 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1236 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1237 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1238 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1239 	kfree(sfd_pl);
1240 	return err;
1241 }
1242 
1243 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1244 					 long *ports_bitmap)
1245 {
1246 	char *smid_pl;
1247 	int err, i;
1248 
1249 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1250 	if (!smid_pl)
1251 		return -ENOMEM;
1252 
1253 	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
1254 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1255 		if (mlxsw_sp->ports[i])
1256 			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1257 	}
1258 
1259 	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1260 		mlxsw_reg_smid_port_set(smid_pl, i, 1);
1261 
1262 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1263 	kfree(smid_pl);
1264 	return err;
1265 }
1266 
1267 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1268 				  u16 mid_idx, bool add)
1269 {
1270 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1271 	char *smid_pl;
1272 	int err;
1273 
1274 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1275 	if (!smid_pl)
1276 		return -ENOMEM;
1277 
1278 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1279 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1280 	kfree(smid_pl);
1281 	return err;
1282 }
1283 
1284 static struct
1285 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1286 				const unsigned char *addr,
1287 				u16 fid)
1288 {
1289 	struct mlxsw_sp_mid *mid;
1290 
1291 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1292 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1293 			return mid;
1294 	}
1295 	return NULL;
1296 }
1297 
1298 static void
1299 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1300 				      struct mlxsw_sp_bridge_port *bridge_port,
1301 				      unsigned long *ports_bitmap)
1302 {
1303 	struct mlxsw_sp_port *mlxsw_sp_port;
1304 	u64 max_lag_members, i;
1305 	int lag_id;
1306 
1307 	if (!bridge_port->lagged) {
1308 		set_bit(bridge_port->system_port, ports_bitmap);
1309 	} else {
1310 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1311 						     MAX_LAG_MEMBERS);
1312 		lag_id = bridge_port->lag_id;
1313 		for (i = 0; i < max_lag_members; i++) {
1314 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1315 								 lag_id, i);
1316 			if (mlxsw_sp_port)
1317 				set_bit(mlxsw_sp_port->local_port,
1318 					ports_bitmap);
1319 		}
1320 	}
1321 }
1322 
1323 static void
1324 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1325 				struct mlxsw_sp_bridge_device *bridge_device,
1326 				struct mlxsw_sp *mlxsw_sp)
1327 {
1328 	struct mlxsw_sp_bridge_port *bridge_port;
1329 
1330 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1331 		if (bridge_port->mrouter) {
1332 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1333 							      bridge_port,
1334 							      flood_bitmap);
1335 		}
1336 	}
1337 }
1338 
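/* Install an MDB entry in hardware: allocate a free MID index, build
 * the flood bitmap from the group's member ports plus all mrouter
 * ports, program it via the SMID register and point the multicast FDB
 * record (SFD) at the MID. Returns false on any failure.
 */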
1339 static bool
1340 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1341 			    struct mlxsw_sp_mid *mid,
1342 			    struct mlxsw_sp_bridge_device *bridge_device)
1343 {
1344 	long *flood_bitmap;
1345 	int num_of_ports;
1346 	int alloc_size;
1347 	u16 mid_idx;
1348 	int err;
1349 
1350 	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1351 				      MLXSW_SP_MID_MAX);
1352 	if (mid_idx == MLXSW_SP_MID_MAX)
1353 		return false;
1354 
1355 	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1356 	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1357 	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1358 	if (!flood_bitmap)
1359 		return false;
1360 
1361 	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1362 	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1363 
1364 	mid->mid = mid_idx;
1365 	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap);
1366 	kfree(flood_bitmap);
1367 	if (err)
1368 		return false;
1369 
1370 	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1371 				   true);
1372 	if (err)
1373 		return false;
1374 
1375 	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1376 	mid->in_hw = true;
1377 	return true;
1378 }
1379 
1380 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1381 					struct mlxsw_sp_mid *mid)
1382 {
1383 	if (!mid->in_hw)
1384 		return 0;
1385 
1386 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1387 	mid->in_hw = false;
1388 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1389 				    false);
1390 }
1391 
1392 static struct
1393 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1394 				  struct mlxsw_sp_bridge_device *bridge_device,
1395 				  const unsigned char *addr,
1396 				  u16 fid)
1397 {
1398 	struct mlxsw_sp_mid *mid;
1399 	size_t alloc_size;
1400 
1401 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1402 	if (!mid)
1403 		return NULL;
1404 
1405 	alloc_size = sizeof(unsigned long) *
1406 		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1407 
1408 	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1409 	if (!mid->ports_in_mid)
1410 		goto err_ports_in_mid_alloc;
1411 
1412 	ether_addr_copy(mid->addr, addr);
1413 	mid->fid = fid;
1414 	mid->in_hw = false;
1415 
1416 	if (!bridge_device->multicast_enabled)
1417 		goto out;
1418 
1419 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1420 		goto err_write_mdb_entry;
1421 
1422 out:
1423 	list_add_tail(&mid->list, &bridge_device->mids_list);
1424 	return mid;
1425 
1426 err_write_mdb_entry:
1427 	kfree(mid->ports_in_mid);
1428 err_ports_in_mid_alloc:
1429 	kfree(mid);
1430 	return NULL;
1431 }
1432 
1433 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1434 					 struct mlxsw_sp_mid *mid)
1435 {
1436 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1437 	int err = 0;
1438 
1439 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1440 	if (bitmap_empty(mid->ports_in_mid,
1441 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1442 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1443 		list_del(&mid->list);
1444 		kfree(mid->ports_in_mid);
1445 		kfree(mid);
1446 	}
1447 	return err;
1448 }
1449 
1450 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1451 				 const struct switchdev_obj_port_mdb *mdb,
1452 				 struct switchdev_trans *trans)
1453 {
1454 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1455 	struct net_device *orig_dev = mdb->obj.orig_dev;
1456 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1457 	struct net_device *dev = mlxsw_sp_port->dev;
1458 	struct mlxsw_sp_bridge_device *bridge_device;
1459 	struct mlxsw_sp_bridge_port *bridge_port;
1460 	struct mlxsw_sp_mid *mid;
1461 	u16 fid_index;
1462 	int err = 0;
1463 
1464 	if (switchdev_trans_ph_prepare(trans))
1465 		return 0;
1466 
1467 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1468 	if (!bridge_port)
1469 		return 0;
1470 
1471 	bridge_device = bridge_port->bridge_device;
1472 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1473 							       bridge_device,
1474 							       mdb->vid);
1475 	if (!mlxsw_sp_port_vlan)
1476 		return 0;
1477 
1478 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1479 
1480 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1481 	if (!mid) {
1482 		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1483 					  fid_index);
1484 		if (!mid) {
1485 			netdev_err(dev, "Unable to allocate MC group\n");
1486 			return -ENOMEM;
1487 		}
1488 	}
1489 	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1490 
1491 	if (!bridge_device->multicast_enabled)
1492 		return 0;
1493 
1494 	if (bridge_port->mrouter)
1495 		return 0;
1496 
1497 	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1498 	if (err) {
1499 		netdev_err(dev, "Unable to set SMID\n");
1500 		goto err_out;
1501 	}
1502 
1503 	return 0;
1504 
1505 err_out:
1506 	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1507 	return err;
1508 }
1509 
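/* Called when multicast snooping is toggled on the bridge: install all
 * known MDB entries in hardware when it is enabled and remove them when
 * it is disabled.
 */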
1510 static void
1511 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1512 				   struct mlxsw_sp_bridge_device
1513 				   *bridge_device)
1514 {
1515 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1516 	struct mlxsw_sp_mid *mid;
1517 	bool mc_enabled;
1518 
1519 	mc_enabled = bridge_device->multicast_enabled;
1520 
1521 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1522 		if (mc_enabled)
1523 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1524 						    bridge_device);
1525 		else
1526 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1527 	}
1528 }
1529 
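/* An mrouter port must receive all multicast traffic: add it to (or
 * remove it from) every MID it is not already a member of when its
 * mrouter state changes.
 */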
1530 static void
1531 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1532 				 struct mlxsw_sp_bridge_port *bridge_port,
1533 				 bool add)
1534 {
1535 	struct mlxsw_sp_bridge_device *bridge_device;
1536 	struct mlxsw_sp_mid *mid;
1537 
1538 	bridge_device = bridge_port->bridge_device;
1539 
1540 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1541 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1542 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1543 	}
1544 }
1545 
1546 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1547 				 const struct switchdev_obj *obj,
1548 				 struct switchdev_trans *trans)
1549 {
1550 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1551 	int err = 0;
1552 
1553 	switch (obj->id) {
1554 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1555 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
1556 					      SWITCHDEV_OBJ_PORT_VLAN(obj),
1557 					      trans);
1558 		break;
1559 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1560 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1561 					    SWITCHDEV_OBJ_PORT_MDB(obj),
1562 					    trans);
1563 		break;
1564 	default:
1565 		err = -EOPNOTSUPP;
1566 		break;
1567 	}
1568 
1569 	return err;
1570 }
1571 
1572 static void
1573 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1574 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1575 {
1576 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
1577 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1578 
1579 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1580 	if (WARN_ON(!mlxsw_sp_port_vlan))
1581 		return;
1582 
1583 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1584 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1585 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1586 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1587 }
1588 
1589 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1590 				   const struct switchdev_obj_port_vlan *vlan)
1591 {
1592 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1593 	struct net_device *orig_dev = vlan->obj.orig_dev;
1594 	struct mlxsw_sp_bridge_port *bridge_port;
1595 	u16 vid;
1596 
1597 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1598 	if (WARN_ON(!bridge_port))
1599 		return -EINVAL;
1600 
1601 	if (!bridge_port->bridge_device->vlan_enabled)
1602 		return 0;
1603 
1604 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
1605 		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
1606 
1607 	return 0;
1608 }
1609 
1610 static int
1611 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1612 			struct mlxsw_sp_bridge_port *bridge_port,
1613 			struct mlxsw_sp_mid *mid)
1614 {
1615 	struct net_device *dev = mlxsw_sp_port->dev;
1616 	int err;
1617 
1618 	if (bridge_port->bridge_device->multicast_enabled) {
1619 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
1620 					     false);
1621 		if (err)
1622 			netdev_err(dev, "Unable to remove port from SMID\n");
1623 	}
1626 
1627 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1628 	if (err)
1629 		netdev_err(dev, "Unable to remove MC SFD\n");
1630 
1631 	return err;
1632 }
1633 
1634 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1635 				 const struct switchdev_obj_port_mdb *mdb)
1636 {
1637 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1638 	struct net_device *orig_dev = mdb->obj.orig_dev;
1639 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1640 	struct mlxsw_sp_bridge_device *bridge_device;
1641 	struct net_device *dev = mlxsw_sp_port->dev;
1642 	struct mlxsw_sp_bridge_port *bridge_port;
1643 	struct mlxsw_sp_mid *mid;
1644 	u16 fid_index;
1645 
1646 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1647 	if (!bridge_port)
1648 		return 0;
1649 
1650 	bridge_device = bridge_port->bridge_device;
1651 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1652 							       bridge_device,
1653 							       mdb->vid);
1654 	if (!mlxsw_sp_port_vlan)
1655 		return 0;
1656 
1657 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1658 
1659 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1660 	if (!mid) {
1661 		netdev_err(dev, "Unable to remove port from MC DB\n");
1662 		return -EINVAL;
1663 	}
1664 
1665 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
1666 }
1667 
1668 static void
1669 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1670 			       struct mlxsw_sp_bridge_port *bridge_port)
1671 {
1672 	struct mlxsw_sp_bridge_device *bridge_device;
1673 	struct mlxsw_sp_mid *mid, *tmp;
1674 
1675 	bridge_device = bridge_port->bridge_device;
1676 
1677 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1678 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1679 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1680 						mid);
1681 		} else if (bridge_device->multicast_enabled &&
1682 			   bridge_port->mrouter) {
1683 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1684 		}
1685 	}
1686 }
1687 
1688 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1689 				 const struct switchdev_obj *obj)
1690 {
1691 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1692 	int err = 0;
1693 
1694 	switch (obj->id) {
1695 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1696 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1697 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1698 		break;
1699 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1700 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1701 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1702 		break;
1703 	default:
1704 		err = -EOPNOTSUPP;
1705 		break;
1706 	}
1707 
1708 	return err;
1709 }
1710 
1711 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1712 						   u16 lag_id)
1713 {
1714 	struct mlxsw_sp_port *mlxsw_sp_port;
1715 	u64 max_lag_members;
1716 	int i;
1717 
1718 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1719 					     MAX_LAG_MEMBERS);
1720 	for (i = 0; i < max_lag_members; i++) {
1721 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1722 		if (mlxsw_sp_port)
1723 			return mlxsw_sp_port;
1724 	}
1725 	return NULL;
1726 }
1727 
1728 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
1729 	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
1730 	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
1731 	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
1732 	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
1733 };
1734 
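/* VLAN-aware bridge: the default VID 1 port VLAN is released on join,
 * since the bridge manages the port's VLANs through switchdev objects.
 */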
1735 static int
1736 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1737 				struct mlxsw_sp_bridge_port *bridge_port,
1738 				struct mlxsw_sp_port *mlxsw_sp_port)
1739 {
1740 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1741 
1742 	if (is_vlan_dev(bridge_port->dev))
1743 		return -EINVAL;
1744 
1745 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
1746 	if (WARN_ON(!mlxsw_sp_port_vlan))
1747 		return -EINVAL;
1748 
1749 	/* Let VLAN-aware bridge take care of its own VLANs */
1750 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1751 
1752 	return 0;
1753 }
1754 
1755 static void
1756 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1757 				 struct mlxsw_sp_bridge_port *bridge_port,
1758 				 struct mlxsw_sp_port *mlxsw_sp_port)
1759 {
1760 	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
1761 	/* Make sure untagged frames are allowed to ingress */
1762 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
1763 }
1764 
1765 static struct mlxsw_sp_fid *
1766 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
1767 			      u16 vid)
1768 {
1769 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1770 
1771 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
1772 }
1773 
1774 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
1775 	.port_join	= mlxsw_sp_bridge_8021q_port_join,
1776 	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
1777 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
1778 };
1779 
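/* Check whether any {Port, VLAN} of the port is already enslaved to the
 * given bridge device.
 */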
1780 static bool
1781 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
1782 			   const struct net_device *br_dev)
1783 {
1784 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1785 
1786 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
1787 			    list) {
1788 		if (mlxsw_sp_port_vlan->bridge_port &&
1789 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
1790 		    br_dev)
1791 			return true;
1792 	}
1793 
1794 	return false;
1795 }
1796 
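/* Join a VLAN-unaware (802.1D) bridge. Only VLAN uppers of the port can be
 * enslaved, and two VLAN uppers of the same port cannot be members of the
 * same bridge. The {Port, VLAN} is no longer usable as a router interface
 * and is bound to the bridge's FID instead.
 */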
1797 static int
1798 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1799 				struct mlxsw_sp_bridge_port *bridge_port,
1800 				struct mlxsw_sp_port *mlxsw_sp_port)
1801 {
1802 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1803 	u16 vid;
1804 
1805 	if (!is_vlan_dev(bridge_port->dev))
1806 		return -EINVAL;
1807 	vid = vlan_dev_vlan_id(bridge_port->dev);
1808 
1809 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1810 	if (WARN_ON(!mlxsw_sp_port_vlan))
1811 		return -EINVAL;
1812 
1813 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
1814 		netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
1815 		return -EINVAL;
1816 	}
1817 
1818 	/* Port is no longer usable as a router interface */
1819 	if (mlxsw_sp_port_vlan->fid)
1820 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1821 
1822 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
1823 }
1824 
1825 static void
1826 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1827 				 struct mlxsw_sp_bridge_port *bridge_port,
1828 				 struct mlxsw_sp_port *mlxsw_sp_port)
1829 {
1830 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1831 	u16 vid = vlan_dev_vlan_id(bridge_port->dev);
1832 
1833 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1834 	if (WARN_ON(!mlxsw_sp_port_vlan))
1835 		return;
1836 
1837 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1838 }
1839 
1840 static struct mlxsw_sp_fid *
1841 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
1842 			      u16 vid)
1843 {
1844 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1845 
1846 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
1847 }
1848 
1849 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
1850 	.port_join	= mlxsw_sp_bridge_8021d_port_join,
1851 	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
1852 	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
1853 };
1854 
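/* Called when a port netdev (or an upper of it) is enslaved to a bridge:
 * get or create the corresponding bridge port and let the bridge type
 * specific operations complete the join.
 */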
1855 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
1856 			      struct net_device *brport_dev,
1857 			      struct net_device *br_dev)
1858 {
1859 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1860 	struct mlxsw_sp_bridge_device *bridge_device;
1861 	struct mlxsw_sp_bridge_port *bridge_port;
1862 	int err;
1863 
1864 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
1865 	if (IS_ERR(bridge_port))
1866 		return PTR_ERR(bridge_port);
1867 	bridge_device = bridge_port->bridge_device;
1868 
1869 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
1870 					    mlxsw_sp_port);
1871 	if (err)
1872 		goto err_port_join;
1873 
1874 	return 0;
1875 
1876 err_port_join:
1877 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
1878 	return err;
1879 }
1880 
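/* Undo mlxsw_sp_port_bridge_join(): let the bridge type specific operations
 * tear down the membership and release the bridge port.
 */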
1881 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
1882 				struct net_device *brport_dev,
1883 				struct net_device *br_dev)
1884 {
1885 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1886 	struct mlxsw_sp_bridge_device *bridge_device;
1887 	struct mlxsw_sp_bridge_port *bridge_port;
1888 
1889 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
1890 	if (!bridge_device)
1891 		return;
1892 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
1893 	if (!bridge_port)
1894 		return;
1895 
1896 	bridge_device->ops->port_leave(bridge_device, bridge_port,
1897 				       mlxsw_sp_port);
1898 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
1899 }
1900 
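/* Send an FDB event (e.g. learned, aged-out, offloaded) to the switchdev
 * notifier chain on behalf of the given netdev.
 */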
1901 static void
1902 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
1903 			    const char *mac, u16 vid,
1904 			    struct net_device *dev)
1905 {
1906 	struct switchdev_notifier_fdb_info info;
1907 
1908 	info.addr = mac;
1909 	info.vid = vid;
1910 	call_switchdev_notifiers(type, dev, &info.info);
1911 }
1912 
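/* Process a learned / aged-out unicast MAC record from the SFN register:
 * program (or remove) the corresponding FDB entry in the device and notify
 * the bridge. If the record cannot be mapped to a {Port, VID} enslaved to a
 * bridge, the entry is removed from the device without any notification.
 */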
1913 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
1914 					    char *sfn_pl, int rec_index,
1915 					    bool adding)
1916 {
1917 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1918 	struct mlxsw_sp_bridge_device *bridge_device;
1919 	struct mlxsw_sp_bridge_port *bridge_port;
1920 	struct mlxsw_sp_port *mlxsw_sp_port;
1921 	enum switchdev_notifier_type type;
1922 	char mac[ETH_ALEN];
1923 	u8 local_port;
1924 	u16 vid, fid;
1925 	bool do_notification = true;
1926 	int err;
1927 
1928 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
1929 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
1930 	if (!mlxsw_sp_port) {
1931 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
1932 		goto just_remove;
1933 	}
1934 
1935 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
1936 	if (!mlxsw_sp_port_vlan) {
1937 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
1938 		goto just_remove;
1939 	}
1940 
1941 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
1942 	if (!bridge_port) {
1943 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
1944 		goto just_remove;
1945 	}
1946 
1947 	bridge_device = bridge_port->bridge_device;
1948 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
1949 
1950 do_fdb_op:
1951 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
1952 				      adding, true);
1953 	if (err) {
1954 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
1955 		return;
1956 	}
1957 
1958 	if (!do_notification)
1959 		return;
1960 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
1961 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
1962 
1963 	return;
1964 
1965 just_remove:
1966 	adding = false;
1967 	do_notification = false;
1968 	goto do_fdb_op;
1969 }
1970 
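/* Same as mlxsw_sp_fdb_notify_mac_process(), but for MAC records learned or
 * aged-out on a LAG. A member port is used as the LAG's representor.
 */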
1971 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
1972 						char *sfn_pl, int rec_index,
1973 						bool adding)
1974 {
1975 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1976 	struct mlxsw_sp_bridge_device *bridge_device;
1977 	struct mlxsw_sp_bridge_port *bridge_port;
1978 	struct mlxsw_sp_port *mlxsw_sp_port;
1979 	enum switchdev_notifier_type type;
1980 	char mac[ETH_ALEN];
1981 	u16 lag_vid = 0;
1982 	u16 lag_id;
1983 	u16 vid, fid;
1984 	bool do_notification = true;
1985 	int err;
1986 
1987 	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
1988 	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
1989 	if (!mlxsw_sp_port) {
1990 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
1991 		goto just_remove;
1992 	}
1993 
1994 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
1995 	if (!mlxsw_sp_port_vlan) {
1996 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
1997 		goto just_remove;
1998 	}
1999 
2000 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2001 	if (!bridge_port) {
2002 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2003 		goto just_remove;
2004 	}
2005 
2006 	bridge_device = bridge_port->bridge_device;
2007 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2008 	lag_vid = mlxsw_sp_port_vlan->vid;
2009 
2010 do_fdb_op:
2011 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2012 					  adding, true);
2013 	if (err) {
2014 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2015 		return;
2016 	}
2017 
2018 	if (!do_notification)
2019 		return;
2020 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2021 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
2022 
2023 	return;
2024 
2025 just_remove:
2026 	adding = false;
2027 	do_notification = false;
2028 	goto do_fdb_op;
2029 }
2030 
2031 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2032 					    char *sfn_pl, int rec_index)
2033 {
2034 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2035 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2036 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2037 						rec_index, true);
2038 		break;
2039 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2040 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2041 						rec_index, false);
2042 		break;
2043 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2044 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2045 						    rec_index, true);
2046 		break;
2047 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2048 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2049 						    rec_index, false);
2050 		break;
2051 	}
2052 }
2053 
2054 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2055 {
2056 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2057 
2058 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2059 			       msecs_to_jiffies(bridge->fdb_notify.interval));
2060 }
2061 
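/* Delayed work that polls the SFN register for FDB notification records
 * (learned and aged-out MACs), processes them and re-arms itself according
 * to the configured polling interval.
 */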
2062 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2063 {
2064 	struct mlxsw_sp_bridge *bridge;
2065 	struct mlxsw_sp *mlxsw_sp;
2066 	char *sfn_pl;
2067 	u8 num_rec;
2068 	int i;
2069 	int err;
2070 
2071 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2072 	if (!sfn_pl)
2073 		return;
2074 
2075 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2076 	mlxsw_sp = bridge->mlxsw_sp;
2077 
2078 	rtnl_lock();
2079 	mlxsw_reg_sfn_pack(sfn_pl);
2080 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2081 	if (err) {
2082 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2083 		goto out;
2084 	}
2085 	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2086 	for (i = 0; i < num_rec; i++)
2087 		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2088 
2089 out:
2090 	rtnl_unlock();
2091 	kfree(sfn_pl);
2092 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2093 }
2094 
2095 struct mlxsw_sp_switchdev_event_work {
2096 	struct work_struct work;
2097 	struct switchdev_notifier_fdb_info fdb_info;
2098 	struct net_device *dev;
2099 	unsigned long event;
2100 };
2101 
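/* Process an FDB add/del request from the bridge in process context:
 * program the entry in the device and, on successful addition, report it
 * back as offloaded.
 */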
2102 static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
2103 {
2104 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
2105 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2106 	struct net_device *dev = switchdev_work->dev;
2107 	struct switchdev_notifier_fdb_info *fdb_info;
2108 	struct mlxsw_sp_port *mlxsw_sp_port;
2109 	int err;
2110 
2111 	rtnl_lock();
2112 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2113 	if (!mlxsw_sp_port)
2114 		goto out;
2115 
2116 	switch (switchdev_work->event) {
2117 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2118 		fdb_info = &switchdev_work->fdb_info;
2119 		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2120 		if (err)
2121 			break;
2122 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2123 					    fdb_info->addr,
2124 					    fdb_info->vid, dev);
2125 		break;
2126 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2127 		fdb_info = &switchdev_work->fdb_info;
2128 		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2129 		break;
2130 	}
2131 
2132 out:
2133 	rtnl_unlock();
2134 	kfree(switchdev_work->fdb_info.addr);
2135 	kfree(switchdev_work);
2136 	dev_put(dev);
2137 }
2138 
2139 /* Called under rcu_read_lock() */
2140 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
2141 				    unsigned long event, void *ptr)
2142 {
2143 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2144 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
2145 	struct switchdev_notifier_fdb_info *fdb_info = ptr;
2146 
2147 	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
2148 		return NOTIFY_DONE;
2149 
2150 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2151 	if (!switchdev_work)
2152 		return NOTIFY_BAD;
2153 
2154 	INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
2155 	switchdev_work->dev = dev;
2156 	switchdev_work->event = event;
2157 
2158 	switch (event) {
2159 	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
2160 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2161 		memcpy(&switchdev_work->fdb_info, ptr,
2162 		       sizeof(switchdev_work->fdb_info));
2163 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2164 		if (!switchdev_work->fdb_info.addr)
2165 			goto err_addr_alloc;
2166 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2167 				fdb_info->addr);
2168 		/* Take a reference on the device. This can be either an
2169 		 * upper device containing the mlxsw_sp_port, or the
2170 		 * mlxsw_sp_port itself.
2171 		 */
2172 		dev_hold(dev);
2173 		break;
2174 	default:
2175 		kfree(switchdev_work);
2176 		return NOTIFY_DONE;
2177 	}
2178 
2179 	mlxsw_core_schedule_work(&switchdev_work->work);
2180 
2181 	return NOTIFY_DONE;
2182 
2183 err_addr_alloc:
2184 	kfree(switchdev_work);
2185 	return NOTIFY_BAD;
2186 }
2187 
2188 static struct notifier_block mlxsw_sp_switchdev_notifier = {
2189 	.notifier_call = mlxsw_sp_switchdev_event,
2190 };
2191 
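/* Set the default FDB ageing time, register for FDB events from the bridge
 * and start polling the device for learned / aged-out entries.
 */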
2192 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
2193 {
2194 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2195 	int err;
2196 
2197 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
2198 	if (err) {
2199 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
2200 		return err;
2201 	}
2202 
2203 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2204 	if (err) {
2205 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
2206 		return err;
2207 	}
2208 
2209 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
2210 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
2211 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2212 	return 0;
2213 }
2214 
2215 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
2216 {
2217 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
2218 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2220 }
2221 
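/* Allocate and initialize bridge-related state and start FDB processing */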
2222 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
2223 {
2224 	struct mlxsw_sp_bridge *bridge;
2225 
2226 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
2227 	if (!bridge)
2228 		return -ENOMEM;
2229 	mlxsw_sp->bridge = bridge;
2230 	bridge->mlxsw_sp = mlxsw_sp;
2231 
2232 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
2233 
2234 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
2235 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
2236 
2237 	return mlxsw_sp_fdb_init(mlxsw_sp);
2238 }
2239 
2240 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
2241 {
2242 	mlxsw_sp_fdb_fini(mlxsw_sp);
2243 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
2244 	kfree(mlxsw_sp->bridge);
2245 }
2246 
2247 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
2248 {
2249 	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
2250 }
2251 
2252 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
2253 {
2254 }
2255