// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

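/* Ordered workqueue onto which FDB events are deferred; work items run
 * one at a time, in the order they were queued.
 */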
static struct workqueue_struct *sparx5_owq;

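/* Context passed from the atomic switchdev FDB notifier to the deferred
 * work handler; carries a private copy of the notifier's fdb_info and
 * MAC address.
 */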
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

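/* Only the unicast, multicast and broadcast flood flags can be offloaded;
 * reject any other bridge port flag up front.
 */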
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

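/* Update the IPv4/IPv6 multicast flood masks for this port: the port must
 * flood IP multicast if either mcast flooding is enabled on it or it is an
 * mrouter port.
 */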
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
{
	bool should_flood = flood_flag || port->is_mrouter;
	int pgid;

	for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
		sparx5_pgid_update_mask(port, pgid, should_flood);
}

static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD) {
		sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
		sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
	}

	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
}

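/* Map the bridge STP state onto the per-port masks: BR_STATE_FORWARDING
 * sets both the forward and learn bits, BR_STATE_LEARNING sets only the
 * learn bit, and any other state clears both.
 */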
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states are treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* Apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

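/* The bridge hands us the ageing time in clock_t units; convert it to
 * milliseconds before programming the hardware.
 */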
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

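/* Toggle mrouter behaviour for this port: an mrouter port receives all IP
 * multicast traffic, so it is added to (or removed from) the PGID of every
 * IP multicast group it has not explicitly joined, and IP multicast
 * flooding is then re-evaluated.
 */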
static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
					 struct net_device *orig_dev,
					 bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_mdb_entry *e;
	bool flood_flag;

	if (enable == port->is_mrouter)
		return;

	/* Add/del the mrouter port on all active mdb entries in HW.
	 * Don't change the entry port mask, since that represents
	 * the ports that actually joined that group.
	 */
	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (!test_bit(port->portno, e->port_mask) &&
		    ether_addr_is_ip_mcast(e->addr))
			sparx5_pgid_update_mask(port, e->pgid_idx, enable);
	}
	mutex_unlock(&sparx5->mdb_lock);

	/* Enable/disable flooding depending on whether the port is an
	 * mrouter port or mcast flooding is enabled.
	 */
	port->is_mrouter = enable;
	flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
	sparx5_port_update_mcast_ip_flood(port, flood_flag);
}

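/* Dispatch a switchdev port attribute change to the matching handler. */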
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when default_pvid is 0, to avoid
		 * collision with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		sparx5_port_attr_mrouter_set(port,
					     attr->orig_dev,
					     attr->u.mrouter);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

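/* Called when a port is made a member of a bridge. Only a single hardware
 * bridge is supported; joining a second bridge is rejected.
 */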
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* Adding the port to a second bridge is unsupported */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove the standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, so multicast frames no longer need to
	 * be copied to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

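/* Called when a port leaves the bridge: undo the offload, reset the VLAN
 * state and restore standalone (host) operation.
 */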
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear the bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to the CPU */
	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);

	/* The port enters host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

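/* Handle NETDEV_CHANGEUPPER, raised e.g. when the port is enslaved to or
 * released from a bridge ("ip link set eth0 master br0" / "... nomaster",
 * with "eth0"/"br0" as placeholder names).
 */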
static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

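/* Add or remove the CPU-directed MAC table entry for the port's own
 * address as the interface goes up or down.
 */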
static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

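/* Deferred FDB worker: runs in process context under rtnl_lock. Addresses
 * reported on a foreign (non-sparx5) netdev are treated as host addresses
 * and learned towards the CPU port group.
 */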
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
		sparx5 = switchdev_work->sparx5;
	} else {
		host_addr = false;
		sparx5 = switchdev_work->sparx5;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when default_pvid is 0, to avoid
	 * collision with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

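/* Atomic switchdev notifier: port attribute changes are handled inline,
 * while FDB add/del events are copied (including the MAC address, whose
 * notifier-owned buffer is not valid after this callback returns) and
 * deferred to the ordered workqueue.
 */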
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

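/* Add a VLAN to a switch port, or install the broadcast address entry for
 * the VLAN when the object targets the bridge device itself.
 */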
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to the CPU */
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

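/* Allocate an mdb entry together with a multicast PGID and add it to the
 * list of known entries.
 */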
static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid,
				  struct sparx5_mdb_entry **entry_out)
{
	struct sparx5_mdb_entry *entry;
	u16 pgid_idx;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
	if (err) {
		kfree(entry);
		return err;
	}

	memcpy(entry->addr, addr, ETH_ALEN);
	entry->vid = vid;
	entry->pgid_idx = pgid_idx;

	mutex_lock(&sparx5->mdb_lock);
	list_add_tail(&entry->list, &sparx5->mdb_entries);
	mutex_unlock(&sparx5->mdb_lock);

	*entry_out = entry;
	return 0;
}

static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid)
{
	struct sparx5_mdb_entry *entry, *tmp;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
		/* vid == 0 acts as a wildcard and matches any vid */
		if ((vid == 0 || entry->vid == vid) &&
		    ether_addr_equal(addr, entry->addr)) {
			list_del(&entry->list);

			sparx5_pgid_free(sparx5, entry->pgid_idx);
			kfree(entry);
			goto out;
		}
	}

out:
	mutex_unlock(&sparx5->mdb_lock);
}

static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
						     const unsigned char *addr,
						     u16 vid)
{
	struct sparx5_mdb_entry *e, *found = NULL;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
			found = e;
			goto out;
		}
	}

out:
	mutex_unlock(&sparx5->mdb_lock);
	return found;
}

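/* Enable or disable copy-to-CPU for the given PGID. */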
static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
{
	spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
		 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
		 ANA_AC_PGID_MISC_CFG(pgid));
}

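/* Offload an mdb entry, created e.g. by IGMP/MLD snooping or manually
 * ("bridge mdb add dev br0 port eth0 grp 239.1.1.1", with "br0"/"eth0" as
 * placeholder names). HOST_MDB objects target the bridge device itself and
 * enable CPU copy instead of setting a bit in the port mask.
 */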
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host, is_new;
	int err, i;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	/* When the bridge is VLAN unaware, the vlan value is not parsed and
	 * we receive vid 0. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	is_new = false;
	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry) {
		err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
		is_new = true;
		if (err)
			return err;
	}

	mutex_lock(&spx5->mdb_lock);

	/* Add any mrouter ports to the new entry */
	if (is_new && ether_addr_is_ip_mcast(v->addr))
		for (i = 0; i < SPX5_PORTS; i++)
			if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
				sparx5_pgid_update_mask(spx5->ports[i],
							entry->pgid_idx,
							true);

	if (is_host && !entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
		entry->cpu_copy = true;
	} else if (!is_host) {
		sparx5_pgid_update_mask(port, entry->pgid_idx, true);
		set_bit(port->portno, entry->port_mask);
	}
	mutex_unlock(&spx5->mdb_lock);

	sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);

	return 0;
}

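/* Remove a port (or the CPU for HOST_MDB) from an mdb entry, and tear the
 * entry down once no port uses it and CPU copy is disabled.
 */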
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry)
		return 0;

	mutex_lock(&spx5->mdb_lock);
	if (is_host && entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
		entry->cpu_copy = false;
	} else if (!is_host) {
		clear_bit(port->portno, entry->port_mask);

		/* If the port is not an mrouter port, or the address is L2
		 * mcast, remove the port from the mask.
		 */
		if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
			sparx5_pgid_update_mask(port, entry->pgid_idx, false);
	}
	mutex_unlock(&spx5->mdb_lock);

	if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
		/* Clear the pgid in case mrouter ports exist
		 * that are not part of the group.
		 */
		sparx5_pgid_clear(spx5, entry->pgid_idx);
		sparx5_mact_forget(spx5, entry->addr, entry->vid);
		sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
	}
	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_del(port, vid);
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

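/* Blocking switchdev notifier: object add/del and attribute set requests
 * arrive here in process context.
 */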
static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

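/* Register the netdevice and switchdev notifiers and create the ordered
 * workqueue; unwind in reverse order on failure.
 */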
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_workqueue;
	}

	return 0;

err_workqueue:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}