// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

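/* Work item carrying a copy of an FDB event for deferred handling */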
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

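/* Check that only supported bridge port flags are requested; only
 * unicast, multicast and broadcast flooding can be offloaded.
 */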
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

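/* Set or clear the port in all IPv4/IPv6 multicast flood masks. The port
 * floods IP multicast when the bridge flag requests it or when the port
 * is an mrouter port.
 */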
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
{
	bool should_flood = flood_flag || port->is_mrouter;
	struct sparx5 *sparx5 = port->sparx5;
	int pgid;

	for (pgid = sparx5_get_pgid(sparx5, PGID_IPV4_MC_DATA);
	     pgid <= sparx5_get_pgid(sparx5, PGID_IPV6_MC_CTRL); pgid++)
		sparx5_pgid_update_mask(port, pgid, should_flood);
}

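/* Apply bridge port flooding flags by updating the port's membership in
 * the corresponding flood PGIDs.
 */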
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (flags.mask & BR_MCAST_FLOOD) {
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_MC_FLOOD),
					!!(flags.val & BR_MCAST_FLOOD));
		sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
	}

	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_UC_FLOOD),
					!!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_BCAST),
					!!(flags.val & BR_BCAST_FLOOD));
}

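/* Translate an STP port state into the bridge forward and learning masks
 * and apply the new forwarding state to the hardware.
 */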
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* Apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

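/* Set the FDB ageing time; the bridge hands it over in clock_t, while
 * sparx5_set_ageing() takes milliseconds.
 */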
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

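/* Make the port a multicast router port (or revert it). An mrouter port
 * is added to the PGID of every IP multicast group and floods IP
 * multicast regardless of the BR_MCAST_FLOOD flag.
 */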
static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
					 struct net_device *orig_dev,
					 bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_mdb_entry *e;
	bool flood_flag;

	if (enable == port->is_mrouter)
		return;

	/* Add/del the mrouter port on all active mdb entries in HW.
	 * Don't change the entry port mask, since that represents
	 * ports that actually joined that group.
	 */
	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (!test_bit(port->portno, e->port_mask) &&
		    ether_addr_is_ip_mcast(e->addr))
			sparx5_pgid_update_mask(port, e->pgid_idx, enable);
	}
	mutex_unlock(&sparx5->mdb_lock);

	/* Enable/disable flooding depending on whether the port is an
	 * mrouter port or mcast flooding is enabled.
	 */
	port->is_mrouter = enable;
	flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
	sparx5_port_update_mcast_ip_flood(port, flood_flag);
}

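/* Handle switchdev port attribute updates */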
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when the default_pvid is 0, to avoid
		 * collisions with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		sparx5_port_attr_mrouter_set(port,
					     attr->orig_dev,
					     attr->u.mrouter);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

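/* Attach a port to the bridge; only a single bridge can be offloaded */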
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* Adding a port to a second bridge is not supported */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove the standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, so multicast frames no longer need
	 * to be copied to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

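/* Detach a port from the bridge and return it to standalone operation */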
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to the CPU */
	sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
			  port->ndev->dev_addr, 0);

	/* The port returns to host mode, so restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

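/* Handle NETDEV_CHANGEUPPER: a port joining or leaving a bridge */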
static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

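/* Learn or forget the MACT entry that traps the port's own MAC address
 * to the CPU.
 */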
static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
				  port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

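/* Handle netdev events for ports belonging to this driver */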
static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

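/* Notifier callback for all netdevice events */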
static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

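/* Deferred handler for FDB add/del events: programs the MAC table (MACT).
 * Addresses on foreign interfaces, such as the bridge itself, are trapped
 * to the CPU; addresses on sparx5 ports are learned on the port.
 */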
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	sparx5 = switchdev_work->sparx5;
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
	} else {
		host_addr = false;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when the default_pvid is 0, to avoid
	 * collisions with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev,
					      sparx5_get_pgid(sparx5, PGID_CPU),
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

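/* Atomic switchdev notifier. FDB events are copied and deferred to the
 * ordered workqueue so they can be handled in process context.
 */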
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

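/* Add a VLAN on a port, or trap broadcast on the VLAN to the CPU when the
 * target is the bridge device itself.
 */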
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to CPU */
		sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_BCAST),
				  dev->broadcast, v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				   v->flags & BRIDGE_VLAN_INFO_PVID,
				   v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

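/* Allocate an MDB entry and a multicast PGID for it, and insert the entry
 * into the driver's MDB list.
 */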
static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid,
				  struct sparx5_mdb_entry **entry_out)
{
	struct sparx5_mdb_entry *entry;
	u16 pgid_idx;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
	if (err) {
		kfree(entry);
		return err;
	}

	memcpy(entry->addr, addr, ETH_ALEN);
	entry->vid = vid;
	entry->pgid_idx = pgid_idx;

	mutex_lock(&sparx5->mdb_lock);
	list_add_tail(&entry->list, &sparx5->mdb_entries);
	mutex_unlock(&sparx5->mdb_lock);

	*entry_out = entry;
	return 0;
}

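/* Remove the first MDB entry matching the address (and VLAN, unless vid
 * is 0) and release its PGID.
 */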
static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid)
{
	struct sparx5_mdb_entry *entry, *tmp;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
		if ((vid == 0 || entry->vid == vid) &&
		    ether_addr_equal(addr, entry->addr)) {
			list_del(&entry->list);

			sparx5_pgid_free(sparx5, entry->pgid_idx);
			kfree(entry);
			break;
		}
	}

	mutex_unlock(&sparx5->mdb_lock);
}

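/* Find an MDB entry by address and VLAN, or return NULL */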
static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
						     const unsigned char *addr,
						     u16 vid)
{
	struct sparx5_mdb_entry *e, *found = NULL;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
			found = e;
			break;
		}
	}

	mutex_unlock(&sparx5->mdb_lock);
	return found;
}

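/* Enable or disable the CPU copy bit of a PGID entry */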
static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
{
	spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
		 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
		 ANA_AC_PGID_MISC_CFG(pgid));
}

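/* Offload an MDB group join. Host joins enable CPU copy on the group's
 * PGID; port joins add the port to the PGID port mask.
 */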
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host, is_new;
	int err, i;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	/* When the bridge is VLAN unaware, the VLAN value is not parsed
	 * and we receive vid 0. Fall back to bridge VID 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	is_new = false;
	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry) {
		err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
		if (err)
			return err;
		is_new = true;
	}

	mutex_lock(&spx5->mdb_lock);

	/* Add any mrouter ports to the new entry */
	if (is_new && ether_addr_is_ip_mcast(v->addr))
		for (i = 0; i < spx5->data->consts->n_ports; i++)
			if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
				sparx5_pgid_update_mask(spx5->ports[i],
							entry->pgid_idx,
							true);

	if (is_host && !entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
		entry->cpu_copy = true;
	} else if (!is_host) {
		sparx5_pgid_update_mask(port, entry->pgid_idx, true);
		set_bit(port->portno, entry->port_mask);
	}
	mutex_unlock(&spx5->mdb_lock);

	sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);

	return 0;
}

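/* Offload an MDB group leave, and free the group's PGID once neither
 * ports nor the CPU reference it.
 */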
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry)
		return 0;

	mutex_lock(&spx5->mdb_lock);
	if (is_host && entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
		entry->cpu_copy = false;
	} else if (!is_host) {
		clear_bit(port->portno, entry->port_mask);

		/* Remove the port from the mask unless it is an mrouter
		 * port and the address is an IP multicast address.
		 */
		if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
			sparx5_pgid_update_mask(port, entry->pgid_idx, false);
	}
	mutex_unlock(&spx5->mdb_lock);

	if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
		/* Clear the pgid in case mrouter ports exist
		 * that are not part of the group.
		 */
		sparx5_pgid_clear(spx5, entry->pgid_idx);
		sparx5_mact_forget(spx5, entry->addr, entry->vid);
		sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
	}
	return 0;
}

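/* Dispatch switchdev object additions (VLANs and MDB entries) */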
static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

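/* Remove a VLAN from a port, or stop trapping the VLAN's broadcast when
 * the target is the bridge device itself.
 */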
static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_del(port, vid);
}

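/* Dispatch switchdev object deletions (VLANs and MDB entries) */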
static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

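/* Blocking switchdev notifier: handles object add/del and attribute set */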
static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

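/* Register the netdevice and switchdev notifiers and create the ordered
 * workqueue used to defer FDB updates.
 */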
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_owq;
	}

	return 0;

err_owq:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

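/* Undo sparx5_register_notifier_blocks() */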
void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}