xref: /linux/net/dsa/switch.c (revision 0ad53fe3ae82443c74ff8cfd7bd13377cc1134a3)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

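/* Each handler in this file services one DSA_NOTIFIER_* event on behalf of a
 * single switch. Events are emitted on the switch tree's notifier chain, so
 * every switch in the fabric gets to see them; dsa_switch_event() at the
 * bottom of this file does the dispatching.
 */
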
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	int i;

	for (i = 0; i < ds->num_ports; ++i) {
		struct dsa_port *dp = dsa_to_port(ds, i);

		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;
	}

	return ageing_time;
}

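/* Validate the requested ageing time against the range advertised by the
 * driver, then program the fastest (smallest) ageing time in use on any port,
 * since many switches only expose a single, chip-wide ageing timer.
 *
 * As a rough sketch (values purely illustrative, not from this file), a
 * driver with such a global timer would advertise its range at setup time:
 *
 *	ds->ageing_time_min = 1000;
 *	ds->ageing_time_max = 3825000;
 *
 * and implement ds->ops->set_ageing_time() to program the hardware.
 */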
static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

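/* A port matches an MTU notifier if it is the exact port the notifier
 * targets or, unless the notifier was targeted at a single switch, if it is
 * a CPU or DSA port, so that the new MTU also takes effect on the path
 * towards the host.
 */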
static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
				 struct dsa_notifier_mtu_info *info)
{
	if (ds->index == info->sw_index && port == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
		return true;

	return false;
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	int port, ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_mtu_match(ds, port, info)) {
			ret = ds->ops->port_change_mtu(ds, port, info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

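/* The switch that owns the port programs it via port_bridge_join(); every
 * other switch in the fabric is offered crosschip_bridge_join() so it can
 * update its cross-chip forwarding state. Finally, tag_8021q bridging state
 * is refreshed for switches that use it.
 */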
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->port, info->br);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->br);
		if (err)
			return err;
	}

	return dsa_tag_8021q_bridge_join(ds, info);
}

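/* Leaving a bridge may also require toggling VLAN awareness: switches whose
 * tagger depends on tag_8021q (needs_standalone_vlan_filtering) must turn
 * VLAN filtering back on when a port returns to standalone mode after a
 * VLAN-unaware bridge, while other switches turn it off when leaving a
 * VLAN-aware bridge.
 */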
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	int err, port;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->br);

	if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->br)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		for (port = 0; port < ds->num_ports; port++) {
			struct net_device *bridge_dev;

			bridge_dev = dsa_to_port(ds, port)->bridge_dev;

			if (bridge_dev && br_vlan_enabled(bridge_dev)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return dsa_tag_8021q_bridge_leave(ds, info);
}

/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
					  int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(ds, targeted_ds))
		return port == dsa_towards_port(ds, cpu_dp->ds->index,
						cpu_dp->index);

	return false;
}

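/* FDB and MDB entries installed on shared (CPU and DSA) ports can be
 * requested by multiple user ports, so they are kept in the per-port
 * dp->fdbs/dp->mdbs lists and reference counted; dsa_mac_addr_find() looks
 * up an existing entry by MAC address and VID.
 */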
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
			return a;

	return NULL;
}

static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (a) {
		refcount_inc(&a->refcount);
		return 0;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	err = ds->ops->port_mdb_add(ds, port, mdb);
	if (err) {
		kfree(a);
		return err;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

	return 0;
}

static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (!a)
		return -ENOENT;

	if (!refcount_dec_and_test(&a->refcount))
		return 0;

	err = ds->ops->port_mdb_del(ds, port, mdb);
	if (err) {
		refcount_inc(&a->refcount);
		return err;
	}

	list_del(&a->list);
	kfree(a);

	return 0;
}

static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (a) {
		refcount_inc(&a->refcount);
		return 0;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	err = ds->ops->port_fdb_add(ds, port, addr, vid);
	if (err) {
		kfree(a);
		return err;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

	return 0;
}

static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (!a)
		return -ENOENT;

	if (!refcount_dec_and_test(&a->refcount))
		return 0;

	err = ds->ops->port_fdb_del(ds, port, addr, vid);
	if (err) {
		refcount_inc(&a->refcount);
		return err;
	}

	list_del(&a->list);
	kfree(a);

	return 0;
}

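/* Host FDB entries are addresses that need to reach the host. They are
 * installed, with refcounting, on every upstream-facing port between the
 * targeted port and its dedicated CPU port (see
 * dsa_switch_host_address_match() above).
 */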
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_fdb_add(ds, port, info->addr,
						    info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_fdb_del(ds, port, info->addr,
						    info->vid);
			if (err)
				break;
		}
	}

	return err;
}

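/* For a normal FDB entry, each switch programs the local port that points
 * towards the targeted (switch, port) pair: the port itself on the owning
 * switch, or the DSA link leading to it on the other switches
 * (dsa_towards_port()).
 */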
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
}

static int dsa_switch_hsr_join(struct dsa_switch *ds,
			       struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_join)
		return ds->ops->port_hsr_join(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

static int dsa_switch_hsr_leave(struct dsa_switch *ds,
				struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
		return ds->ops->port_hsr_leave(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

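/* LAG offload: the switch that owns the port gets the port_lag_*() ops, the
 * other switches in the fabric get the crosschip_lag_*() variants. A
 * "change" notification is emitted when the state of a port inside the LAG
 * changes (e.g. its lower state), while join/leave track LAG membership
 * itself.
 */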
static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}

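/* MDB (multicast) entries are handled like the FDB entries above: normal
 * entries go to the port towards the target, host entries go to every
 * upstream-facing port on the path to the CPU, both with refcounting on
 * shared ports.
 */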
static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_switch_do_mdb_add(ds, port, info->mdb);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_switch_do_mdb_del(ds, port, info->mdb);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_mdb_add(ds, port, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_mdb_del(ds, port, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

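/* Bridge VLANs are installed on the targeted port and on all DSA links, so
 * that tagged traffic can cross the fabric. On deletion, only the targeted
 * switch removes the VLAN; DSA links are left alone (see
 * dsa_switch_vlan_del()).
 */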
static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
				  struct dsa_notifier_vlan_info *info)
{
	if (ds->index == info->sw_index && port == info->port)
		return true;

	if (dsa_is_dsa_port(ds, port))
		return true;

	return false;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	int port, err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_vlan_match(ds, port, info)) {
			err = ds->ops->port_vlan_add(ds, port, info->vlan,
						     info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, info->vlan);

	/* Do not deprogram the DSA links as they may be used as conduit
	 * for other VLAN members in the fabric.
	 */
	return 0;
}

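/* Changing the tagging protocol is done in two steps: first the new protocol
 * is programmed on every CPU port (the only step that may fail), then the
 * per-slave tagger state and MTU are updated. This path is reached when user
 * space rewrites the DSA master's tagging protocol, roughly (sketch, with
 * "eth0" and the tagger name purely as examples):
 *
 *	ip link set dev eth0 down
 *	echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging
 *
 * with all ports of the switch administratively down.
 */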
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int port, err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port)) {
			struct net_device *slave;

			slave = dsa_to_port(ds, port)->slave;
			dsa_slave_setup_tagger(slave);

			/* rtnl_mutex is held in dsa_tree_change_tag_proto */
			dsa_slave_change_mtu(slave, slave->mtu);
		}
	}

	return 0;
}

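/* MRP (Media Redundancy Protocol, IEC 62439-2) offload: only the switch that
 * owns the port reacts; other switches return 0 so the notifier chain keeps
 * running. The ring role notifiers inform the driver of the MRP role
 * assigned to the port.
 */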
static int dsa_switch_mrp_add(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add(ds, info->port, info->mrp);

	return 0;
}

static int dsa_switch_mrp_del(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del(ds, info->port, info->mrp);

	return 0;
}

static int
dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

static int
dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

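/* Single entry point for all DSA notifiers. A non-zero error is encoded with
 * notifier_from_errno(), which both stops the chain (hence the "breaking
 * chain" message below) and lets the emitter retrieve the error with
 * notifier_to_errno().
 */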
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HSR_JOIN:
		err = dsa_switch_hsr_join(ds, info);
		break;
	case DSA_NOTIFIER_HSR_LEAVE:
		err = dsa_switch_hsr_leave(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD:
		err = dsa_switch_mrp_add(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL:
		err = dsa_switch_mrp_del(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
		err = dsa_switch_mrp_add_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
		err = dsa_switch_mrp_del_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

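/* Each switch registers one notifier block on its tree's raw notifier chain
 * at setup time. Events are delivered synchronously, typically under the
 * rtnl mutex, by helpers such as dsa_tree_notify() and dsa_port_notify().
 */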
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
783