// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

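/* Helper for the ageing time notifier: each bridged port may request its own
 * ageing time, but the switch typically exposes a single chip-wide timer, so
 * the fastest (lowest) requested value wins.
 */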
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

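/* An MTU change on a user port must also be applied to the CPU and DSA ports
 * along its path, so those match as well, unless the notifier was targeted at
 * a single port only.
 */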
static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
		return true;

	return false;
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

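/* A bridge join notification is sent to every switch in the tree. The switch
 * that owns the port programs it directly; the other switches are offered the
 * crosschip variant so they can keep cross-chip forwarding consistent, e.g.
 * for hardware that must know about bridging decisions on neighbouring chips.
 */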
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->port, info->br);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->br);
		if (err)
			return err;
	}

	return dsa_tag_8021q_bridge_join(ds, info);
}

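/* On bridge leave, besides undoing the join, the port may need its VLAN
 * awareness flipped back to the standalone default: some switches need VLAN
 * filtering on even when standalone (needs_standalone_vlan_filtering), others
 * must have it off once no VLAN-aware bridge remains.
 */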
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	struct dsa_port *dp;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->br);

	if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->br)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event to change the vlan_filtering setting when slave ports leave
	 * it. That is a good thing, because it lets us handle the transition
	 * ourselves and also handle the case where the switch's vlan_filtering
	 * setting is global (not per port). In that case, the correct moment
	 * to trigger the vlan_filtering callback is only when the last port
	 * leaves the last VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(dp, ds) {
			struct net_device *bridge_dev;

			bridge_dev = dp->bridge_dev;

			if (bridge_dev && br_vlan_enabled(bridge_dev)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return dsa_tag_8021q_bridge_leave(ds, info);
}

/* Returns true for all upstream-facing ports (the CPU port and all
 * upstream-facing DSA links) that sit between the targeted port on which the
 * notifier was emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

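/* Look up an address/VID pair in a refcounted address list (dp->fdbs or
 * dp->mdbs).
 */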
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
			return a;

	return NULL;
}

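/* CPU and DSA ports are shared by all user ports, so the same host MDB entry
 * may be requested more than once. Keep such entries on a refcounted list and
 * only touch the hardware on the first add and the last delete.
 */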
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (a) {
		refcount_inc(&a->refcount);
		return 0;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	err = ds->ops->port_mdb_add(ds, port, mdb);
	if (err) {
		kfree(a);
		return err;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

	return 0;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (!a)
		return -ENOENT;

	if (!refcount_dec_and_test(&a->refcount))
		return 0;

	err = ds->ops->port_mdb_del(ds, port, mdb);
	if (err) {
		refcount_inc(&a->refcount);
		return err;
	}

	list_del(&a->list);
	kfree(a);

	return 0;
}

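/* Same refcounting scheme as for MDB entries above, applied to FDB entries on
 * shared (CPU and DSA) ports.
 */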
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (a) {
		refcount_inc(&a->refcount);
		return 0;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	err = ds->ops->port_fdb_add(ds, port, addr, vid);
	if (err) {
		kfree(a);
		return err;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

	return 0;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (!a)
		return -ENOENT;

	if (!refcount_dec_and_test(&a->refcount))
		return 0;

	err = ds->ops->port_fdb_del(ds, port, addr, vid);
	if (err) {
		refcount_inc(&a->refcount);
		return err;
	}

	list_del(&a->list);
	kfree(a);

	return 0;
}

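/* Host addresses are installed on all upstream-facing ports (CPU and DSA
 * links) between the targeted port and its CPU port, so traffic to these
 * addresses is steered towards the host along the whole path.
 */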
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_add(dp, info->addr, info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_del(dp, info->addr, info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid);
}

static int dsa_switch_hsr_join(struct dsa_switch *ds,
			       struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_join)
		return ds->ops->port_hsr_join(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

static int dsa_switch_hsr_leave(struct dsa_switch *ds,
				struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
		return ds->ops->port_hsr_leave(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

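/* LAG events target a single port. Other switches in the tree only see them
 * through the crosschip_lag_* ops, if the driver provides them.
 */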
static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_add(dp, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_del(dp, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

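/* VLANs are programmed on the targeted port and on all DSA links, so tagged
 * traffic can traverse the fabric between switches.
 */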
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	if (dsa_port_is_dsa(dp))
		return true;

	return false;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = ds->ops->port_vlan_add(ds, dp->index, info->vlan,
						     info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, info->vlan);

	/* Do not deprogram the DSA links as they may be used as conduits
	 * for other VLAN members in the fabric.
	 */
	return 0;
}

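/* Switch the tagging protocol used on the CPU ports of this switch. Called
 * with rtnl held; only after all CPU ports have been converted are the user
 * ports updated for tagger-dependent state such as the MTU.
 */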
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}

static int dsa_switch_mrp_add(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add(ds, info->port, info->mrp);

	return 0;
}

static int dsa_switch_mrp_del(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del(ds, info->port, info->mrp);

	return 0;
}

static int
dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

static int
dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

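/* Dispatch one cross-chip notifier event to the matching handler. A non-zero
 * error breaks the notifier chain, reported via notifier_from_errno().
 */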
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HSR_JOIN:
		err = dsa_switch_hsr_join(ds, info);
		break;
	case DSA_NOTIFIER_HSR_LEAVE:
		err = dsa_switch_hsr_leave(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD:
		err = dsa_switch_mrp_add(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL:
		err = dsa_switch_mrp_del(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
		err = dsa_switch_mrp_add_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
		err = dsa_switch_mrp_del_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}