// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

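/* Validate the requested ageing time against the switch's supported range,
 * then program the fastest ageing time currently requested by any port on
 * this switch.
 */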
static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

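/* The targeted port always matches the MTU notifier; shared (CPU and DSA)
 * ports also match so that the new MTU propagates across the fabric, unless
 * the notifier was targeted at a single switch.
 */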
static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
		return true;

	return false;
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

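/* Notify the targeted switch that one of its ports joins a bridge, notify
 * the other switches in the tree through the cross-chip operation, then let
 * the tag_8021q code set up its bridge VLANs if in use.
 */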
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->port, info->bridge);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->bridge);
		if (err)
			return err;
	}

	return dsa_tag_8021q_bridge_join(ds, info);
}

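/* Mirror image of dsa_switch_bridge_join(): notify the (cross-chip) drivers
 * of the port leaving, then restore the VLAN filtering setting that the port
 * needs as a standalone port, if it differs from the bridge's setting.
 */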
static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	struct dsa_port *dp;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->bridge);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->bridge);

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing vlan_filtering setting upon slave ports leaving
	 * it. That is a good thing, because that lets us handle it and also
	 * handle the case where the switch's vlan_filtering setting is global
	 * (not per port). When that happens, the correct moment to trigger the
	 * vlan_filtering callback is only when the last port leaves the last
	 * VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return dsa_tag_8021q_bridge_leave(ds, info);
}

/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}

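/* Look up a MAC address/VID pair in a port's FDB or MDB address list. */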
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
			return a;

	return NULL;
}

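/* Install or remove a multicast address on a port. On shared (CPU and DSA)
 * ports the same address may be requested on behalf of multiple user ports,
 * so keep a refcounted list and only touch the hardware on the 0 <-> 1
 * refcount transitions.
 */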
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

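/* Same refcounting scheme as dsa_port_do_mdb_add()/dsa_port_do_mdb_del(),
 * but for unicast FDB entries installed on shared ports.
 */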
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

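/* Host addresses are installed on every upstream-facing port that sits
 * between the targeted port and its dedicated CPU port, so that packets for
 * the host reach the CPU from anywhere in the fabric.
 */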
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_add(dp, info->addr, info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_del(dp, info->addr, info->vid);
			if (err)
				break;
		}
	}

	return err;
}

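/* For regular FDB entries, program the targeted port itself if it belongs to
 * this switch, otherwise the cascade port leading towards the targeted
 * switch.
 */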
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid);
}

static int dsa_switch_hsr_join(struct dsa_switch *ds,
			       struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_join)
		return ds->ops->port_hsr_join(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

static int dsa_switch_hsr_leave(struct dsa_switch *ds,
				struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
		return ds->ops->port_hsr_leave(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

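/* LAG events are offered to the targeted switch through the port_lag_*()
 * ops, and to the other switches in the tree through the crosschip_lag_*()
 * ops, where implemented.
 */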
static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_add(dp, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_del(dp, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

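/* A port matches a VLAN notifier if it is the targeted port, or if it is a
 * DSA link that must carry the VLAN across the fabric.
 */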
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	if (dsa_port_is_dsa(dp))
		return true;

	return false;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = ds->ops->port_vlan_add(ds, dp->index, info->vlan,
						     info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, info->vlan);

	/* Do not deprogram the DSA links as they may be used as conduit
	 * for other VLAN members in the fabric.
	 */
	return 0;
}

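/* Change the tagging protocol on all CPU ports of this switch, then update
 * the per-slave state (tagger setup and MTU) that depends on the tagger.
 */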
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}

static int dsa_switch_mrp_add(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add(ds, info->port, info->mrp);

	return 0;
}

static int dsa_switch_mrp_del(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del(ds, info->port, info->mrp);

	return 0;
}

static int
dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

static int
dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

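/* Dispatch a DSA notifier event to the handler for its type. A non-zero
 * return value stops the notifier chain for the remaining switches in the
 * tree.
 */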
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HSR_JOIN:
		err = dsa_switch_hsr_join(ds, info);
		break;
	case DSA_NOTIFIER_HSR_LEAVE:
		err = dsa_switch_hsr_leave(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD:
		err = dsa_switch_mrp_add(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL:
		err = dsa_switch_mrp_del(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
		err = dsa_switch_mrp_add_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
		err = dsa_switch_mrp_del_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

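/* Subscribe/unsubscribe this switch to the notifier chain of its tree,
 * through which the cross-chip events above are delivered.
 */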
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}