// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

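/* Scan all ports of this switch and return the smallest nonzero ageing time
 * that any of them has requested, so that a single hardware setting satisfies
 * the strictest bridge in the fabric.
 */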
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	int i;

	for (i = 0; i < ds->num_ports; ++i) {
		struct dsa_port *dp = dsa_to_port(ds, i);

		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;
	}

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}

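/* An MTU notifier matches the targeted port itself and, unless the notifier
 * was targeted at a single switch, also the DSA and CPU ports that carry its
 * traffic.
 */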
static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
				 struct dsa_notifier_mtu_info *info)
{
	if (ds->index == info->sw_index && port == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
		return true;

	return false;
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	int port, ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_mtu_match(ds, port, info)) {
			ret = ds->ops->port_change_mtu(ds, port, info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}

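/* A port joined a bridge: let the local driver handle it if the port belongs
 * to this switch, or the cross-chip operation otherwise, then forward the
 * event to the tag_8021q code.
 */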
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_join) {
		err = ds->ops->port_bridge_join(ds, info->port, info->br);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->br);
		if (err)
			return err;
	}

	return dsa_tag_8021q_bridge_join(ds, info);
}

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	bool unset_vlan_filtering = br_vlan_enabled(info->br);
	struct dsa_switch_tree *dst = ds->dst;
	struct netlink_ext_ack extack = {0};
	int err, port;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->br);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->br);

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event to change the vlan_filtering setting when slave ports leave
	 * it. That is a good thing, because it lets us handle it here,
	 * including the case where the switch's vlan_filtering setting is
	 * global (not per port). In that case, the correct moment to trigger
	 * the vlan_filtering callback is only when the last port leaves the
	 * last VLAN-aware bridge.
	 */
	if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
		for (port = 0; port < ds->num_ports; port++) {
			struct net_device *bridge_dev;

			bridge_dev = dsa_to_port(ds, port)->bridge_dev;

			if (bridge_dev && br_vlan_enabled(bridge_dev)) {
				unset_vlan_filtering = false;
				break;
			}
		}
	}
	if (unset_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      false, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return dsa_tag_8021q_bridge_leave(ds, info);
}

/* Returns true for all upstream-facing ports (the CPU port and all
 * upstream-facing DSA links) that sit between the targeted port on which the
 * notifier was emitted and its dedicated CPU port.
 */
static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
					  int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(ds, targeted_ds))
		return port == dsa_towards_port(ds, cpu_dp->ds->index,
						cpu_dp->index);

	return false;
}

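/* Look up an address/VID pair in a port's list of refcounted FDB/MDB entries. */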
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
			return a;

	return NULL;
}

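/* CPU and DSA ports are shared by all bridges and users, so their MDB entries
 * are refcounted: only the first addition programs the hardware, and only the
 * last deletion removes the entry again.
 */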
static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (a) {
		refcount_inc(&a->refcount);
		return 0;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	err = ds->ops->port_mdb_add(ds, port, mdb);
	if (err) {
		kfree(a);
		return err;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

	return 0;
}

static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (!a)
		return -ENOENT;

	if (!refcount_dec_and_test(&a->refcount))
		return 0;

	err = ds->ops->port_mdb_del(ds, port, mdb);
	if (err) {
		refcount_inc(&a->refcount);
		return err;
	}

	list_del(&a->list);
	kfree(a);

	return 0;
}

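/* Same refcounting scheme as for MDB entries above, but for unicast FDB
 * entries on shared (CPU and DSA) ports.
 */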
static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (a) {
		refcount_inc(&a->refcount);
		return 0;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	err = ds->ops->port_fdb_add(ds, port, addr, vid);
	if (err) {
		kfree(a);
		return err;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

	return 0;
}

static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;
	int err;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (!a)
		return -ENOENT;

	if (!refcount_dec_and_test(&a->refcount))
		return 0;

	err = ds->ops->port_fdb_del(ds, port, addr, vid);
	if (err) {
		refcount_inc(&a->refcount);
		return err;
	}

	list_del(&a->list);
	kfree(a);

	return 0;
}

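/* Install the FDB entry on all upstream-facing ports between the targeted
 * port and its dedicated CPU port (see dsa_switch_host_address_match).
 */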
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_fdb_add(ds, port, info->addr,
						    info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_fdb_del(ds, port, info->addr,
						    info->vid);
			if (err)
				break;
		}
	}

	return err;
}

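/* Program the FDB entry on the local port that faces the originating switch
 * and port; for cross-chip notifications this resolves to a DSA link.
 */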
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
}

static int dsa_switch_hsr_join(struct dsa_switch *ds,
			       struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_join)
		return ds->ops->port_hsr_join(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

static int dsa_switch_hsr_leave(struct dsa_switch *ds,
				struct dsa_notifier_hsr_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
		return ds->ops->port_hsr_leave(ds, info->port, info->hsr);

	return -EOPNOTSUPP;
}

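/* LAG events are handled by the local driver when the targeted port belongs
 * to this switch, and by the cross-chip variants otherwise.
 */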
static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_switch_do_mdb_add(ds, port, info->mdb);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_switch_do_mdb_del(ds, port, info->mdb);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_mdb_add(ds, port, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	int err = 0;
	int port;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_host_address_match(ds, port, info->sw_index,
						  info->port)) {
			err = dsa_switch_do_mdb_del(ds, port, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

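/* A VLAN is programmed on the targeted port and on all DSA links, since the
 * DSA links act as conduits towards VLAN members elsewhere in the fabric.
 */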
static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
				  struct dsa_notifier_vlan_info *info)
{
	if (ds->index == info->sw_index && port == info->port)
		return true;

	if (dsa_is_dsa_port(ds, port))
		return true;

	return false;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	int port, err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_switch_vlan_match(ds, port, info)) {
			err = ds->ops->port_vlan_add(ds, port, info->vlan,
						     info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_vlan_del(ds, info->port, info->vlan);

	/* Do not deprogram the DSA links as they may be used as conduits
	 * for other VLAN members in the fabric.
	 */
	return 0;
}

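/* Change the tagging protocol on all CPU ports of this switch, then refresh
 * the per-slave tagger state and MTU, which depend on the tagger. Called with
 * rtnl_lock held.
 */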
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int port, err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_user_port(ds, port)) {
			struct net_device *slave;

			slave = dsa_to_port(ds, port)->slave;
			dsa_slave_setup_tagger(slave);

			/* rtnl_mutex is held in dsa_tree_change_tag_proto */
			dsa_slave_change_mtu(slave, slave->mtu);
		}
	}

	return 0;
}

static int dsa_switch_mrp_add(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_add)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add(ds, info->port, info->mrp);

	return 0;
}

static int dsa_switch_mrp_del(struct dsa_switch *ds,
			      struct dsa_notifier_mrp_info *info)
{
	if (!ds->ops->port_mrp_del)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del(ds, info->port, info->mrp);

	return 0;
}

static int
dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_add_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_add_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

static int
dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
			     struct dsa_notifier_mrp_ring_role_info *info)
{
	if (!ds->ops->port_mrp_del_ring_role)
		return -EOPNOTSUPP;

	if (ds->index == info->sw_index)
		return ds->ops->port_mrp_del_ring_role(ds, info->port,
						       info->mrp);

	return 0;
}

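/* Dispatch a cross-chip notifier event to the matching handler above. A
 * nonzero error stops the notifier chain and is propagated back through
 * notifier_from_errno().
 */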
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HSR_JOIN:
		err = dsa_switch_hsr_join(ds, info);
		break;
	case DSA_NOTIFIER_HSR_LEAVE:
		err = dsa_switch_hsr_leave(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD:
		err = dsa_switch_mrp_add(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL:
		err = dsa_switch_mrp_del(ds, info);
		break;
	case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
		err = dsa_switch_mrp_add_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
		err = dsa_switch_mrp_del_ring_role(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}