xref: /linux/net/dsa/switch.c (revision d27656d02d85078c63f060fca9c5d99794791a75)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch chip, part of a switch fabric
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "dsa_priv.h"

static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}

static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}
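
/* Illustrative sketch, not part of this file: a driver's .set_ageing_time
 * hook typically converts the time, which DSA passes in milliseconds, into
 * the hardware's native units and programs a register. The foo_* names and
 * the 15 s register granularity below are hypothetical.
 *
 *	static int foo_set_ageing_time(struct dsa_switch *ds,
 *				       unsigned int msecs)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *		u32 ticks = DIV_ROUND_UP(msecs, 15000);
 *
 *		return foo_write(priv, FOO_REG_AGE_TIME, ticks);
 *	}
 *
 * The bounds checked in dsa_switch_ageing_time() come from the driver
 * setting ds->ageing_time_min and ds->ageing_time_max during setup.
 */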

static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	/* Do not propagate to other switches in the tree if the notifier was
	 * targeted for a single switch.
	 */
	if (info->targeted_match)
		return false;

	if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
		return true;

	return false;
}

static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}
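
/* Illustrative sketch, not part of this file: a driver's .port_change_mtu
 * hook programs the per-port maximum frame length. DSA passes the L2 payload
 * MTU, so drivers typically add the Ethernet overhead themselves. The foo_*
 * names are hypothetical; ETH_HLEN and ETH_FCS_LEN are real.
 *
 *	static int foo_port_change_mtu(struct dsa_switch *ds, int port,
 *				       int new_mtu)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 *
 *		return foo_write(priv, FOO_REG_MAX_FRAME(port), frame_size);
 *	}
 */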

static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
						&info->tx_fwd_offload);
		if (err)
			return err;
	}

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
						     info->sw_index,
						     info->port, info->bridge);
		if (err)
			return err;
	}

	return 0;
}
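
/* Illustrative sketch, not part of this file: .port_bridge_join usually opens
 * up forwarding between the joining port and the other ports that already
 * offload the same bridge. The foo_* forwarding mask register is
 * hypothetical; dsa_switch_for_each_user_port() and
 * dsa_port_offloads_bridge() are real helpers.
 *
 *	static int foo_port_bridge_join(struct dsa_switch *ds, int port,
 *					struct dsa_bridge bridge,
 *					bool *tx_fwd_offload)
 *	{
 *		struct dsa_port *dp;
 *		u32 mask = 0;
 *
 *		dsa_switch_for_each_user_port(dp, ds)
 *			if (dsa_port_offloads_bridge(dp, &bridge))
 *				mask |= BIT(dp->index);
 *
 *		return foo_write(ds->priv, FOO_REG_FWD_MASK(port), mask);
 *	}
 */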

static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds,
					  struct dsa_notifier_bridge_info *info)
{
	struct netlink_ext_ack extack = {0};
	bool change_vlan_filtering = false;
	bool vlan_filtering;
	struct dsa_port *dp;
	int err;

	if (ds->needs_standalone_vlan_filtering &&
	    !br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = true;
	} else if (!ds->needs_standalone_vlan_filtering &&
		   br_vlan_enabled(info->bridge.dev)) {
		change_vlan_filtering = true;
		vlan_filtering = false;
	}

	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
	 * event for changing the vlan_filtering setting when slave ports leave
	 * it. That is a good thing, because it lets us handle the transition
	 * ourselves, including the case where the switch's vlan_filtering
	 * setting is global (not per port). In that case, the correct moment
	 * to trigger the vlan_filtering callback is only when the last port
	 * leaves the last VLAN-aware bridge.
	 */
	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
		dsa_switch_for_each_port(dp, ds) {
			struct net_device *br = dsa_port_bridge_dev_get(dp);

			if (br && br_vlan_enabled(br)) {
				change_vlan_filtering = false;
				break;
			}
		}
	}

	if (change_vlan_filtering) {
		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
					      vlan_filtering, &extack);
		if (extack._msg)
			dev_err(ds->dev, "port %d: %s\n", info->port,
				extack._msg);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
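
/* Worked example for the global case above: ports A and B of the same switch
 * are members of two different vlan_filtering bridges, and A leaves its
 * bridge. Although A alone would now want vlan_filtering turned off (assuming
 * the switch doesn't need standalone VLAN filtering), the global setting must
 * stay enabled because B's bridge still requires it, so the loop detects B's
 * bridge and cancels the change.
 */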

static int dsa_switch_bridge_leave(struct dsa_switch *ds,
				   struct dsa_notifier_bridge_info *info)
{
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (dst->index == info->tree_index && ds->index == info->sw_index &&
	    ds->ops->port_bridge_leave)
		ds->ops->port_bridge_leave(ds, info->port, info->bridge);

	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
	    ds->ops->crosschip_bridge_leave)
		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
						info->sw_index, info->port,
						info->bridge);

	if (dst->index == info->tree_index && ds->index == info->sw_index) {
		err = dsa_switch_sync_vlan_filtering(ds, info);
		if (err)
			return err;
	}

	return 0;
}

/* Returns true for all upstream-facing ports (the CPU port and the
 * upstream-facing DSA links) that sit between the targeted port on which the
 * notifier was emitted and that port's dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					int info_sw_index, int info_port)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info_sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info_port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}
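
/* Worked example: in a daisy chain where switch 0 hosts the CPU port and
 * switch 1 hangs off one of its DSA links, a host address targeted at a user
 * port of switch 1 matches switch 1's upstream-facing DSA port as well as
 * switch 0's CPU port, so the address is trapped towards the host at every
 * hop along the path.
 */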

static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr,
					      u16 vid)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
			return a;

	return NULL;
}

static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
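
/* Example of the refcounting above: when two user ports behind the same CPU
 * port install the same host MDB entry, both installs resolve to that shared
 * CPU port, whose refcount reaches 2. The hardware entry is written once and
 * only deleted after both users have withdrawn their request.
 */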

static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid);
	if (a) {
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid);
	if (err) {
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}

static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_add(dp, info->addr, info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_fdb_del(dp, info->addr, info->vid);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid);
}

static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid);
}

static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_add)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_add(ds, info->lag,
							 info->addr, info->vid);

	return 0;
}

static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
				  struct dsa_notifier_lag_fdb_info *info)
{
	struct dsa_port *dp;

	if (!ds->ops->lag_fdb_del)
		return -EOPNOTSUPP;

	/* Notify switch only if it has a port in this LAG */
	dsa_switch_for_each_port(dp, ds)
		if (dsa_port_offloads_lag(dp, info->lag))
			return dsa_switch_do_lag_fdb_del(ds, info->lag,
							 info->addr, info->vid);

	return 0;
}

static int dsa_switch_lag_change(struct dsa_switch *ds,
				 struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_change)
		return ds->ops->port_lag_change(ds, info->port);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
		return ds->ops->crosschip_lag_change(ds, info->sw_index,
						     info->port);

	return 0;
}

static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->port, info->lag,
					      info->info);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->sw_index,
						   info->port, info->lag,
						   info->info);

	return -EOPNOTSUPP;
}

static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->port, info->lag);

	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
						    info->port, info->lag);

	return -EOPNOTSUPP;
}
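
/* Illustrative sketch, not part of this file: a driver's .port_lag_join hook
 * usually rejects TX balancing policies the hardware cannot honour before
 * binding the port to a hardware trunk. foo_trunk_map_port() is hypothetical;
 * struct netdev_lag_upper_info is the real bonding/team uppers API.
 *
 *	static int foo_port_lag_join(struct dsa_switch *ds, int port,
 *				     struct dsa_lag lag,
 *				     struct netdev_lag_upper_info *info)
 *	{
 *		if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
 *			return -EOPNOTSUPP;
 *
 *		return foo_trunk_map_port(ds->priv, lag.id, port);
 *	}
 *
 * Returning -EOPNOTSUPP makes DSA fall back to handling the LAG in software
 * instead of failing the enslavement.
 */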

static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb);
}

static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->sw_index, info->port);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb);
}

static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_add(dp, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->sw_index,
						info->port)) {
			err = dsa_port_do_mdb_del(dp, info->mdb);
			if (err)
				break;
		}
	}

	return err;
}

/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
				struct dsa_notifier_vlan_info *info)
{
	if (dp->ds->index == info->sw_index && dp->index == info->port)
		return true;

	if (dsa_port_is_dsa(dp))
		return true;

	return false;
}

/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *targeted_dp, *cpu_dp;
	struct dsa_switch *targeted_ds;

	targeted_ds = dsa_switch_find(dp->ds->dst->index, info->sw_index);
	targeted_dp = dsa_to_port(targeted_ds, info->port);
	cpu_dp = targeted_dp->cpu_dp;

	if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}

static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}

static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* There is no need to propagate, on shared ports, existing VLANs that
	 * are re-notified when only their flags have changed. Doing so would
	 * bump the refcount, which we need to avoid since it would unbalance
	 * the additions against the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}

static int dsa_switch_vlan_add(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_vlan_del(struct dsa_switch *ds,
			       struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}

static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		if (err)
			return err;

		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
	}

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}
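
/* Illustrative sketch, not part of this file: .change_tag_protocol lets a
 * switch that supports several tagging formats switch between them at
 * runtime. The foo_* register interface is hypothetical; the tag protocol
 * constants are real.
 *
 *	static int foo_change_tag_protocol(struct dsa_switch *ds, int port,
 *					   enum dsa_tag_protocol proto)
 *	{
 *		switch (proto) {
 *		case DSA_TAG_PROTO_DSA:
 *			return foo_write(ds->priv, FOO_REG_CPU_MODE(port),
 *					 FOO_CPU_MODE_DSA);
 *		case DSA_TAG_PROTO_EDSA:
 *			return foo_write(ds->priv, FOO_REG_CPU_MODE(port),
 *					 FOO_CPU_MODE_EDSA);
 *		default:
 *			return -EPROTONOSUPPORT;
 *		}
 *	}
 */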

/* We use the same cross-chip notifiers to inform both the tagger side and the
 * switch side of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
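
/* Illustrative sketch, not part of this file: a tagger's ->connect() hook
 * typically allocates state shared between the tagging protocol and the
 * switch driver, and parks it in ds->tagger_data; ->disconnect() frees it.
 * struct foo_tagger_data is hypothetical.
 *
 *	static int foo_tagger_connect(struct dsa_switch *ds)
 *	{
 *		struct foo_tagger_data *td;
 *
 *		td = kzalloc(sizeof(*td), GFP_KERNEL);
 *		if (!td)
 *			return -ENOMEM;
 *
 *		ds->tagger_data = td;
 *
 *		return 0;
 *	}
 *
 *	static void foo_tagger_disconnect(struct dsa_switch *ds)
 *	{
 *		kfree(ds->tagger_data);
 *		ds->tagger_data = NULL;
 *	}
 */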

static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}

static int
dsa_switch_master_state_change(struct dsa_switch *ds,
			       struct dsa_notifier_master_state_info *info)
{
	if (!ds->ops->master_state_change)
		return 0;

	ds->ops->master_state_change(ds, info->master, info->operational);

	return 0;
}

static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}

int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
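
/* Illustrative sketch, not part of this file: events are emitted on the
 * tree's notifier chain, which fans them out to every switch registered
 * against it. For example, the MTU notifier handled by dsa_switch_mtu()
 * above is raised from the port layer roughly like this:
 *
 *	struct dsa_notifier_mtu_info info = {
 *		.sw_index = dp->ds->index,
 *		.targeted_match = targeted_match,
 *		.port = dp->index,
 *		.mtu = new_mtu,
 *	};
 *
 *	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
 */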

void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
1097