// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DSA topology and switch handling
 *
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>

#include "conduit.h"
#include "devlink.h"
#include "dsa.h"
#include "netlink.h"
#include "port.h"
#include "switch.h"
#include "tag.h"
#include "user.h"

#define DSA_MAX_NUM_OFFLOADING_BRIDGES	BITS_PER_LONG

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

static struct workqueue_struct *dsa_owq;

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

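/* Schedule @work on DSA's ordered workqueue. Returns false if @work was
 * already queued, true otherwise.
 */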
bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}

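/* Find the LAG structure associated with @lag_dev by walking the ports of
 * @dst; returns NULL if no port in the tree is a member of that LAG.
 */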
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

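/* Bridge numbers are allocated globally, so search all trees for an
 * existing mapping of @bridge_dev. Returns 0 if the bridge has not been
 * assigned a number yet.
 */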
static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}

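/* Look up a switch by its (tree index, switch index) pair across all
 * registered trees. Returns NULL if no such switch has been probed yet.
 */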
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

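/* Return the tree with the given index, taking a reference on it if it
 * already exists, or allocating a fresh one (with an initial reference)
 * otherwise.
 */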
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

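/* Return the routing table entry for the link between @dp and @link_dp,
 * creating and registering it with the tree if it does not exist yet.
 */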
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

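/* Walk the "link" phandles of a DSA port's device tree node and record a
 * routing table entry towards each linked port. Returns false if a link
 * points at a port that has not been enumerated yet, i.e. the tree is
 * still incomplete.
 */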
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst)
{
	struct device_node *ethernet;
	struct net_device *conduit;
	struct dsa_port *cpu_dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
	conduit = of_find_net_device_by_node(ethernet);
	of_node_put(ethernet);

	return conduit;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

static struct dsa_port *
dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
	struct dsa_port *cpu_dp;

	if (!ds->ops->preferred_default_local_cpu_port)
		return NULL;

	cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
	if (!cpu_dp)
		return NULL;

	if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
		return NULL;

	return cpu_dp;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
		if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_user_create(dp);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	if (!dp->setup)
		return;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->user) {
			dsa_user_destroy(dp->user);
			dp->user = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}

static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}

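/* Sync the switch driver with the tree's tagging protocol: ask the driver
 * to change protocols if the tree uses a non-default one, then run the
 * optional connect hooks of both the tagger and the driver.
 */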
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}

static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the user MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers
	 * and the user MDIO bus driver rely on these values to decide
	 * whether to probe PHY devices.
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	err = dsa_switch_devlink_alloc(ds);
	if (err)
		return err;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->user_mii_bus && ds->ops->phy_read) {
		ds->user_mii_bus = mdiobus_alloc();
		if (!ds->user_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_user_mii_bus_init(ds);

		err = mdiobus_register(ds->user_mii_bus);
		if (err < 0)
			goto free_user_mii_bus;
	}

	dsa_switch_devlink_register(ds);

	ds->setup = true;
	return 0;

free_user_mii_bus:
	if (ds->user_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->user_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	dsa_switch_devlink_free(ds);
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	dsa_switch_devlink_unregister(ds);

	if (ds->user_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->user_mii_bus);
		mdiobus_free(ds->user_mii_bus);
		ds->user_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	dsa_switch_devlink_free(ds);

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *conduit = cpu_dp->conduit;
		bool admin_up = (conduit->flags & IFF_UP) &&
				!qdisc_tx_is_noop(conduit);

		err = dsa_conduit_setup(conduit, cpu_dp);
		if (err)
			break;

		/* Replay conduit state event */
		dsa_tree_conduit_admin_state_change(dst, conduit, admin_up);
		dsa_tree_conduit_oper_state_change(dst, conduit,
						   netif_oper_up(conduit));
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *conduit = cpu_dp->conduit;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the conduit is
		 * currently up and running.
		 */
		dsa_tree_conduit_admin_state_change(dst, conduit, false);

		dsa_conduit_teardown(conduit);
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

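/* Set up a complete switch tree once its routing table is complete:
 * CPU ports first, then switches, ports, conduits and finally the LAG
 * mapping array. Each step is unwound in reverse order on failure.
 */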
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_conduit(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_conduit;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_conduit:
	dsa_tree_teardown_conduit(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_conduit(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}

/* Since the dsa/tagging sysfs device attribute is per conduit, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_conduit(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->user->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}

static void dsa_tree_conduit_state_change(struct dsa_switch_tree *dst,
					  struct net_device *conduit)
{
	struct dsa_notifier_conduit_state_info info;
	struct dsa_port *cpu_dp = conduit->dsa_ptr;

	info.conduit = conduit;
	info.operational = dsa_port_conduit_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_CONDUIT_STATE_CHANGE, &info);
}

void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst,
					 struct net_device *conduit,
					 bool up)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	bool notify = false;

	/* Don't keep track of admin state on LAG DSA conduits,
	 * but rather just of physical DSA conduits
	 */
	if (netif_is_lag_master(conduit))
		return;

	if ((dsa_port_conduit_is_operational(cpu_dp)) !=
	    (up && cpu_dp->conduit_oper_up))
		notify = true;

	cpu_dp->conduit_admin_up = up;

	if (notify)
		dsa_tree_conduit_state_change(dst, conduit);
}

void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst,
					struct net_device *conduit,
					bool up)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	bool notify = false;

	/* Don't keep track of oper state on LAG DSA conduits,
	 * but rather just of physical DSA conduits
	 */
	if (netif_is_lag_master(conduit))
		return;

	if ((dsa_port_conduit_is_operational(cpu_dp)) !=
	    (cpu_dp->conduit_admin_up && up))
		notify = true;

	cpu_dp->conduit_oper_up = up;

	if (notify)
		dsa_tree_conduit_state_change(dst, conduit);
}

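/* Return the dsa_port structure for port @index of @ds, allocating it and
 * adding it to the tree's port list if it has not been created yet.
 */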
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *conduit)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another; when that
	 * happens, the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_user_dev_check(conduit)) {
		mdp = dsa_user_to_port(conduit);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the conduit device is not itself a DSA user in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, conduit);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_tag_driver_get_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get_by_id(default_proto);

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->conduit = conduit;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *conduit;
		const char *user_protocol;

		conduit = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!conduit)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, conduit, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

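/* Match helper used to locate the conduit net_device for platform data
 * setups: walk the device hierarchy looking for a child belonging to the
 * given device class (here, "net").
 */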
static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

static struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *conduit;

		conduit = dsa_dev_to_net_device(dev);
		if (!conduit)
			return -EPROBE_DEFER;

		dev_put(conduit);

		return dsa_port_parse_cpu(dp, conduit, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (ds->phylink_mac_ops) {
		if (ds->ops->phylink_mac_select_pcs ||
		    ds->ops->phylink_mac_config ||
		    ds->ops->phylink_mac_link_down ||
		    ds->ops->phylink_mac_link_up)
			return -EINVAL;
	}

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA conduit chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA user interfaces from being uppers of
 * the DSA conduit, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *conduit, *user_dev;
	LIST_HEAD(close_list);
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_cpu_port(dp, ds)
		list_add(&dp->conduit->close_list, &close_list);

	dev_close_many(&close_list, true);

	dsa_switch_for_each_user_port(dp, ds) {
		conduit = dsa_port_to_conduit(dp);
		user_dev = dp->user;

		netif_device_detach(user_dev);
		netdev_upper_dev_unlink(conduit, user_dev);
	}

	/* Disconnect from further netdevice notifiers on the conduit,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->conduit->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);

#ifdef CONFIG_PM_SLEEP
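/* Only user ports that have a user netdev created need suspend/resume
 * handling.
 */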
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->user;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend user network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_user_suspend(dp->user);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume user network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_user_resume(dp->user);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_user_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_user_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static const struct dsa_stubs __dsa_stubs = {
	.conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate,
};

static void dsa_register_stubs(void)
{
	dsa_stubs = &__dsa_stubs;
}

static void dsa_unregister_stubs(void)
{
	dsa_stubs = NULL;
}

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_user_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	dsa_register_stubs();

	return 0;

netlink_register_fail:
	dsa_user_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_unregister_stubs();

	rtnl_link_unregister(&dsa_link_ops);

	dsa_user_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");