xref: /linux/net/dsa/port.c (revision ef6af7bdb9e6c14eae8dc5fe852aefe1e089c85c)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8 
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13 
14 #include "dsa_priv.h"
15 
16 static int dsa_broadcast(unsigned long e, void *v)
17 {
18 	struct dsa_switch_tree *dst;
19 	int err = 0;
20 
21 	list_for_each_entry(dst, &dsa_tree_list, list) {
22 		struct raw_notifier_head *nh = &dst->nh;
23 
24 		err = raw_notifier_call_chain(nh, e, v);
25 		err = notifier_to_errno(err);
26 		if (err)
27 			break;
28 	}
29 
30 	return err;
31 }
32 
33 static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
34 {
35 	struct raw_notifier_head *nh = &dp->ds->dst->nh;
36 	int err;
37 
38 	err = raw_notifier_call_chain(nh, e, v);
39 
40 	return notifier_to_errno(err);
41 }
42 
43 int dsa_port_set_state(struct dsa_port *dp, u8 state)
44 {
45 	struct dsa_switch *ds = dp->ds;
46 	int port = dp->index;
47 
48 	if (!ds->ops->port_stp_state_set)
49 		return -EOPNOTSUPP;
50 
51 	ds->ops->port_stp_state_set(ds, port, state);
52 
53 	if (ds->ops->port_fast_age) {
54 		/* Fast age FDB entries or flush appropriate forwarding database
55 		 * for the given port, if we are moving it from Learning or
56 		 * Forwarding state, to Disabled or Blocking or Listening state.
57 		 */
58 
59 		if ((dp->stp_state == BR_STATE_LEARNING ||
60 		     dp->stp_state == BR_STATE_FORWARDING) &&
61 		    (state == BR_STATE_DISABLED ||
62 		     state == BR_STATE_BLOCKING ||
63 		     state == BR_STATE_LISTENING))
64 			ds->ops->port_fast_age(ds, port);
65 	}
66 
67 	dp->stp_state = state;
68 
69 	return 0;
70 }
71 
72 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
73 {
74 	int err;
75 
76 	err = dsa_port_set_state(dp, state);
77 	if (err)
78 		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
79 }
80 
81 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
82 {
83 	struct dsa_switch *ds = dp->ds;
84 	int port = dp->index;
85 	int err;
86 
87 	if (ds->ops->port_enable) {
88 		err = ds->ops->port_enable(ds, port, phy);
89 		if (err)
90 			return err;
91 	}
92 
93 	if (!dp->bridge_dev)
94 		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
95 
96 	if (dp->pl)
97 		phylink_start(dp->pl);
98 
99 	return 0;
100 }
101 
/* Locked wrapper around dsa_port_enable_rt() for callers without rtnl. */
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int ret;

	rtnl_lock();
	ret = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return ret;
}
112 
113 void dsa_port_disable_rt(struct dsa_port *dp)
114 {
115 	struct dsa_switch *ds = dp->ds;
116 	int port = dp->index;
117 
118 	if (dp->pl)
119 		phylink_stop(dp->pl);
120 
121 	if (!dp->bridge_dev)
122 		dsa_port_set_state_now(dp, BR_STATE_DISABLED);
123 
124 	if (ds->ops->port_disable)
125 		ds->ops->port_disable(ds, port);
126 }
127 
/* Locked wrapper around dsa_port_disable_rt() for callers without rtnl. */
void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
134 
/* Called after the port has been enslaved to @br at the software level.
 * Broadcasts DSA_NOTIFIER_BRIDGE_JOIN to all trees so that cross-chip
 * setups can program hardware forwarding between their ports.
 * On error, both the flood flags and dp->bridge_dev are rolled back.
 */
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Set the flooding mode before joining the port in the switch */
	err = dsa_port_bridge_flags(dp, BR_FLOOD | BR_MCAST_FLOOD);
	if (err)
		return err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);

	/* The bridging is rolled back on error */
	if (err) {
		/* Restore standalone defaults: no flooding, no bridge */
		dsa_port_bridge_flags(dp, 0);
		dp->bridge_dev = NULL;
	}

	return err;
}
165 
/* Called after the port has been released from @br at the software level.
 * Unlike join, this cannot fail: notifier errors are only logged, and the
 * port is restored to standalone defaults (no flooding, forwarding STP
 * state) regardless.
 */
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	/* Port is leaving the bridge, disable flooding */
	dsa_port_bridge_flags(dp, 0);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
}
193 
194 int dsa_port_lag_change(struct dsa_port *dp,
195 			struct netdev_lag_lower_state_info *linfo)
196 {
197 	struct dsa_notifier_lag_info info = {
198 		.sw_index = dp->ds->index,
199 		.port = dp->index,
200 	};
201 	bool tx_enabled;
202 
203 	if (!dp->lag_dev)
204 		return 0;
205 
206 	/* On statically configured aggregates (e.g. loadbalance
207 	 * without LACP) ports will always be tx_enabled, even if the
208 	 * link is down. Thus we require both link_up and tx_enabled
209 	 * in order to include it in the tx set.
210 	 */
211 	tx_enabled = linfo->link_up && linfo->tx_enabled;
212 
213 	if (tx_enabled == dp->lag_tx_enabled)
214 		return 0;
215 
216 	dp->lag_tx_enabled = tx_enabled;
217 
218 	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
219 }
220 
221 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
222 		      struct netdev_lag_upper_info *uinfo)
223 {
224 	struct dsa_notifier_lag_info info = {
225 		.sw_index = dp->ds->index,
226 		.port = dp->index,
227 		.lag = lag,
228 		.info = uinfo,
229 	};
230 	int err;
231 
232 	dsa_lag_map(dp->ds->dst, lag);
233 	dp->lag_dev = lag;
234 
235 	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
236 	if (err) {
237 		dp->lag_dev = NULL;
238 		dsa_lag_unmap(dp->ds->dst, lag);
239 	}
240 
241 	return err;
242 }
243 
/* Release a port from its LAG. If the LAG itself was bridged, the port
 * implicitly leaves the bridge too. Notifier errors are only logged; the
 * LAG ID mapping is dropped unconditionally at the end.
 */
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
		       err);

	dsa_lag_unmap(dp->ds->dst, lag);
}
272 
273 /* Must be called under rcu_read_lock() */
/* Decide whether the requested VLAN filtering state can be applied to this
 * port without breaking existing configuration. Two constraints are checked:
 * 8021q uppers on the slave netdev, and (for global-filtering switches)
 * conflicting settings from other bridges on the same switch.
 *
 * Must be called under rcu_read_lock() (netdev upper traversal).
 */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			/* Only 8021q (VLAN) uppers are of interest */
			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				dev_err(ds->dev, "Must remove upper %s first\n",
					upper_dev->name);
				return false;
			}
		}
	}

	/* Per-port filtering: no cross-port constraints to check */
	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			dev_err(ds->dev, "VLAN filtering is a global setting\n");
			return false;
		}
	}
	return true;
}
338 
/* Apply a new VLAN filtering setting to the port (or the whole switch when
 * filtering is a global property of the hardware). Validates feasibility
 * first, short-circuits no-op changes, and records the new state only after
 * the driver accepted it.
 */
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	/* Nothing to do if the requested state is already in effect */
	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering);
	if (err)
		return err;

	/* Track the state at the granularity the hardware supports */
	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}
372 
373 /* This enforces legacy behavior for switch drivers which assume they can't
374  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
375  */
376 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
377 {
378 	struct dsa_switch *ds = dp->ds;
379 
380 	if (!dp->bridge_dev)
381 		return false;
382 
383 	return (!ds->configure_vlan_while_not_filtering &&
384 		!br_vlan_enabled(dp->bridge_dev));
385 }
386 
387 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
388 {
389 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
390 	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
391 	struct dsa_notifier_ageing_time_info info;
392 	int err;
393 
394 	info.ageing_time = ageing_time;
395 
396 	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
397 	if (err)
398 		return err;
399 
400 	dp->ageing_time = ageing_time;
401 
402 	return 0;
403 }
404 
405 int dsa_port_pre_bridge_flags(const struct dsa_port *dp, unsigned long flags)
406 {
407 	struct dsa_switch *ds = dp->ds;
408 
409 	if (!ds->ops->port_egress_floods ||
410 	    (flags & ~(BR_FLOOD | BR_MCAST_FLOOD)))
411 		return -EINVAL;
412 
413 	return 0;
414 }
415 
416 int dsa_port_bridge_flags(const struct dsa_port *dp, unsigned long flags)
417 {
418 	struct dsa_switch *ds = dp->ds;
419 	int port = dp->index;
420 	int err = 0;
421 
422 	if (ds->ops->port_egress_floods)
423 		err = ds->ops->port_egress_floods(ds, port, flags & BR_FLOOD,
424 						  flags & BR_MCAST_FLOOD);
425 
426 	return err;
427 }
428 
429 int dsa_port_mrouter(struct dsa_port *dp, bool mrouter)
430 {
431 	struct dsa_switch *ds = dp->ds;
432 	int port = dp->index;
433 
434 	if (!ds->ops->port_egress_floods)
435 		return -EOPNOTSUPP;
436 
437 	return ds->ops->port_egress_floods(ds, port, true, mrouter);
438 }
439 
440 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
441 			bool propagate_upstream)
442 {
443 	struct dsa_notifier_mtu_info info = {
444 		.sw_index = dp->ds->index,
445 		.propagate_upstream = propagate_upstream,
446 		.port = dp->index,
447 		.mtu = new_mtu,
448 	};
449 
450 	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
451 }
452 
453 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
454 		     u16 vid)
455 {
456 	struct dsa_notifier_fdb_info info = {
457 		.sw_index = dp->ds->index,
458 		.port = dp->index,
459 		.addr = addr,
460 		.vid = vid,
461 	};
462 
463 	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
464 }
465 
466 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
467 		     u16 vid)
468 {
469 	struct dsa_notifier_fdb_info info = {
470 		.sw_index = dp->ds->index,
471 		.port = dp->index,
472 		.addr = addr,
473 		.vid = vid,
474 
475 	};
476 
477 	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
478 }
479 
480 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
481 {
482 	struct dsa_switch *ds = dp->ds;
483 	int port = dp->index;
484 
485 	if (!ds->ops->port_fdb_dump)
486 		return -EOPNOTSUPP;
487 
488 	return ds->ops->port_fdb_dump(ds, port, cb, data);
489 }
490 
491 int dsa_port_mdb_add(const struct dsa_port *dp,
492 		     const struct switchdev_obj_port_mdb *mdb)
493 {
494 	struct dsa_notifier_mdb_info info = {
495 		.sw_index = dp->ds->index,
496 		.port = dp->index,
497 		.mdb = mdb,
498 	};
499 
500 	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
501 }
502 
503 int dsa_port_mdb_del(const struct dsa_port *dp,
504 		     const struct switchdev_obj_port_mdb *mdb)
505 {
506 	struct dsa_notifier_mdb_info info = {
507 		.sw_index = dp->ds->index,
508 		.port = dp->index,
509 		.mdb = mdb,
510 	};
511 
512 	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
513 }
514 
515 int dsa_port_vlan_add(struct dsa_port *dp,
516 		      const struct switchdev_obj_port_vlan *vlan)
517 {
518 	struct dsa_notifier_vlan_info info = {
519 		.sw_index = dp->ds->index,
520 		.port = dp->index,
521 		.vlan = vlan,
522 	};
523 
524 	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
525 }
526 
527 int dsa_port_vlan_del(struct dsa_port *dp,
528 		      const struct switchdev_obj_port_vlan *vlan)
529 {
530 	struct dsa_notifier_vlan_info info = {
531 		.sw_index = dp->ds->index,
532 		.port = dp->index,
533 		.vlan = vlan,
534 	};
535 
536 	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
537 }
538 
539 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
540 {
541 	struct device_node *phy_dn;
542 	struct phy_device *phydev;
543 
544 	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
545 	if (!phy_dn)
546 		return NULL;
547 
548 	phydev = of_phy_find_device(phy_dn);
549 	if (!phydev) {
550 		of_node_put(phy_dn);
551 		return ERR_PTR(-EPROBE_DEFER);
552 	}
553 
554 	of_node_put(phy_dn);
555 	return phydev;
556 }
557 
558 static void dsa_port_phylink_validate(struct phylink_config *config,
559 				      unsigned long *supported,
560 				      struct phylink_link_state *state)
561 {
562 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
563 	struct dsa_switch *ds = dp->ds;
564 
565 	if (!ds->ops->phylink_validate)
566 		return;
567 
568 	ds->ops->phylink_validate(ds, dp->index, supported, state);
569 }
570 
571 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
572 					       struct phylink_link_state *state)
573 {
574 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
575 	struct dsa_switch *ds = dp->ds;
576 	int err;
577 
578 	/* Only called for inband modes */
579 	if (!ds->ops->phylink_mac_link_state) {
580 		state->link = 0;
581 		return;
582 	}
583 
584 	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
585 	if (err < 0) {
586 		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
587 			dp->index, err);
588 		state->link = 0;
589 	}
590 }
591 
592 static void dsa_port_phylink_mac_config(struct phylink_config *config,
593 					unsigned int mode,
594 					const struct phylink_link_state *state)
595 {
596 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
597 	struct dsa_switch *ds = dp->ds;
598 
599 	if (!ds->ops->phylink_mac_config)
600 		return;
601 
602 	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
603 }
604 
605 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
606 {
607 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
608 	struct dsa_switch *ds = dp->ds;
609 
610 	if (!ds->ops->phylink_mac_an_restart)
611 		return;
612 
613 	ds->ops->phylink_mac_an_restart(ds, dp->index);
614 }
615 
616 static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
617 					   unsigned int mode,
618 					   phy_interface_t interface)
619 {
620 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
621 	struct phy_device *phydev = NULL;
622 	struct dsa_switch *ds = dp->ds;
623 
624 	if (dsa_is_user_port(ds, dp->index))
625 		phydev = dp->slave->phydev;
626 
627 	if (!ds->ops->phylink_mac_link_down) {
628 		if (ds->ops->adjust_link && phydev)
629 			ds->ops->adjust_link(ds, dp->index, phydev);
630 		return;
631 	}
632 
633 	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
634 }
635 
636 static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
637 					 struct phy_device *phydev,
638 					 unsigned int mode,
639 					 phy_interface_t interface,
640 					 int speed, int duplex,
641 					 bool tx_pause, bool rx_pause)
642 {
643 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
644 	struct dsa_switch *ds = dp->ds;
645 
646 	if (!ds->ops->phylink_mac_link_up) {
647 		if (ds->ops->adjust_link && phydev)
648 			ds->ops->adjust_link(ds, dp->index, phydev);
649 		return;
650 	}
651 
652 	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
653 				     speed, duplex, tx_pause, rx_pause);
654 }
655 
/* phylink MAC operations common to all DSA ports; each hook dispatches to
 * the corresponding (optional) callback in the driver's dsa_switch_ops.
 */
const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};
664 
665 static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
666 {
667 	struct dsa_switch *ds = dp->ds;
668 	struct phy_device *phydev;
669 	int port = dp->index;
670 	int err = 0;
671 
672 	phydev = dsa_port_get_phy_device(dp);
673 	if (!phydev)
674 		return 0;
675 
676 	if (IS_ERR(phydev))
677 		return PTR_ERR(phydev);
678 
679 	if (enable) {
680 		err = genphy_resume(phydev);
681 		if (err < 0)
682 			goto err_put_dev;
683 
684 		err = genphy_read_status(phydev);
685 		if (err < 0)
686 			goto err_put_dev;
687 	} else {
688 		err = genphy_suspend(phydev);
689 		if (err < 0)
690 			goto err_put_dev;
691 	}
692 
693 	if (ds->ops->adjust_link)
694 		ds->ops->adjust_link(ds, port, phydev);
695 
696 	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
697 
698 err_put_dev:
699 	put_device(&phydev->mdio.dev);
700 	return err;
701 }
702 
703 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
704 {
705 	struct device_node *dn = dp->dn;
706 	struct dsa_switch *ds = dp->ds;
707 	struct phy_device *phydev;
708 	int port = dp->index;
709 	phy_interface_t mode;
710 	int err;
711 
712 	err = of_phy_register_fixed_link(dn);
713 	if (err) {
714 		dev_err(ds->dev,
715 			"failed to register the fixed PHY of port %d\n",
716 			port);
717 		return err;
718 	}
719 
720 	phydev = of_phy_find_device(dn);
721 
722 	err = of_get_phy_mode(dn, &mode);
723 	if (err)
724 		mode = PHY_INTERFACE_MODE_NA;
725 	phydev->interface = mode;
726 
727 	genphy_read_status(phydev);
728 
729 	if (ds->ops->adjust_link)
730 		ds->ops->adjust_link(ds, port, phydev);
731 
732 	put_device(&phydev->mdio.dev);
733 
734 	return 0;
735 }
736 
737 static int dsa_port_phylink_register(struct dsa_port *dp)
738 {
739 	struct dsa_switch *ds = dp->ds;
740 	struct device_node *port_dn = dp->dn;
741 	phy_interface_t mode;
742 	int err;
743 
744 	err = of_get_phy_mode(port_dn, &mode);
745 	if (err)
746 		mode = PHY_INTERFACE_MODE_NA;
747 
748 	dp->pl_config.dev = ds->dev;
749 	dp->pl_config.type = PHYLINK_DEV;
750 	dp->pl_config.pcs_poll = ds->pcs_poll;
751 
752 	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
753 				mode, &dsa_port_phylink_mac_ops);
754 	if (IS_ERR(dp->pl)) {
755 		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
756 		return PTR_ERR(dp->pl);
757 	}
758 
759 	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
760 	if (err && err != -ENODEV) {
761 		pr_err("could not attach to PHY: %d\n", err);
762 		goto err_phy_connect;
763 	}
764 
765 	return 0;
766 
767 err_phy_connect:
768 	phylink_destroy(dp->pl);
769 	return err;
770 }
771 
772 int dsa_port_link_register_of(struct dsa_port *dp)
773 {
774 	struct dsa_switch *ds = dp->ds;
775 	struct device_node *phy_np;
776 	int port = dp->index;
777 
778 	if (!ds->ops->adjust_link) {
779 		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
780 		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
781 			if (ds->ops->phylink_mac_link_down)
782 				ds->ops->phylink_mac_link_down(ds, port,
783 					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
784 			return dsa_port_phylink_register(dp);
785 		}
786 		return 0;
787 	}
788 
789 	dev_warn(ds->dev,
790 		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
791 
792 	if (of_phy_is_fixed_link(dp->dn))
793 		return dsa_port_fixed_link_register_of(dp);
794 	else
795 		return dsa_port_setup_phy_of(dp, true);
796 }
797 
798 void dsa_port_link_unregister_of(struct dsa_port *dp)
799 {
800 	struct dsa_switch *ds = dp->ds;
801 
802 	if (!ds->ops->adjust_link && dp->pl) {
803 		rtnl_lock();
804 		phylink_disconnect_phy(dp->pl);
805 		rtnl_unlock();
806 		phylink_destroy(dp->pl);
807 		dp->pl = NULL;
808 		return;
809 	}
810 
811 	if (of_phy_is_fixed_link(dp->dn))
812 		of_phy_deregister_fixed_link(dp->dn);
813 	else
814 		dsa_port_setup_phy_of(dp, false);
815 }
816 
817 int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
818 {
819 	struct phy_device *phydev;
820 	int ret = -EOPNOTSUPP;
821 
822 	if (of_phy_is_fixed_link(dp->dn))
823 		return ret;
824 
825 	phydev = dsa_port_get_phy_device(dp);
826 	if (IS_ERR_OR_NULL(phydev))
827 		return ret;
828 
829 	ret = phy_ethtool_get_strings(phydev, data);
830 	put_device(&phydev->mdio.dev);
831 
832 	return ret;
833 }
834 EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
835 
836 int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
837 {
838 	struct phy_device *phydev;
839 	int ret = -EOPNOTSUPP;
840 
841 	if (of_phy_is_fixed_link(dp->dn))
842 		return ret;
843 
844 	phydev = dsa_port_get_phy_device(dp);
845 	if (IS_ERR_OR_NULL(phydev))
846 		return ret;
847 
848 	ret = phy_ethtool_get_stats(phydev, NULL, data);
849 	put_device(&phydev->mdio.dev);
850 
851 	return ret;
852 }
853 EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
854 
855 int dsa_port_get_phy_sset_count(struct dsa_port *dp)
856 {
857 	struct phy_device *phydev;
858 	int ret = -EOPNOTSUPP;
859 
860 	if (of_phy_is_fixed_link(dp->dn))
861 		return ret;
862 
863 	phydev = dsa_port_get_phy_device(dp);
864 	if (IS_ERR_OR_NULL(phydev))
865 		return ret;
866 
867 	ret = phy_ethtool_get_sset_count(phydev);
868 	put_device(&phydev->mdio.dev);
869 
870 	return ret;
871 }
872 EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
873