xref: /linux/net/dsa/port.c (revision a8b659e7ff75a6e766bc5691df57ceb26018db9f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Handling of a single switch port
4  *
5  * Copyright (c) 2017 Savoir-faire Linux Inc.
6  *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
7  */
8 
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
13 
14 #include "dsa_priv.h"
15 
16 /**
17  * dsa_port_notify - Notify the switching fabric of changes to a port
18  * @dp: port on which change occurred
19  * @e: event, must be of type DSA_NOTIFIER_*
20  * @v: event-specific value.
21  *
22  * Notify all switches in the DSA tree that this port's switch belongs to,
23  * including this switch itself, of an event. Allows the other switches to
24  * reconfigure themselves for cross-chip operations. Can also be used to
25  * reconfigure ports without net_devices (CPU ports, DSA links) whenever
26  * a user port's state changes.
27  */
28 static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
29 {
30 	return dsa_tree_notify(dp->ds->dst, e, v);
31 }
32 
33 int dsa_port_set_state(struct dsa_port *dp, u8 state)
34 {
35 	struct dsa_switch *ds = dp->ds;
36 	int port = dp->index;
37 
38 	if (!ds->ops->port_stp_state_set)
39 		return -EOPNOTSUPP;
40 
41 	ds->ops->port_stp_state_set(ds, port, state);
42 
43 	if (ds->ops->port_fast_age) {
44 		/* Fast age FDB entries or flush appropriate forwarding database
45 		 * for the given port, if we are moving it from Learning or
46 		 * Forwarding state, to Disabled or Blocking or Listening state.
47 		 */
48 
49 		if ((dp->stp_state == BR_STATE_LEARNING ||
50 		     dp->stp_state == BR_STATE_FORWARDING) &&
51 		    (state == BR_STATE_DISABLED ||
52 		     state == BR_STATE_BLOCKING ||
53 		     state == BR_STATE_LISTENING))
54 			ds->ops->port_fast_age(ds, port);
55 	}
56 
57 	dp->stp_state = state;
58 
59 	return 0;
60 }
61 
62 static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
63 {
64 	int err;
65 
66 	err = dsa_port_set_state(dp, state);
67 	if (err)
68 		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
69 }
70 
/* Enable a port: let the driver power it up, put standalone ports straight
 * into forwarding (bridged ports are driven by the bridge's STP machine),
 * then start phylink. The _rt suffix means rtnl_lock is already held by the
 * caller (see dsa_port_enable()).
 */
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	/* No bridge manages this port, so make it forward immediately */
	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}
91 
/* rtnl-acquiring wrapper around dsa_port_enable_rt() */
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int ret;

	rtnl_lock();
	ret = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return ret;
}
102 
/* Disable a port: stop phylink first, park standalone ports in the Disabled
 * STP state (bridged ports are left to the bridge layer), then let the
 * driver power the port down. Mirror image of dsa_port_enable_rt(); caller
 * holds rtnl_lock (see dsa_port_disable()).
 */
void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}
117 
/* rtnl-acquiring wrapper around dsa_port_disable_rt() */
void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
124 
/* Switch the port's offloaded bridge-port flags between the bridged profile
 * (learn + flood everything) and the standalone profile (flood everything,
 * no learning).
 */
static void dsa_port_change_brport_flags(struct dsa_port *dp,
					 bool bridge_offload)
{
	struct switchdev_brport_flags flags;
	int flag;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	if (bridge_offload)
		flags.val = flags.mask;
	else
		flags.val = flags.mask & ~BR_LEARNING;

	/* Apply one flag at a time, presumably so a driver that only
	 * understands a subset still programs what it can — TODO confirm.
	 */
	for_each_set_bit(flag, &flags.mask, 32) {
		struct switchdev_brport_flags tmp;

		tmp.val = flags.val & BIT(flag);
		tmp.mask = BIT(flag);

		dsa_port_bridge_flags(dp, tmp, NULL);
	}
}
146 
/* Offload @dp joining bridge @br: record the bridge, program bridged brport
 * flags, and notify the whole fabric. Everything is rolled back on error.
 */
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Notify the port driver to set its configurable flags in a way that
	 * matches the initial settings of a bridge port.
	 */
	dsa_port_change_brport_flags(dp, true);

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);

	/* The bridging is rolled back on error */
	if (err) {
		dsa_port_change_brport_flags(dp, false);
		dp->bridge_dev = NULL;
	}

	return err;
}
177 
/* Offload @dp leaving bridge @br: clear the bridge pointer, notify the
 * fabric, restore standalone brport flags and make the port forward again.
 */
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. But we even prefer it that way, because
	 * otherwise, some setups might never get the notification they need,
	 * for example, when a port leaves a LAG that offloads the bridge,
	 * it becomes standalone, but as far as the bridge is concerned, no
	 * port ever left.
	 */
	dsa_port_change_brport_flags(dp, false);

	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
}
215 
/* Propagate a LAG lower-state change for @dp to the fabric. Returns 0 (a
 * no-op) when the port is not in a LAG or when the effective tx state did
 * not actually change.
 */
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
242 
/* Offload @dp joining LAG device @lag: map the LAG to a hardware LAG ID,
 * record it on the port, and notify the fabric. Both the mapping and
 * dp->lag_dev are rolled back if the notifier fails.
 */
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err) {
		dp->lag_dev = NULL;
		dsa_lag_unmap(dp->ds->dst, lag);
	}

	return err;
}
265 
/* Offload @dp leaving LAG device @lag. If that LAG was itself bridged, the
 * port leaves the bridge first, then its LAG state is cleared, the fabric is
 * notified and the hardware LAG ID is unmapped.
 */
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
		       err);

	dsa_lag_unmap(dp->ds->dst, lag);
}
294 
/* Must be called under rcu_read_lock() */
/* Decide whether the requested vlan_filtering value can be applied to @dp.
 * Two vetoes exist: 8021q uppers on the slave that clash with bridge VLANs,
 * and (for switches with a global VLAN-awareness setting) other bridges on
 * the same switch that already use the opposite setting.
 */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
			 * device, respectively the VID is not found, returning
			 * 0 means success, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				dev_err(ds->dev, "Must remove upper %s first\n",
					upper_dev->name);
				return false;
			}
		}
	}

	/* Per-port setting: nothing else can conflict */
	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			dev_err(ds->dev, "VLAN filtering is a global setting\n");
			return false;
		}
	}
	return true;
}
360 
/* Apply the requested VLAN awareness to @dp after validating it against the
 * rest of the switch, then cache the new value either globally on the switch
 * or per port, depending on the hardware's granularity.
 */
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	/* Nothing to do if already in the requested state */
	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}
394 
395 /* This enforces legacy behavior for switch drivers which assume they can't
396  * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
397  */
398 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
399 {
400 	struct dsa_switch *ds = dp->ds;
401 
402 	if (!dp->bridge_dev)
403 		return false;
404 
405 	return (!ds->configure_vlan_while_not_filtering &&
406 		!br_vlan_enabled(dp->bridge_dev));
407 }
408 
409 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
410 {
411 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
412 	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
413 	struct dsa_notifier_ageing_time_info info;
414 	int err;
415 
416 	info.ageing_time = ageing_time;
417 
418 	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
419 	if (err)
420 		return err;
421 
422 	dp->ageing_time = ageing_time;
423 
424 	return 0;
425 }
426 
427 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
428 			      struct switchdev_brport_flags flags,
429 			      struct netlink_ext_ack *extack)
430 {
431 	struct dsa_switch *ds = dp->ds;
432 
433 	if (!ds->ops->port_pre_bridge_flags)
434 		return -EINVAL;
435 
436 	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
437 }
438 
439 int dsa_port_bridge_flags(const struct dsa_port *dp,
440 			  struct switchdev_brport_flags flags,
441 			  struct netlink_ext_ack *extack)
442 {
443 	struct dsa_switch *ds = dp->ds;
444 
445 	if (!ds->ops->port_bridge_flags)
446 		return -EINVAL;
447 
448 	return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
449 }
450 
451 int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
452 		     struct netlink_ext_ack *extack)
453 {
454 	struct dsa_switch *ds = dp->ds;
455 
456 	if (!ds->ops->port_set_mrouter)
457 		return -EOPNOTSUPP;
458 
459 	return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
460 }
461 
462 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
463 			bool propagate_upstream)
464 {
465 	struct dsa_notifier_mtu_info info = {
466 		.sw_index = dp->ds->index,
467 		.propagate_upstream = propagate_upstream,
468 		.port = dp->index,
469 		.mtu = new_mtu,
470 	};
471 
472 	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
473 }
474 
475 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
476 		     u16 vid)
477 {
478 	struct dsa_notifier_fdb_info info = {
479 		.sw_index = dp->ds->index,
480 		.port = dp->index,
481 		.addr = addr,
482 		.vid = vid,
483 	};
484 
485 	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
486 }
487 
488 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
489 		     u16 vid)
490 {
491 	struct dsa_notifier_fdb_info info = {
492 		.sw_index = dp->ds->index,
493 		.port = dp->index,
494 		.addr = addr,
495 		.vid = vid,
496 
497 	};
498 
499 	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
500 }
501 
502 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
503 {
504 	struct dsa_switch *ds = dp->ds;
505 	int port = dp->index;
506 
507 	if (!ds->ops->port_fdb_dump)
508 		return -EOPNOTSUPP;
509 
510 	return ds->ops->port_fdb_dump(ds, port, cb, data);
511 }
512 
513 int dsa_port_mdb_add(const struct dsa_port *dp,
514 		     const struct switchdev_obj_port_mdb *mdb)
515 {
516 	struct dsa_notifier_mdb_info info = {
517 		.sw_index = dp->ds->index,
518 		.port = dp->index,
519 		.mdb = mdb,
520 	};
521 
522 	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
523 }
524 
525 int dsa_port_mdb_del(const struct dsa_port *dp,
526 		     const struct switchdev_obj_port_mdb *mdb)
527 {
528 	struct dsa_notifier_mdb_info info = {
529 		.sw_index = dp->ds->index,
530 		.port = dp->index,
531 		.mdb = mdb,
532 	};
533 
534 	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
535 }
536 
537 int dsa_port_vlan_add(struct dsa_port *dp,
538 		      const struct switchdev_obj_port_vlan *vlan)
539 {
540 	struct dsa_notifier_vlan_info info = {
541 		.sw_index = dp->ds->index,
542 		.port = dp->index,
543 		.vlan = vlan,
544 	};
545 
546 	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
547 }
548 
549 int dsa_port_vlan_del(struct dsa_port *dp,
550 		      const struct switchdev_obj_port_vlan *vlan)
551 {
552 	struct dsa_notifier_vlan_info info = {
553 		.sw_index = dp->ds->index,
554 		.port = dp->index,
555 		.vlan = vlan,
556 	};
557 
558 	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
559 }
560 
561 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
562 			       const struct dsa_device_ops *tag_ops)
563 {
564 	cpu_dp->filter = tag_ops->filter;
565 	cpu_dp->rcv = tag_ops->rcv;
566 	cpu_dp->tag_ops = tag_ops;
567 }
568 
569 static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
570 {
571 	struct device_node *phy_dn;
572 	struct phy_device *phydev;
573 
574 	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
575 	if (!phy_dn)
576 		return NULL;
577 
578 	phydev = of_phy_find_device(phy_dn);
579 	if (!phydev) {
580 		of_node_put(phy_dn);
581 		return ERR_PTR(-EPROBE_DEFER);
582 	}
583 
584 	of_node_put(phy_dn);
585 	return phydev;
586 }
587 
588 static void dsa_port_phylink_validate(struct phylink_config *config,
589 				      unsigned long *supported,
590 				      struct phylink_link_state *state)
591 {
592 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
593 	struct dsa_switch *ds = dp->ds;
594 
595 	if (!ds->ops->phylink_validate)
596 		return;
597 
598 	ds->ops->phylink_validate(ds, dp->index, supported, state);
599 }
600 
/* phylink .mac_pcs_get_state hook: query the driver's inband link state,
 * reporting link down when the driver lacks the op or fails.
 */
static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	/* On failure, report link down rather than leaving stale state */
	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}
621 
622 static void dsa_port_phylink_mac_config(struct phylink_config *config,
623 					unsigned int mode,
624 					const struct phylink_link_state *state)
625 {
626 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
627 	struct dsa_switch *ds = dp->ds;
628 
629 	if (!ds->ops->phylink_mac_config)
630 		return;
631 
632 	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
633 }
634 
635 static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
636 {
637 	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
638 	struct dsa_switch *ds = dp->ds;
639 
640 	if (!ds->ops->phylink_mac_an_restart)
641 		return;
642 
643 	ds->ops->phylink_mac_an_restart(ds, dp->index);
644 }
645 
/* phylink .mac_link_down hook, with a fallback to the legacy PHYLIB
 * adjust_link callback for drivers not yet converted to phylink.
 */
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	/* Only user ports have a slave netdev to fetch a phydev from */
	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}
665 
/* phylink .mac_link_up hook, with the same legacy adjust_link fallback as
 * dsa_port_phylink_mac_link_down() for unconverted drivers.
 */
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}
685 
/* phylink MAC operations, each forwarding to the dsa_switch_ops equivalent */
const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};
694 
/* Legacy (non-phylink) PHY handling from device tree: resume and read the
 * PHY when @enable is true, suspend it otherwise, then report the result to
 * the driver via adjust_link. Returns 0 when the port has no phy-handle.
 */
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

	/* The success path falls through here on purpose: the device
	 * reference taken by dsa_port_get_phy_device() is dropped either way.
	 */
err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}
732 
733 static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
734 {
735 	struct device_node *dn = dp->dn;
736 	struct dsa_switch *ds = dp->ds;
737 	struct phy_device *phydev;
738 	int port = dp->index;
739 	phy_interface_t mode;
740 	int err;
741 
742 	err = of_phy_register_fixed_link(dn);
743 	if (err) {
744 		dev_err(ds->dev,
745 			"failed to register the fixed PHY of port %d\n",
746 			port);
747 		return err;
748 	}
749 
750 	phydev = of_phy_find_device(dn);
751 
752 	err = of_get_phy_mode(dn, &mode);
753 	if (err)
754 		mode = PHY_INTERFACE_MODE_NA;
755 	phydev->interface = mode;
756 
757 	genphy_read_status(phydev);
758 
759 	if (ds->ops->adjust_link)
760 		ds->ops->adjust_link(ds, port, phydev);
761 
762 	put_device(&phydev->mdio.dev);
763 
764 	return 0;
765 }
766 
/* Create the phylink instance for @dp from its device tree node and connect
 * it to the port's PHY. An -ENODEV from phylink_of_phy_connect() (nothing to
 * attach to) is not treated as an error; any other failure destroys the
 * freshly created phylink instance.
 */
static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}
801 
802 int dsa_port_link_register_of(struct dsa_port *dp)
803 {
804 	struct dsa_switch *ds = dp->ds;
805 	struct device_node *phy_np;
806 	int port = dp->index;
807 
808 	if (!ds->ops->adjust_link) {
809 		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
810 		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
811 			if (ds->ops->phylink_mac_link_down)
812 				ds->ops->phylink_mac_link_down(ds, port,
813 					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
814 			return dsa_port_phylink_register(dp);
815 		}
816 		return 0;
817 	}
818 
819 	dev_warn(ds->dev,
820 		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
821 
822 	if (of_phy_is_fixed_link(dp->dn))
823 		return dsa_port_fixed_link_register_of(dp);
824 	else
825 		return dsa_port_setup_phy_of(dp, true);
826 }
827 
/* Tear down what dsa_port_link_register_of() set up: destroy the phylink
 * instance for converted drivers, or undo the legacy fixed-link / PHYLIB
 * setup for the rest.
 */
void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	/* phylink path: disconnect under rtnl, then destroy */
	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	/* Legacy PHYLIB teardown, mirroring the registration paths */
	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}
846 
847 int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
848 {
849 	struct phy_device *phydev;
850 	int ret = -EOPNOTSUPP;
851 
852 	if (of_phy_is_fixed_link(dp->dn))
853 		return ret;
854 
855 	phydev = dsa_port_get_phy_device(dp);
856 	if (IS_ERR_OR_NULL(phydev))
857 		return ret;
858 
859 	ret = phy_ethtool_get_strings(phydev, data);
860 	put_device(&phydev->mdio.dev);
861 
862 	return ret;
863 }
864 EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
865 
866 int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
867 {
868 	struct phy_device *phydev;
869 	int ret = -EOPNOTSUPP;
870 
871 	if (of_phy_is_fixed_link(dp->dn))
872 		return ret;
873 
874 	phydev = dsa_port_get_phy_device(dp);
875 	if (IS_ERR_OR_NULL(phydev))
876 		return ret;
877 
878 	ret = phy_ethtool_get_stats(phydev, NULL, data);
879 	put_device(&phydev->mdio.dev);
880 
881 	return ret;
882 }
883 EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
884 
885 int dsa_port_get_phy_sset_count(struct dsa_port *dp)
886 {
887 	struct phy_device *phydev;
888 	int ret = -EOPNOTSUPP;
889 
890 	if (of_phy_is_fixed_link(dp->dn))
891 		return ret;
892 
893 	phydev = dsa_port_get_phy_device(dp);
894 	if (IS_ERR_OR_NULL(phydev))
895 		return ret;
896 
897 	ret = phy_ethtool_get_sset_count(phydev);
898 	put_device(&phydev->mdio.dev);
899 
900 	return ret;
901 }
902 EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
903 
904 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
905 {
906 	struct dsa_notifier_hsr_info info = {
907 		.sw_index = dp->ds->index,
908 		.port = dp->index,
909 		.hsr = hsr,
910 	};
911 	int err;
912 
913 	dp->hsr_dev = hsr;
914 
915 	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
916 	if (err)
917 		dp->hsr_dev = NULL;
918 
919 	return err;
920 }
921 
922 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
923 {
924 	struct dsa_notifier_hsr_info info = {
925 		.sw_index = dp->ds->index,
926 		.port = dp->index,
927 		.hsr = hsr,
928 	};
929 	int err;
930 
931 	dp->hsr_dev = NULL;
932 
933 	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
934 	if (err)
935 		pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
936 }
937