// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}
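
/* Typical usage (illustrative sketch, mirroring the callers below): fill in
 * the event-specific info structure and emit the matching DSA_NOTIFIER_*
 * event, e.g. (ageing_msecs being a placeholder value in milliseconds):
 *
 *	struct dsa_notifier_ageing_time_info info = {
 *		.ageing_time = ageing_msecs,
 *	};
 *
 *	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
 */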

int dsa_port_set_state(struct dsa_port *dp, u8 state)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (ds->ops->port_fast_age) {
		/* Fast age FDB entries or flush the appropriate forwarding
		 * database for the given port, if we are moving it from
		 * Learning or Forwarding state to Disabled, Blocking or
		 * Listening state.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			ds->ops->port_fast_age(ds, port);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
{
	int err;

	err = dsa_port_set_state(dp, state);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

static void dsa_port_change_brport_flags(struct dsa_port *dp,
					 bool bridge_offload)
{
	struct switchdev_brport_flags flags;
	int flag;

	flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	if (bridge_offload)
		flags.val = flags.mask;
	else
		flags.val = flags.mask & ~BR_LEARNING;

	for_each_set_bit(flag, &flags.mask, 32) {
		struct switchdev_brport_flags tmp;

		tmp.val = flags.val & BIT(flag);
		tmp.mask = BIT(flag);

		dsa_port_bridge_flags(dp, tmp, NULL);
	}
}
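
/* Worked example of the per-bit split above (illustrative): with
 * bridge_offload true, flags.val == flags.mask == BR_LEARNING | BR_FLOOD |
 * BR_MCAST_FLOOD | BR_BCAST_FLOOD, so four single-bit requests are emitted,
 * each with tmp.val == tmp.mask (feature enabled). With bridge_offload
 * false, the BR_LEARNING request carries tmp.val == 0 (learning off) while
 * the three flood bits remain enabled, which is the standalone-port
 * configuration.
 */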

int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Notify the port driver to set its configurable flags in a way that
	 * matches the initial settings of a bridge port.
	 */
	dsa_port_change_brport_flags(dp, true);

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);

	/* The bridging is rolled back on error */
	if (err) {
		dsa_port_change_brport_flags(dp, false);
		dp->bridge_dev = NULL;
	}

	return err;
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. We even prefer it that way, because
	 * otherwise some setups might never get the notification they need:
	 * for example, when a port leaves a LAG that offloads the bridge, it
	 * becomes standalone, but as far as the bridge is concerned, no port
	 * ever left.
	 */
	dsa_port_change_brport_flags(dp, false);

	/* The port has left the bridge and was put in BR_STATE_DISABLED by
	 * the bridge layer, so put it back in BR_STATE_FORWARDING to keep it
	 * functional as a standalone port.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include the port in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}
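
/* Illustrative truth table for the computation above:
 *
 *	link_up	tx_enabled	port in TX set
 *	   0	    0		no
 *	   0	    1		no  (static aggregate, link down)
 *	   1	    0		no  (e.g. not selected by LACP)
 *	   1	    1		yes
 */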

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err) {
		dp->lag_dev = NULL;
		dsa_lag_unmap(dp->ds->dst, lag);
	}

	return err;
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
		       err);

	dsa_lag_unmap(dp->ds->dst, lag);
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have 8021q uppers; those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness
	 * state as long as we have 8021q uppers whose VIDs are also
	 * configured on the bridge.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL or -ENOENT if
			 * the device or the VID, respectively, is not found.
			 * A return of 0 means the VID also exists on the
			 * bridge, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has the same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}
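
/* Illustrative scenario (hypothetical port/bridge names) for the global
 * case above: on a switch with ds->vlan_filtering_is_global set, with sw0p0
 * already in br0 (vlan_filtering=1) and sw0p1 being reconfigured for br1
 * (vlan_filtering=0), the request is rejected, because a single hardware
 * setting cannot satisfy both bridges at once.
 */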

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!dp->bridge_dev)
		return false;

	return (!ds->configure_vlan_while_not_filtering &&
		!br_vlan_enabled(dp->bridge_dev));
}
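
/* Illustrative consequence (assumption about driver behaviour, not mandated
 * here): a driver that leaves ds->configure_vlan_while_not_filtering at
 * false keeps the legacy behaviour, so VLAN objects for ports of a
 * vlan_filtering=0 bridge are skipped rather than programmed into hardware;
 * drivers able to program VLANs at any time opt out of this by setting the
 * flag to true at setup time.
 */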

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}
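
/* Worked conversion example (illustrative, assuming USER_HZ == 100 and
 * HZ == 250): the bridge default ageing time of 300 seconds arrives as a
 * clock_t of 30000; clock_t_to_jiffies() turns that into 75000 jiffies and
 * jiffies_to_msecs() into 300000 ms, which is the value handed to drivers
 * and stored in dp->ageing_time.
 */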

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(const struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_bridge_flags)
		return -EINVAL;

	return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
		     struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_set_mrouter)
		return -EOPNOTSUPP;

	return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool propagate_upstream)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.propagate_upstream = propagate_upstream,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}
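
/* Illustrative call (hypothetical caller): when a user port's MTU changes,
 * passing propagate_upstream == true asks the cross-chip notifier to also
 * update the CPU port and DSA links on the path, so that they can carry the
 * new MTU plus tagging overhead:
 *
 *	err = dsa_port_mtu_change(dp, new_mtu, true);
 */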

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->filter = tag_ops->filter;
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}
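
/* Illustrative device tree snippets (hypothetical node and phandle names)
 * for the two non-legacy cases handled above: a port wired to a discrete
 * PHY, and a fixed-link port.
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";
 *		phy-handle = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 *
 *	port@5 {
 *		reg = <5>;
 *		label = "cpu";
 *		ethernet = <&fec1>;
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */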

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = hsr;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
}
943