// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Schneider-Electric
 *
 * Clément Léger <clement.leger@bootlin.com>
 */

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <net/dsa.h>

#include "rzn1_a5psw.h"

struct a5psw_stats {
	u16 offset;
	const char name[ETH_GSTRING_LEN];
};

#define STAT_DESC(_offset) {	\
	.offset = A5PSW_##_offset,	\
	.name = __stringify(_offset),	\
}

static const struct a5psw_stats a5psw_stats[] = {
	STAT_DESC(aFramesTransmittedOK),
	STAT_DESC(aFramesReceivedOK),
	STAT_DESC(aFrameCheckSequenceErrors),
	STAT_DESC(aAlignmentErrors),
	STAT_DESC(aOctetsTransmittedOK),
	STAT_DESC(aOctetsReceivedOK),
	STAT_DESC(aTxPAUSEMACCtrlFrames),
	STAT_DESC(aRxPAUSEMACCtrlFrames),
	STAT_DESC(ifInErrors),
	STAT_DESC(ifOutErrors),
	STAT_DESC(ifInUcastPkts),
	STAT_DESC(ifInMulticastPkts),
	STAT_DESC(ifInBroadcastPkts),
	STAT_DESC(ifOutDiscards),
	STAT_DESC(ifOutUcastPkts),
	STAT_DESC(ifOutMulticastPkts),
	STAT_DESC(ifOutBroadcastPkts),
	STAT_DESC(etherStatsDropEvents),
	STAT_DESC(etherStatsOctets),
	STAT_DESC(etherStatsPkts),
	STAT_DESC(etherStatsUndersizePkts),
	STAT_DESC(etherStatsOversizePkts),
	STAT_DESC(etherStatsPkts64Octets),
	STAT_DESC(etherStatsPkts65to127Octets),
	STAT_DESC(etherStatsPkts128to255Octets),
	STAT_DESC(etherStatsPkts256to511Octets),
	STAT_DESC(etherStatsPkts512to1023Octets),
	STAT_DESC(etherStatsPkts1024to1518Octets),
	STAT_DESC(etherStatsPkts1519toXOctets),
	STAT_DESC(etherStatsJabbers),
	STAT_DESC(etherStatsFragments),
	STAT_DESC(VLANReceived),
	STAT_DESC(VLANTransmitted),
	STAT_DESC(aDeferred),
	STAT_DESC(aMultipleCollisions),
	STAT_DESC(aSingleCollisions),
	STAT_DESC(aLateCollisions),
	STAT_DESC(aExcessiveCollisions),
	STAT_DESC(aCarrierSenseErrors),
};

static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
{
	writel(value, a5psw->base + offset);
}

static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
{
	return readl(a5psw->base + offset);
}

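/* Helper for read-modify-write sequences on the shared switch registers;
 * reg_lock serialises concurrent updaters so the read and the write below
 * cannot interleave.
 */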
static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
{
	u32 reg;

	spin_lock(&a5psw->reg_lock);

	reg = a5psw_reg_readl(a5psw, offset);
	reg &= ~mask;
	reg |= val;
	a5psw_reg_writel(a5psw, offset, reg);

	spin_unlock(&a5psw->reg_lock);
}

static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
						    int port,
						    enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_RZN1_A5PSW;
}

static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
				   bool enable)
{
	u32 rx_match = 0;

	if (enable)
		rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);

	a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
		      A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
}

static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
{
	/* Enable "management forward" pattern matching; this forwards
	 * packets from this port only towards the management port and thus
	 * isolates the port.
	 */
	a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
}

static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
{
	u32 mask = A5PSW_PORT_ENA_TX(port);
	u32 reg = enable ? mask : 0;

	/* Even though the port TX is disabled through the TXENA bit in the
	 * PORT_ENA register, it can still send BPDUs. This depends on the tag
	 * configuration added when sending packets from the CPU port to the
	 * switch port. Indeed, when using forced forwarding without filtering,
	 * even disabled ports can still send packets that are tagged. This
	 * makes it possible to implement STP support when ports are in a state
	 * where forwarding traffic should be stopped but BPDUs should still
	 * be sent.
	 */
	a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
}

static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
{
	u32 port_ena = 0;

	if (enable)
		port_ena |= A5PSW_PORT_ENA_TX_RX(port);

	a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
		      port_ena);
}

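/* Execute a lookup engine command: write *ctrl to LK_ADDR_CTRL, then poll
 * until the BUSY bit clears. On return, *ctrl holds the value read back,
 * which for lookup commands contains the resulting entry address.
 */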
static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
{
	int ret;

	a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);

	ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
				 !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
				 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
	if (ret)
		dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");

	return ret;
}

static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
{
	u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);

	mutex_lock(&a5psw->lk_lock);
	a5psw_lk_execute_ctrl(a5psw, &ctrl);
	mutex_unlock(&a5psw->lk_lock);
}

static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
				     bool authorize)
{
	u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));

	if (authorize)
		reg |= A5PSW_AUTH_PORT_AUTHORIZED;
	else
		reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;

	a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
}

static void a5psw_port_disable(struct dsa_switch *ds, int port)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_authorize_set(a5psw, port, false);
	a5psw_port_enable_set(a5psw, port, false);
}

static int a5psw_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phy)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_authorize_set(a5psw, port, true);
	a5psw_port_enable_set(a5psw, port, true);

	return 0;
}

static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct a5psw *a5psw = ds->priv;

	new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
	a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);

	return 0;
}

static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
{
	return A5PSW_MAX_MTU;
}

static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
				   struct phylink_config *config)
{
	unsigned long *intf = config->supported_interfaces;

	config->mac_capabilities = MAC_1000FD;

	if (dsa_is_cpu_port(ds, port)) {
		/* GMII is used internally and GMAC2 is connected to the switch
		 * using 1000 Mbps full-duplex mode only (cf. the ethernet
		 * manual).
		 */
		__set_bit(PHY_INTERFACE_MODE_GMII, intf);
	} else {
		config->mac_capabilities |= MAC_100 | MAC_10;
		phy_interface_set_rgmii(intf);
		__set_bit(PHY_INTERFACE_MODE_RMII, intf);
		__set_bit(PHY_INTERFACE_MODE_MII, intf);
	}
}

static struct phylink_pcs *
a5psw_phylink_mac_select_pcs(struct phylink_config *config,
			     phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct a5psw *a5psw = dp->ds->priv;

	if (dsa_port_is_cpu(dp))
		return NULL;

	return a5psw->pcs[dp->index];
}

static void a5psw_phylink_mac_config(struct phylink_config *config,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
}

static void a5psw_phylink_mac_link_down(struct phylink_config *config,
					unsigned int mode,
					phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct a5psw *a5psw = dp->ds->priv;
	int port = dp->index;
	u32 cmd_cfg;

	cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
	cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
	a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}

static void a5psw_phylink_mac_link_up(struct phylink_config *config,
				      struct phy_device *phydev,
				      unsigned int mode,
				      phy_interface_t interface,
				      int speed, int duplex, bool tx_pause,
				      bool rx_pause)
{
	u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
		      A5PSW_CMD_CFG_TX_CRC_APPEND;
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct a5psw *a5psw = dp->ds->priv;

	if (speed == SPEED_1000)
		cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;

	if (duplex == DUPLEX_HALF)
		cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;

	cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;

	if (!rx_pause)
		cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;

	a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(dp->index), cmd_cfg);
}

static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct a5psw *a5psw = ds->priv;
	unsigned long rate;
	u64 max, tmp;
	u32 agetime;

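	/* The ageing period scales with the table size: one ageing tick
	 * spans 1024 * A5PSW_TABLE_ENTRIES clock cycles (as implied by the
	 * computation below), so:
	 *
	 *   agetime = (msecs / 1000) * rate / (1024 * A5PSW_TABLE_ENTRIES)
	 *
	 * and the largest supported ageing time corresponds to agetime ==
	 * A5PSW_LK_AGETIME_MASK.
	 */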
	rate = clk_get_rate(a5psw->clk);
	max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
		       rate) * 1000;
	if (msecs > max)
		return -EINVAL;

	tmp = div_u64(rate, MSEC_PER_SEC);
	agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);

	a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);

	return 0;
}

static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
{
	u32 mask = A5PSW_INPUT_LEARN_DIS(port);
	u32 reg = !learn ? mask : 0;

	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}

static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
{
	u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
	u32 reg = block ? mask : 0;

	a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
}

static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
					  bool set)
{
	u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
			A5PSW_MCAST_DEF_MASK};
	int i;

	for (i = 0; i < ARRAY_SIZE(offsets); i++)
		a5psw_reg_rmw(a5psw, offsets[i], BIT(port),
			      set ? BIT(port) : 0);
}

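/* A standalone port must not exchange traffic with the other user ports:
 * disable learning and flooding for it and use the "management forward"
 * pattern so that all of its ingress traffic goes to the CPU port only.
 */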
static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
				      bool standalone)
{
	a5psw_port_learning_set(a5psw, port, !standalone);
	a5psw_flooding_set_resolution(a5psw, port, !standalone);
	a5psw_port_mgmtfwd_set(a5psw, port, standalone);
}

static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
				  struct dsa_bridge bridge,
				  bool *tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct a5psw *a5psw = ds->priv;

	/* We only support 1 bridge device */
	if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Forwarding offload supported for a single bridge");
		return -EOPNOTSUPP;
	}

	a5psw->br_dev = bridge.dev;
	a5psw_port_set_standalone(a5psw, port, false);

	a5psw->bridged_ports |= BIT(port);

	return 0;
}

static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct dsa_bridge bridge)
{
	struct a5psw *a5psw = ds->priv;

	a5psw->bridged_ports &= ~BIT(port);

	a5psw_port_set_standalone(a5psw, port, true);

	/* No more ports bridged */
	if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
		a5psw->br_dev = NULL;
}

static int a5psw_port_pre_bridge_flags(struct dsa_switch *ds, int port,
				       struct switchdev_brport_flags flags,
				       struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

static int
a5psw_port_bridge_flags(struct dsa_switch *ds, int port,
			struct switchdev_brport_flags flags,
			struct netlink_ext_ack *extack)
{
	struct a5psw *a5psw = ds->priv;
	u32 val;

	/* If a port is set as standalone, we do not want to be able to
	 * configure flooding or learning, which would result in joining the
	 * unique bridge. This can happen when a port leaves the bridge, in
	 * which case the DSA core will try to "clear" all flags for the
	 * standalone port (i.e. enable flooding, disable learning). In that
	 * case do not fail but do not apply the flags.
	 */
	if (!(a5psw->bridged_ports & BIT(port)))
		return 0;

	if (flags.mask & BR_LEARNING) {
		val = flags.val & BR_LEARNING ? 0 : A5PSW_INPUT_LEARN_DIS(port);
		a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN,
			      A5PSW_INPUT_LEARN_DIS(port), val);
	}

	if (flags.mask & BR_FLOOD) {
		val = flags.val & BR_FLOOD ? BIT(port) : 0;
		a5psw_reg_rmw(a5psw, A5PSW_UCAST_DEF_MASK, BIT(port), val);
	}

	if (flags.mask & BR_MCAST_FLOOD) {
		val = flags.val & BR_MCAST_FLOOD ? BIT(port) : 0;
		a5psw_reg_rmw(a5psw, A5PSW_MCAST_DEF_MASK, BIT(port), val);
	}

	if (flags.mask & BR_BCAST_FLOOD) {
		val = flags.val & BR_BCAST_FLOOD ? BIT(port) : 0;
		a5psw_reg_rmw(a5psw, A5PSW_BCAST_DEF_MASK, BIT(port), val);
	}

	return 0;
}

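/* Map the bridge STP states onto the three per-port controls the switch
 * provides: RX blocking, TX enable and learning. BPDUs can still be sent in
 * all states thanks to forced forwarding from the CPU port (see
 * a5psw_port_tx_enable()).
 */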
static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	bool learning_enabled, rx_enabled, tx_enabled;
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct a5psw *a5psw = ds->priv;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		rx_enabled = false;
		tx_enabled = false;
		learning_enabled = false;
		break;
	case BR_STATE_LEARNING:
		rx_enabled = false;
		tx_enabled = false;
		learning_enabled = dp->learning;
		break;
	case BR_STATE_FORWARDING:
		rx_enabled = true;
		tx_enabled = true;
		learning_enabled = dp->learning;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	a5psw_port_learning_set(a5psw, port, learning_enabled);
	a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
	a5psw_port_tx_enable(a5psw, port, tx_enabled);
}

static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
{
	struct a5psw *a5psw = ds->priv;

	a5psw_port_fdb_flush(a5psw, port);
}

static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
				   u16 *entry)
{
	u32 ctrl;
	int ret;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);

	ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
	ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
	if (ret)
		return ret;

	*entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;

	return 0;
}

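/* Adding a static entry: a LOOKUP command returns the table slot for the MAC
 * (an existing entry or a free slot), then DATA_HI is rewritten with the
 * updated port mask and flags and committed with a WRITE command. When a new
 * slot is populated, LEARNCOUNT appears to need manual incrementing since
 * the static write bypasses hardware learning.
 */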
static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data = {0};
	bool inc_learncount = false;
	int ret = 0;
	u16 entry;
	u32 reg;

	ether_addr_copy(lk_data.entry.mac, addr);
	lk_data.entry.port_mask = BIT(port);

	mutex_lock(&a5psw->lk_lock);

	/* Set the value to be written in the lookup table */
	ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
	if (ret)
		goto lk_unlock;

	lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
	if (!lk_data.entry.valid) {
		inc_learncount = true;
		/* port_mask set to 0x1f when entry is not valid, clear it */
		lk_data.entry.port_mask = 0;
		lk_data.entry.prio = 0;
	}

	lk_data.entry.port_mask |= BIT(port);
	lk_data.entry.is_static = 1;
	lk_data.entry.valid = 1;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);

	reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
	ret = a5psw_lk_execute_ctrl(a5psw, &reg);
	if (ret)
		goto lk_unlock;

	if (inc_learncount) {
		reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
		a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
	}

lk_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}

static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data = {0};
	bool clear = false;
	u16 entry;
	u32 reg;
	int ret;

	ether_addr_copy(lk_data.entry.mac, addr);

	mutex_lock(&a5psw->lk_lock);

	ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
	if (ret)
		goto lk_unlock;

	lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);

	/* Our hardware does not associate any VID with the FDB entries, so if
	 * two entries were added for the same MAC but different VIDs, deleting
	 * the first one would also delete the second one. Since there is
	 * unfortunately nothing we can do about that, do not return an
	 * error...
	 */
	if (!lk_data.entry.valid)
		goto lk_unlock;

	lk_data.entry.port_mask &= ~BIT(port);
	/* If there is no more port in the mask, clear the entry */
	if (lk_data.entry.port_mask == 0)
		clear = true;

	a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);

	reg = entry;
	if (clear)
		reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
	else
		reg |= A5PSW_LK_ADDR_CTRL_WRITE;

	ret = a5psw_lk_execute_ctrl(a5psw, &reg);
	if (ret)
		goto lk_unlock;

	/* Decrement LEARNCOUNT */
	if (clear) {
		reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
		a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
	}

lk_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}

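/* Walk every lookup table slot with READ commands and report the valid
 * entries whose port mask includes this port. The VID is always reported as
 * 0 since the hardware FDB is not VLAN aware (see a5psw_port_fdb_del()).
 */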
static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
			       dsa_fdb_dump_cb_t *cb, void *data)
{
	struct a5psw *a5psw = ds->priv;
	union lk_data lk_data;
	int i = 0, ret = 0;
	u32 reg;

	mutex_lock(&a5psw->lk_lock);

	for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
		reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;

		ret = a5psw_lk_execute_ctrl(a5psw, &reg);
		if (ret)
			goto out_unlock;

		lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
		/* If entry is not valid or does not contain the port, skip */
		if (!lk_data.entry.valid ||
		    !(lk_data.entry.port_mask & BIT(port)))
			continue;

		lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);

		ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
		if (ret)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&a5psw->lk_lock);

	return ret;
}

static int a5psw_port_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering,
				     struct netlink_ext_ack *extack)
{
	u32 mask = BIT(port + A5PSW_VLAN_VERI_SHIFT) |
		   BIT(port + A5PSW_VLAN_DISC_SHIFT);
	u32 val = vlan_filtering ? mask : 0;
	struct a5psw *a5psw = ds->priv;

	/* Disable/enable vlan tagging */
	a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE_ENA, BIT(port),
		      vlan_filtering ? BIT(port) : 0);

	/* Disable/enable vlan input filtering */
	a5psw_reg_rmw(a5psw, A5PSW_VLAN_VERIFY, mask, val);

	return 0;
}

static int a5psw_find_vlan_entry(struct a5psw *a5psw, u16 vid)
{
	u32 vlan_res;
	int i;

	/* Find the VLAN resource entry matching this VID */
	for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
		vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
		if (FIELD_GET(A5PSW_VLAN_RES_VLANID, vlan_res) == vid)
			return i;
	}

	return -1;
}

static int a5psw_new_vlan_res_entry(struct a5psw *a5psw, u16 newvid)
{
	u32 vlan_res;
	int i;

	/* Find a free VLAN entry */
	for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
		vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
		if (!(FIELD_GET(A5PSW_VLAN_RES_PORTMASK, vlan_res))) {
			vlan_res = FIELD_PREP(A5PSW_VLAN_RES_VLANID, newvid);
			a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(i), vlan_res);
			return i;
		}
	}

	return -1;
}

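/* Each VLAN_RES register multiplexes a port mask and a tag mask: writing
 * A5PSW_VLAN_RES_RD_TAGMASK toggles which of the two a subsequent read
 * returns. The read below is therefore bracketed by two such writes,
 * fetching the current tag mask and then restoring the default read view
 * before the updated tag mask is written back.
 */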
static void a5psw_port_vlan_tagged_cfg(struct a5psw *a5psw,
				       unsigned int vlan_res_id, int port,
				       bool set)
{
	u32 mask = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_RD_TAGMASK |
		   BIT(port);
	u32 vlan_res_off = A5PSW_VLAN_RES(vlan_res_id);
	u32 val = A5PSW_VLAN_RES_WR_TAGMASK, reg;

	if (set)
		val |= BIT(port);

	/* Toggle tag mask read */
	a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);
	reg = a5psw_reg_readl(a5psw, vlan_res_off);
	a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);

	reg &= ~mask;
	reg |= val;
	a5psw_reg_writel(a5psw, vlan_res_off, reg);
}

static void a5psw_port_vlan_cfg(struct a5psw *a5psw, unsigned int vlan_res_id,
				int port, bool set)
{
	u32 mask = A5PSW_VLAN_RES_WR_TAGMASK | BIT(port);
	u32 reg = A5PSW_VLAN_RES_WR_PORTMASK;

	if (set)
		reg |= BIT(port);

	a5psw_reg_rmw(a5psw, A5PSW_VLAN_RES(vlan_res_id), mask, reg);
}

static int a5psw_port_vlan_add(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan,
			       struct netlink_ext_ack *extack)
{
	bool tagged = !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct a5psw *a5psw = ds->priv;
	u16 vid = vlan->vid;
	int vlan_res_id;

	vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
	if (vlan_res_id < 0) {
		vlan_res_id = a5psw_new_vlan_res_entry(a5psw, vid);
		if (vlan_res_id < 0)
			return -ENOSPC;
	}

	a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, true);
	if (tagged)
		a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, true);

	/* Configure the port to tag with the corresponding VID, but do not
	 * enable it yet: wait for VLAN filtering to be enabled before
	 * enabling VLAN port tagging.
	 */
	if (pvid)
		a5psw_reg_writel(a5psw, A5PSW_SYSTEM_TAGINFO(port), vid);

	return 0;
}

static int a5psw_port_vlan_del(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct a5psw *a5psw = ds->priv;
	u16 vid = vlan->vid;
	int vlan_res_id;

	vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
	if (vlan_res_id < 0)
		return -EINVAL;

	a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, false);
	a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, false);

	return 0;
}

static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
{
	u32 reg_lo, reg_hi;

	reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
	/* A5PSW_STATS_HIWORD is latched on stat read */
	reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);

	return ((u64)reg_hi << 32) | reg_lo;
}

static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			      uint8_t *data)
{
	unsigned int u;

	if (stringset != ETH_SS_STATS)
		return;

	for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
		ethtool_puts(&data, a5psw_stats[u].name);
}

static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
				    uint64_t *data)
{
	struct a5psw *a5psw = ds->priv;
	unsigned int u;

	for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
		data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
}

static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return 0;

	return ARRAY_SIZE(a5psw_stats);
}

static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
				    struct ethtool_eth_mac_stats *mac_stats)
{
	struct a5psw *a5psw = ds->priv;

#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
	mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
	mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
	mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
	mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
	mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
	mac_stats->AlignmentErrors = RD(aAlignmentErrors);
	mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
	mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
	mac_stats->LateCollisions = RD(aLateCollisions);
	mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
	mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
	mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
	mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
	mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
	mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
	mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
	mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
	mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
	mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
#undef RD
}

static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, A5PSW_MAX_MTU },
	{}
};

static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
				 struct ethtool_rmon_stats *rmon_stats,
				 const struct ethtool_rmon_hist_range **ranges)
{
	struct a5psw *a5psw = ds->priv;

#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
	rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
	rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
	rmon_stats->fragments = RD(etherStatsFragments);
	rmon_stats->jabbers = RD(etherStatsJabbers);
	rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
	rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
	rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
	rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
	rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
	rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
	rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
#undef RD

	*ranges = a5psw_rmon_ranges;
}

static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
				     struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct a5psw *a5psw = ds->priv;
	u64 stat;

	stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
	ctrl_stats->MACControlFramesTransmitted = stat;
	stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
	ctrl_stats->MACControlFramesReceived = stat;
}

static void a5psw_vlan_setup(struct a5psw *a5psw, int port)
{
	u32 reg;

	/* Enable TAG always mode for the port; this is actually controlled
	 * by the VLAN_IN_MODE_ENA field, which will be used for PVID insertion
	 */
	reg = A5PSW_VLAN_IN_MODE_TAG_ALWAYS;
	reg <<= A5PSW_VLAN_IN_MODE_PORT_SHIFT(port);
	a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE, A5PSW_VLAN_IN_MODE_PORT(port),
		      reg);

	/* Set transparent mode for output frame manipulation; this will
	 * depend on the VLAN_RES configuration mode
	 */
	reg = A5PSW_VLAN_OUT_MODE_TRANSPARENT;
	reg <<= A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port);
	a5psw_reg_rmw(a5psw, A5PSW_VLAN_OUT_MODE,
		      A5PSW_VLAN_OUT_MODE_PORT(port), reg);
}

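/* One-time switch bring-up: pin the CPU port as the management port, enable
 * DSA tagging on it, clear the lookup and VLAN tables, then reset every
 * port, leaving user ports in standalone mode until they join a bridge.
 */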
static int a5psw_setup(struct dsa_switch *ds)
{
	struct a5psw *a5psw = ds->priv;
	int port, vlan, ret;
	struct dsa_port *dp;
	u32 reg;

	/* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
	dsa_switch_for_each_cpu_port(dp, ds) {
		if (dp->index != A5PSW_CPU_PORT) {
			dev_err(a5psw->dev, "Invalid CPU port\n");
			return -EINVAL;
		}
	}

	/* Configure management port */
	reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
	a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);

	/* Set pattern 0 to forward all frames to the mgmt port */
	a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
			 A5PSW_PATTERN_CTRL_MGMTFWD);

	/* Enable port tagging */
	reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
	reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
	a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);

	/* Enable normal switch operation */
	reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
	      A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
	      A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
	a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);

	ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
				 !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
				 A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
	if (ret) {
		dev_err(a5psw->dev, "Failed to clear lookup table\n");
		return ret;
	}

	/* Reset learn count to 0 */
	reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
	a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);

	/* Clear VLAN resource table */
	reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
	for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
		a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);

	/* Reset all ports */
	dsa_switch_for_each_port(dp, ds) {
		port = dp->index;

		/* Reset the port */
		a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
				 A5PSW_CMD_CFG_SW_RESET);

		/* Enable only CPU port */
		a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));

		if (dsa_port_is_unused(dp))
			continue;

		/* Enable egress flooding and learning for CPU port */
		if (dsa_port_is_cpu(dp)) {
			a5psw_flooding_set_resolution(a5psw, port, true);
			a5psw_port_learning_set(a5psw, port, true);
		}

		/* Enable standalone mode for user ports */
		if (dsa_port_is_user(dp))
			a5psw_port_set_standalone(a5psw, port, true);

		a5psw_vlan_setup(a5psw, port);
	}

	return 0;
}

static const struct phylink_mac_ops a5psw_phylink_mac_ops = {
	.mac_select_pcs = a5psw_phylink_mac_select_pcs,
	.mac_config = a5psw_phylink_mac_config,
	.mac_link_down = a5psw_phylink_mac_link_down,
	.mac_link_up = a5psw_phylink_mac_link_up,
};

static const struct dsa_switch_ops a5psw_switch_ops = {
	.get_tag_protocol = a5psw_get_tag_protocol,
	.setup = a5psw_setup,
	.port_disable = a5psw_port_disable,
	.port_enable = a5psw_port_enable,
	.phylink_get_caps = a5psw_phylink_get_caps,
	.port_change_mtu = a5psw_port_change_mtu,
	.port_max_mtu = a5psw_port_max_mtu,
	.get_sset_count = a5psw_get_sset_count,
	.get_strings = a5psw_get_strings,
	.get_ethtool_stats = a5psw_get_ethtool_stats,
	.get_eth_mac_stats = a5psw_get_eth_mac_stats,
	.get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
	.get_rmon_stats = a5psw_get_rmon_stats,
	.set_ageing_time = a5psw_set_ageing_time,
	.port_bridge_join = a5psw_port_bridge_join,
	.port_bridge_leave = a5psw_port_bridge_leave,
	.port_pre_bridge_flags = a5psw_port_pre_bridge_flags,
	.port_bridge_flags = a5psw_port_bridge_flags,
	.port_stp_state_set = a5psw_port_stp_state_set,
	.port_fast_age = a5psw_port_fast_age,
	.port_vlan_filtering = a5psw_port_vlan_filtering,
	.port_vlan_add = a5psw_port_vlan_add,
	.port_vlan_del = a5psw_port_vlan_del,
	.port_fdb_add = a5psw_port_fdb_add,
	.port_fdb_del = a5psw_port_fdb_del,
	.port_fdb_dump = a5psw_port_fdb_dump,
};

static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
{
	u32 status;
	int err;

	err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
				 !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
				 1000 * USEC_PER_MSEC);
	if (err)
		dev_err(a5psw->dev, "MDIO command timeout\n");

	return err;
}

static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
	struct a5psw *a5psw = bus->priv;
	u32 cmd, status;
	int ret;

	cmd = A5PSW_MDIO_COMMAND_READ;
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);

	a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);

	ret = a5psw_mdio_wait_busy(a5psw);
	if (ret)
		return ret;

	ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;

	status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
	if (status & A5PSW_MDIO_CFG_STATUS_READERR)
		return -EIO;

	return ret;
}

static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
			    u16 phy_data)
{
	struct a5psw *a5psw = bus->priv;
	u32 cmd;

	cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
	cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);

	a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
	a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);

	return a5psw_mdio_wait_busy(a5psw);
}

static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
{
	unsigned long rate;
	unsigned long div;
	u32 cfgstatus;

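	/* The MDC frequency is hclk / (2 * CLKDIV), as implied by the divider
	 * computation below, so CLKDIV must stay within
	 * [A5PSW_MDIO_CLK_DIV_MIN, FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV)].
	 */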
	rate = clk_get_rate(a5psw->hclk);
	div = ((rate / mdio_freq) / 2);
	if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
	    div < A5PSW_MDIO_CLK_DIV_MIN) {
		dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
		return -ERANGE;
	}

	cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);

	a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);

	return 0;
}

static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
{
	struct device *dev = a5psw->dev;
	struct mii_bus *bus;
	u32 mdio_freq;
	int ret;

	if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
		mdio_freq = A5PSW_MDIO_DEF_FREQ;

	ret = a5psw_mdio_config(a5psw, mdio_freq);
	if (ret)
		return ret;

	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "a5psw_mdio";
	bus->read = a5psw_mdio_read;
	bus->write = a5psw_mdio_write;
	bus->priv = a5psw;
	bus->parent = dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

	a5psw->mii_bus = bus;

	return devm_of_mdiobus_register(dev, bus, node);
}

static void a5psw_pcs_free(struct a5psw *a5psw)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
		if (a5psw->pcs[i])
			miic_destroy(a5psw->pcs[i]);
	}
}

static int a5psw_pcs_get(struct a5psw *a5psw)
{
	struct device_node *ports, *port, *pcs_node;
	struct phylink_pcs *pcs;
	int ret;
	u32 reg;

	ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		pcs_node = of_parse_phandle(port, "pcs-handle", 0);
		if (!pcs_node)
			continue;

		if (of_property_read_u32(port, "reg", &reg)) {
			ret = -EINVAL;
			goto free_pcs;
		}

		if (reg >= ARRAY_SIZE(a5psw->pcs)) {
			ret = -ENODEV;
			goto free_pcs;
		}

		pcs = miic_create(a5psw->dev, pcs_node);
		if (IS_ERR(pcs)) {
			dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
				reg);
			ret = PTR_ERR(pcs);
			goto free_pcs;
		}

		a5psw->pcs[reg] = pcs;
		of_node_put(pcs_node);
	}
	of_node_put(ports);

	return 0;

free_pcs:
	of_node_put(pcs_node);
	of_node_put(port);
	of_node_put(ports);
	a5psw_pcs_free(a5psw);

	return ret;
}

static int a5psw_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *mdio;
	struct dsa_switch *ds;
	struct a5psw *a5psw;
	int ret;

	a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
	if (!a5psw)
		return -ENOMEM;

	a5psw->dev = dev;
	mutex_init(&a5psw->lk_lock);
	spin_lock_init(&a5psw->reg_lock);
	a5psw->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(a5psw->base))
		return PTR_ERR(a5psw->base);

	a5psw->bridged_ports = BIT(A5PSW_CPU_PORT);

	ret = a5psw_pcs_get(a5psw);
	if (ret)
		return ret;

	a5psw->hclk = devm_clk_get(dev, "hclk");
	if (IS_ERR(a5psw->hclk)) {
		dev_err(dev, "failed to get hclk clock\n");
		ret = PTR_ERR(a5psw->hclk);
		goto free_pcs;
	}

	a5psw->clk = devm_clk_get(dev, "clk");
	if (IS_ERR(a5psw->clk)) {
		dev_err(dev, "failed to get clk_switch clock\n");
		ret = PTR_ERR(a5psw->clk);
		goto free_pcs;
	}

	ret = clk_prepare_enable(a5psw->clk);
	if (ret)
		goto free_pcs;

	ret = clk_prepare_enable(a5psw->hclk);
	if (ret)
		goto clk_disable;

	mdio = of_get_child_by_name(dev->of_node, "mdio");
	if (of_device_is_available(mdio)) {
		ret = a5psw_probe_mdio(a5psw, mdio);
		if (ret) {
			of_node_put(mdio);
			dev_err(dev, "Failed to register MDIO: %d\n", ret);
			goto hclk_disable;
		}
	}

	of_node_put(mdio);

	ds = &a5psw->ds;
	ds->dev = dev;
	ds->num_ports = A5PSW_PORTS_NUM;
	ds->ops = &a5psw_switch_ops;
	ds->phylink_mac_ops = &a5psw_phylink_mac_ops;
	ds->priv = a5psw;

	/* a5psw_remove() and a5psw_shutdown() retrieve the private data
	 * through platform drvdata, so it must be set before the switch is
	 * registered.
	 */
	platform_set_drvdata(pdev, a5psw);

	ret = dsa_register_switch(ds);
	if (ret) {
		dev_err(dev, "Failed to register DSA switch: %d\n", ret);
		goto hclk_disable;
	}

	return 0;

hclk_disable:
	clk_disable_unprepare(a5psw->hclk);
clk_disable:
	clk_disable_unprepare(a5psw->clk);
free_pcs:
	a5psw_pcs_free(a5psw);

	return ret;
}

static void a5psw_remove(struct platform_device *pdev)
{
	struct a5psw *a5psw = platform_get_drvdata(pdev);

	if (!a5psw)
		return;

	dsa_unregister_switch(&a5psw->ds);
	a5psw_pcs_free(a5psw);
	clk_disable_unprepare(a5psw->hclk);
	clk_disable_unprepare(a5psw->clk);
}

static void a5psw_shutdown(struct platform_device *pdev)
{
	struct a5psw *a5psw = platform_get_drvdata(pdev);

	if (!a5psw)
		return;

	dsa_switch_shutdown(&a5psw->ds);

	platform_set_drvdata(pdev, NULL);
}

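/* Illustrative devicetree sketch of what this driver parses (clock names,
 * an "ethernet-ports" node with per-port "reg"/"pcs-handle", an optional
 * "mdio" child). Addresses and phandles below are placeholders, not taken
 * from a real board; see the renesas,rzn1-a5psw binding for the
 * authoritative schema.
 *
 *	switch@44050000 {
 *		compatible = "renesas,rzn1-a5psw";
 *		reg = <0x44050000 0x10000>;
 *		clocks = <&clks SWITCH_HCLK>, <&clks SWITCH_CLK>;
 *		clock-names = "hclk", "clk";
 *
 *		ethernet-ports {
 *			port@0 {
 *				reg = <0>;
 *				pcs-handle = <&mii_conv1>;
 *			};
 *		};
 *
 *		mdio {
 *			clock-frequency = <2500000>;
 *		};
 *	};
 */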
static const struct of_device_id a5psw_of_mtable[] = {
	{ .compatible = "renesas,rzn1-a5psw", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, a5psw_of_mtable);

static struct platform_driver a5psw_driver = {
	.driver = {
		.name	 = "rzn1_a5psw",
		.of_match_table = a5psw_of_mtable,
	},
	.probe = a5psw_probe,
	.remove = a5psw_remove,
	.shutdown = a5psw_shutdown,
};
module_platform_driver(a5psw_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");