xref: /linux/drivers/net/dsa/yt921x.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Motorcomm YT921x Switch
4  *
5  * Should work on YT9213/YT9214/YT9215/YT9218, but only tested on YT9215+SGMII,
6  * be sure to do your own checks before porting to another chip.
7  *
8  * Copyright (c) 2025 David Yang
9  */
10 
11 #include <linux/dcbnl.h>
12 #include <linux/etherdevice.h>
13 #include <linux/if_bridge.h>
14 #include <linux/if_hsr.h>
15 #include <linux/if_vlan.h>
16 #include <linux/iopoll.h>
17 #include <linux/mdio.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/of_mdio.h>
21 #include <linux/of_net.h>
22 #include <linux/sort.h>
23 
24 #include <net/dsa.h>
25 #include <net/dscp.h>
26 #include <net/ieee8021q.h>
27 
28 #include "yt921x.h"
29 
/* Describes one hardware MIB counter within a port's MIB register block */
struct yt921x_mib_desc {
	/* Number of 32-bit registers the counter occupies (1 or 2) */
	unsigned int size;
	/* Byte offset from YT921X_MIBn_DATA0(port) */
	unsigned int offset;
	/* ethtool -S stat name; NULL for counters reported via structured
	 * stats callbacks instead
	 */
	const char *name;
};

#define MIB_DESC(_size, _offset, _name) \
	{_size, _offset, _name}
38 
39 /* Must agree with yt921x_mib
40  *
41  * Unstructured fields (name != NULL) will appear in get_ethtool_stats(),
42  * structured go to their *_stats() methods, but we need their sizes and offsets
43  * to perform 32bit MIB overflow wraparound.
44  */
static const struct yt921x_mib_desc yt921x_mib_descs[] = {
	/* Entry order must match the field order of struct yt921x_mib:
	 * yt921x_read_mib() indexes the struct as a flat u64 array.
	 */
	MIB_DESC(1, YT921X_MIB_DATA_RX_BROADCAST, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PAUSE, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_MULTICAST, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_CRC_ERR, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_RX_ALIGN_ERR, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_UNDERSIZE_ERR, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_FRAG_ERR, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_64, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX, NULL),
	MIB_DESC(2, YT921X_MIB_DATA_RX_GOOD_BYTES, NULL),

	MIB_DESC(2, YT921X_MIB_DATA_RX_BAD_BYTES, "RxBadBytes"),
	MIB_DESC(1, YT921X_MIB_DATA_RX_OVERSIZE_ERR, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_RX_DROPPED, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_BROADCAST, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PAUSE, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_MULTICAST, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_TX_UNDERSIZE_ERR, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_64, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX, NULL),

	MIB_DESC(2, YT921X_MIB_DATA_TX_GOOD_BYTES, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_COLLISION, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_MULTIPLE_COLLISION, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_SINGLE_COLLISION, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_PKT, NULL),

	MIB_DESC(1, YT921X_MIB_DATA_TX_DEFERRED, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_TX_LATE_COLLISION, NULL),
	MIB_DESC(1, YT921X_MIB_DATA_RX_OAM, "RxOAM"),
	MIB_DESC(1, YT921X_MIB_DATA_TX_OAM, "TxOAM"),
};
96 
/* Static description of one chip variant in the YT921x family */
struct yt921x_info {
	/* Human-readable chip name, e.g. "YT9215S" */
	const char *name;
	/* Chip family identifier (YT9215_MAJOR / YT9218_MAJOR) */
	u16 major;
	/* Unknown, seems to be plain enumeration */
	u8 mode;
	u8 extmode;
	/* Ports with integral GbE PHYs, not including MCU Port 10 */
	u16 internal_mask;
	/* TODO: see comments in yt921x_dsa_phylink_get_caps() */
	u16 external_mask;
};
108 
/* Port bitmask helpers: internal GbE ports occupy the low bits, the two
 * external (MAC-only) ports are at bits 8 and 9.
 */
#define YT921X_PORT_MASK_INTn(port)	BIT(port)
#define YT921X_PORT_MASK_INT0_n(n)	GENMASK((n) - 1, 0)
#define YT921X_PORT_MASK_EXT0		BIT(8)
#define YT921X_PORT_MASK_EXT1		BIT(9)
113 
/* Known chip variants; the list is terminated by an empty sentinel entry */
static const struct yt921x_info yt921x_infos[] = {
	{
		"YT9215SC", YT9215_MAJOR, 1, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9215S", YT9215_MAJOR, 2, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9215RB", YT9215_MAJOR, 3, 0,
		YT921X_PORT_MASK_INT0_n(5),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		/* YT9214/YT9213 share the YT9215 major ID; they differ only
		 * in extmode and in which ports are bonded out.
		 */
		"YT9214NB", YT9215_MAJOR, 3, 2,
		YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9213NB", YT9215_MAJOR, 3, 3,
		YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
		YT921X_PORT_MASK_EXT1,
	},
	{
		"YT9218N", YT9218_MAJOR, 0, 0,
		YT921X_PORT_MASK_INT0_n(8),
		0,
	},
	{
		"YT9218MB", YT9218_MAJOR, 1, 0,
		YT921X_PORT_MASK_INT0_n(8),
		YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
	},
	{}
};
152 
#define YT921X_NAME	"yt921x"

/* NOTE(review): presumably the reserved VID (4095) used for VLAN-unaware
 * forwarding, as the name suggests — confirm against the VLAN setup code.
 */
#define YT921X_VID_UNWARE	4095

/* Polling parameters for yt921x_reg_wait() */
#define YT921X_POLL_SLEEP_US	10000
#define YT921X_POLL_TIMEOUT_US	100000
159 
160 /* The interval should be small enough to avoid overflow of 32bit MIBs.
161  *
162  * Until we can read MIBs from stats64 call directly (i.e. sleep
 * there), we have to poll stats more frequently than is actually needed.
164  * For overflow protection, normally, 100 sec interval should have been OK.
165  */
166 #define YT921X_STATS_INTERVAL_JIFFIES	(3 * HZ)
167 
/* Context for register access over the MDIO (SMI) management interface */
struct yt921x_reg_mdio {
	/* Host MDIO bus the switch is attached to */
	struct mii_bus *bus;
	/* MII address of the switch on @bus */
	int addr;
	/* SWITCH_ID_1 / SWITCH_ID_0 of the device
	 *
	 * This is a way to multiplex multiple devices on the same MII phyaddr
	 * and should be configurable in DT. However, MDIO core simply doesn't
	 * allow multiple devices over one reg addr, so this is a fixed value
	 * for now until a solution is found.
	 *
	 * Keep this because we need switchid to form MII regaddrs anyway.
	 */
	unsigned char switchid;
};
182 
183 /* TODO: SPI/I2C */
184 
185 #define to_yt921x_priv(_ds) container_of_const(_ds, struct yt921x_priv, ds)
186 #define to_device(priv) ((priv)->ds.dev)
187 
188 static int yt921x_reg_read(struct yt921x_priv *priv, u32 reg, u32 *valp)
189 {
190 	WARN_ON(!mutex_is_locked(&priv->reg_lock));
191 
192 	return priv->reg_ops->read(priv->reg_ctx, reg, valp);
193 }
194 
195 static int yt921x_reg_write(struct yt921x_priv *priv, u32 reg, u32 val)
196 {
197 	WARN_ON(!mutex_is_locked(&priv->reg_lock));
198 
199 	return priv->reg_ops->write(priv->reg_ctx, reg, val);
200 }
201 
/* Poll @reg until (value & @mask) matches the expected value.
 *
 * @valp is in/out: on entry it holds the expected masked value; on success
 * it is overwritten with the full register value last read.  Returns 0 on
 * success, -ETIMEDOUT on timeout, or the bus error from the failed read.
 */
static int
yt921x_reg_wait(struct yt921x_priv *priv, u32 reg, u32 mask, u32 *valp)
{
	u32 val;
	int res;
	int ret;

	/* Stop polling early when the read itself fails (res != 0); the
	 * short-circuit ensures val is only examined after a good read.
	 */
	ret = read_poll_timeout(yt921x_reg_read, res,
				res || (val & mask) == *valp,
				YT921X_POLL_SLEEP_US, YT921X_POLL_TIMEOUT_US,
				false, priv, reg, &val);
	if (ret)
		return ret;
	if (res)
		return res;

	*valp = val;
	return 0;
}
221 
222 static int
223 yt921x_reg_update_bits(struct yt921x_priv *priv, u32 reg, u32 mask, u32 val)
224 {
225 	int res;
226 	u32 v;
227 	u32 u;
228 
229 	res = yt921x_reg_read(priv, reg, &v);
230 	if (res)
231 		return res;
232 
233 	u = v;
234 	u &= ~mask;
235 	u |= val;
236 	if (u == v)
237 		return 0;
238 
239 	return yt921x_reg_write(priv, reg, u);
240 }
241 
/* OR @mask into @reg; never clears any bit */
static int yt921x_reg_set_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
{
	return yt921x_reg_update_bits(priv, reg, 0, mask);
}
246 
/* Clear @mask bits in @reg; never sets any bit */
static int yt921x_reg_clear_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
{
	return yt921x_reg_update_bits(priv, reg, mask, 0);
}
251 
252 static int
253 yt921x_reg_toggle_bits(struct yt921x_priv *priv, u32 reg, u32 mask, bool set)
254 {
255 	return yt921x_reg_update_bits(priv, reg, mask, !set ? 0 : mask);
256 }
257 
258 /* Some registers, like VLANn_CTRL, should always be written in 64-bit, even if
259  * you are to write only the lower / upper 32 bits.
260  *
261  * There is no such restriction for reading, but we still provide 64-bit read
262  * wrappers so that we always handle u64 values.
263  */
264 
/* Read a 64-bit register as two 32-bit halves: low word at @reg first,
 * then the high word at @reg + 4.
 */
static int yt921x_reg64_read(struct yt921x_priv *priv, u32 reg, u64 *valp)
{
	u32 lo;
	u32 hi;
	int res;

	res = yt921x_reg_read(priv, reg, &lo);
	if (res)
		return res;
	res = yt921x_reg_read(priv, reg + 4, &hi);
	if (res)
		return res;

	*valp = ((u64)hi << 32) | lo;
	return 0;
}
281 
/* Write a 64-bit register: low word at @reg first, then the high word at
 * @reg + 4.  Both halves are always written (see the comment above about
 * registers that must be written in full 64-bit).
 */
static int yt921x_reg64_write(struct yt921x_priv *priv, u32 reg, u64 val)
{
	int res;

	res = yt921x_reg_write(priv, reg, (u32)val);
	if (res)
		return res;
	return yt921x_reg_write(priv, reg + 4, (u32)(val >> 32));
}
291 
292 static int
293 yt921x_reg64_update_bits(struct yt921x_priv *priv, u32 reg, u64 mask, u64 val)
294 {
295 	int res;
296 	u64 v;
297 	u64 u;
298 
299 	res = yt921x_reg64_read(priv, reg, &v);
300 	if (res)
301 		return res;
302 
303 	u = v;
304 	u &= ~mask;
305 	u |= val;
306 	if (u == v)
307 		return 0;
308 
309 	return yt921x_reg64_write(priv, reg, u);
310 }
311 
/* Clear @mask bits in a 64-bit register */
static int yt921x_reg64_clear_bits(struct yt921x_priv *priv, u32 reg, u64 mask)
{
	return yt921x_reg64_update_bits(priv, reg, mask, 0);
}
316 
/* Read a 32-bit switch register over SMI/MDIO.
 *
 * The SMI protocol latches a 32-bit register address with two 16-bit
 * writes (high half first), then returns the data with two 16-bit reads
 * (high half first).
 */
static int yt921x_reg_mdio_read(void *context, u32 reg, u32 *valp)
{
	struct yt921x_reg_mdio *mdio = context;
	struct mii_bus *bus = mdio->bus;
	int addr = mdio->addr;
	u32 reg_addr;
	u32 reg_data;
	u32 val;
	int res;

	/* Hold the mdio bus lock to avoid (un)locking for 4 times */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	/* Latch the target register address, high 16 bits first */
	reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
		   YT921X_SMI_READ;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
	if (res)
		goto end;

	/* Fetch the register value, high 16 bits first */
	reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
		   YT921X_SMI_READ;
	res = __mdiobus_read(bus, addr, reg_data);
	if (res < 0)
		goto end;
	val = (u16)res;
	res = __mdiobus_read(bus, addr, reg_data);
	if (res < 0)
		goto end;
	val = (val << 16) | (u16)res;

	*valp = val;
	res = 0;

end:
	mutex_unlock(&bus->mdio_lock);
	return res;
}
357 
/* Write a 32-bit switch register over SMI/MDIO.
 *
 * Mirrors yt921x_reg_mdio_read(): latch the 32-bit address with two 16-bit
 * writes (high half first), then send the data the same way.
 */
static int yt921x_reg_mdio_write(void *context, u32 reg, u32 val)
{
	struct yt921x_reg_mdio *mdio = context;
	struct mii_bus *bus = mdio->bus;
	int addr = mdio->addr;
	u32 reg_addr;
	u32 reg_data;
	int res;

	/* Single bus lock around all four transactions */
	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
		   YT921X_SMI_WRITE;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
	if (res)
		goto end;

	reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
		   YT921X_SMI_WRITE;
	res = __mdiobus_write(bus, addr, reg_data, (u16)(val >> 16));
	if (res)
		goto end;
	res = __mdiobus_write(bus, addr, reg_data, (u16)val);
	if (res)
		goto end;

	res = 0;

end:
	mutex_unlock(&bus->mdio_lock);
	return res;
}
393 
/* Register access backend for the MDIO (SMI) management interface */
static const struct yt921x_reg_ops yt921x_reg_ops_mdio = {
	.read = yt921x_reg_mdio_read,
	.write = yt921x_reg_mdio_write,
};
398 
399 /* TODO: SPI/I2C */
400 
/* Wait for a pending internal-PHY MBUS operation to finish; completion is
 * signaled by the START bit reading back as 0.
 */
static int yt921x_intif_wait(struct yt921x_priv *priv)
{
	u32 val = 0;

	return yt921x_reg_wait(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START,
			       &val);
}
408 
/* Read a 16-bit register of an internal PHY via the MBUS indirection */
static int
yt921x_intif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	u32 val;
	int res;

	/* Make sure no previous operation is still in flight */
	res = yt921x_intif_wait(priv);
	if (res)
		return res;

	/* Program target port / register and a read opcode, then start */
	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_READ;
	res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	/* Wait for completion before fetching the result */
	res = yt921x_intif_wait(priv);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_INT_MBUS_DIN, &val);
	if (res)
		return res;

	/* PHY registers are 16 bit; anything wider is unexpected */
	if ((u16)val != val)
		dev_info(dev,
			 "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
			 __func__, port, reg, val);
	*valp = (u16)val;
	return 0;
}
447 
/* Write a 16-bit register of an internal PHY via the MBUS indirection */
static int
yt921x_intif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
{
	u32 mask;
	u32 ctrl;
	int res;

	/* Make sure no previous operation is still in flight */
	res = yt921x_intif_wait(priv);
	if (res)
		return res;

	/* Program target, load the data, then start the write */
	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_WRITE;
	res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_DOUT, val);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	return yt921x_intif_wait(priv);
}
475 
476 static int yt921x_mbus_int_read(struct mii_bus *mbus, int port, int reg)
477 {
478 	struct yt921x_priv *priv = mbus->priv;
479 	u16 val;
480 	int res;
481 
482 	if (port >= YT921X_PORT_NUM)
483 		return U16_MAX;
484 
485 	mutex_lock(&priv->reg_lock);
486 	res = yt921x_intif_read(priv, port, reg, &val);
487 	mutex_unlock(&priv->reg_lock);
488 
489 	if (res)
490 		return res;
491 	return val;
492 }
493 
494 static int
495 yt921x_mbus_int_write(struct mii_bus *mbus, int port, int reg, u16 data)
496 {
497 	struct yt921x_priv *priv = mbus->priv;
498 	int res;
499 
500 	if (port >= YT921X_PORT_NUM)
501 		return -ENODEV;
502 
503 	mutex_lock(&priv->reg_lock);
504 	res = yt921x_intif_write(priv, port, reg, data);
505 	mutex_unlock(&priv->reg_lock);
506 
507 	return res;
508 }
509 
/* Allocate and register the MDIO bus for the switch's internal PHYs,
 * described by DT node @mnp.  Managed (devm) allocation/registration.
 */
static int
yt921x_mbus_int_init(struct yt921x_priv *priv, struct device_node *mnp)
{
	struct device *dev = to_device(priv);
	struct mii_bus *mbus;
	int res;

	mbus = devm_mdiobus_alloc(dev);
	if (!mbus)
		return -ENOMEM;

	mbus->name = "YT921x internal MDIO bus";
	snprintf(mbus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	mbus->priv = priv;
	mbus->read = yt921x_mbus_int_read;
	mbus->write = yt921x_mbus_int_write;
	mbus->parent = dev;
	/* Only addresses 0 .. YT921X_PORT_NUM - 1 are scanned */
	mbus->phy_mask = (u32)~GENMASK(YT921X_PORT_NUM - 1, 0);

	res = devm_of_mdiobus_register(dev, mbus, mnp);
	if (res)
		return res;

	priv->mbus_int = mbus;

	return 0;
}
537 
/* Wait for a pending external MBUS operation to finish; completion is
 * signaled by the START bit reading back as 0.
 */
static int yt921x_extif_wait(struct yt921x_priv *priv)
{
	u32 val = 0;

	return yt921x_reg_wait(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START,
			       &val);
}
545 
/* Read a 16-bit register of an external PHY (clause 22) via the switch's
 * external MBUS master.
 */
static int
yt921x_extif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	u32 val;
	int res;

	/* Make sure no previous operation is still in flight */
	res = yt921x_extif_wait(priv);
	if (res)
		return res;

	/* Program target port / register, C22 frame type, read opcode */
	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_READ;
	res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	/* Wait for completion before fetching the result */
	res = yt921x_extif_wait(priv);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_EXT_MBUS_DIN, &val);
	if (res)
		return res;

	/* PHY registers are 16 bit; anything wider is unexpected */
	if ((u16)val != val)
		dev_info(dev,
			 "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
			 __func__, port, reg, val);
	*valp = (u16)val;
	return 0;
}
584 
/* Write a 16-bit register of an external PHY (clause 22) via the switch's
 * external MBUS master.
 */
static int
yt921x_extif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
{
	u32 mask;
	u32 ctrl;
	int res;

	/* Make sure no previous operation is still in flight */
	res = yt921x_extif_wait(priv);
	if (res)
		return res;

	/* Program target, load the data, then start the write */
	mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
	       YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
	ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
	       YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_WRITE;
	res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_DOUT, val);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
	if (res)
		return res;

	return yt921x_extif_wait(priv);
}
612 
613 static int yt921x_mbus_ext_read(struct mii_bus *mbus, int port, int reg)
614 {
615 	struct yt921x_priv *priv = mbus->priv;
616 	u16 val;
617 	int res;
618 
619 	mutex_lock(&priv->reg_lock);
620 	res = yt921x_extif_read(priv, port, reg, &val);
621 	mutex_unlock(&priv->reg_lock);
622 
623 	if (res)
624 		return res;
625 	return val;
626 }
627 
628 static int
629 yt921x_mbus_ext_write(struct mii_bus *mbus, int port, int reg, u16 data)
630 {
631 	struct yt921x_priv *priv = mbus->priv;
632 	int res;
633 
634 	mutex_lock(&priv->reg_lock);
635 	res = yt921x_extif_write(priv, port, reg, data);
636 	mutex_unlock(&priv->reg_lock);
637 
638 	return res;
639 }
640 
/* Allocate and register the MDIO bus for PHYs attached to the switch's
 * external MDIO master, described by DT node @mnp.
 */
static int
yt921x_mbus_ext_init(struct yt921x_priv *priv, struct device_node *mnp)
{
	struct device *dev = to_device(priv);
	struct mii_bus *mbus;
	int res;

	mbus = devm_mdiobus_alloc(dev);
	if (!mbus)
		return -ENOMEM;

	mbus->name = "YT921x external MDIO bus";
	/* "@ext" suffix keeps the id distinct from the internal bus */
	snprintf(mbus->id, MII_BUS_ID_SIZE, "%s@ext", dev_name(dev));
	mbus->priv = priv;
	/* TODO: c45? */
	mbus->read = yt921x_mbus_ext_read;
	mbus->write = yt921x_mbus_ext_write;
	mbus->parent = dev;

	res = devm_of_mdiobus_register(dev, mbus, mnp);
	if (res)
		return res;

	priv->mbus_ext = mbus;

	return 0;
}
668 
/* Read and handle overflow of 32bit MIBs. MIB buffer must be zeroed before. */
static int yt921x_read_mib(struct yt921x_priv *priv, int port)
{
	struct yt921x_port *pp = &priv->ports[port];
	struct device *dev = to_device(priv);
	struct yt921x_mib *mib = &pp->mib;
	int res = 0;

	/* Reading of yt921x_port::mib is not protected by a lock and it's vain
	 * to keep its consistency, since we have to read registers one by one
	 * and there is no way to make a snapshot of MIB stats.
	 *
	 * Writing (by this function only) is and should be protected by
	 * reg_lock.
	 */

	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
		u32 reg = YT921X_MIBn_DATA0(port) + desc->offset;
		/* Descriptor order matches struct yt921x_mib field order,
		 * so the struct can be indexed as a flat u64 array.
		 */
		u64 *valp = &((u64 *)mib)[i];
		u32 val0;
		u64 val;

		res = yt921x_reg_read(priv, reg, &val0);
		if (res)
			break;

		if (desc->size <= 1) {
			/* 32-bit counter: splice the fresh low word into the
			 * cached 64-bit value; if the result went backwards
			 * the hardware counter wrapped, so carry into the
			 * high word.
			 */
			u64 old_val = *valp;

			val = (old_val & ~(u64)U32_MAX) | val0;
			if (val < old_val)
				val += 1ull << 32;
		} else {
			/* Native 64-bit counter: read the high word too */
			u32 val1;

			res = yt921x_reg_read(priv, reg + 4, &val1);
			if (res)
				break;
			val = ((u64)val1 << 32) | val0;
		}

		/* Paired with lockless readers such as get_stats64 */
		WRITE_ONCE(*valp, val);
	}

	/* Derived frame totals consumed by stats64 / eth_mac_stats */
	pp->rx_frames = mib->rx_64byte + mib->rx_65_127byte +
			mib->rx_128_255byte + mib->rx_256_511byte +
			mib->rx_512_1023byte + mib->rx_1024_1518byte +
			mib->rx_jumbo;
	pp->tx_frames = mib->tx_64byte + mib->tx_65_127byte +
			mib->tx_128_255byte + mib->tx_256_511byte +
			mib->tx_512_1023byte + mib->tx_1024_1518byte +
			mib->tx_jumbo;

	if (res)
		dev_err(dev, "Failed to %s port %d: %i\n", "read stats for",
			port, res);
	return res;
}
728 
/* Periodic worker: refresh one port's MIB snapshot so that 32-bit hardware
 * counters are observed before they can wrap (see the interval comment).
 */
static void yt921x_poll_mib(struct work_struct *work)
{
	struct yt921x_port *pp = container_of_const(work, struct yt921x_port,
						    mib_read.work);
	/* Recover priv: pp - pp->index is &priv->ports[0]; then step back
	 * from the ports[] member to the enclosing struct yt921x_priv.
	 */
	struct yt921x_priv *priv = (void *)(pp - pp->index) -
				   offsetof(struct yt921x_priv, ports);
	unsigned long delay = YT921X_STATS_INTERVAL_JIFFIES;
	int port = pp->index;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);
	/* Back off when the hardware is not responding */
	if (res)
		delay *= 4;

	schedule_delayed_work(&pp->mib_read, delay);
}
747 
748 static void
749 yt921x_dsa_get_strings(struct dsa_switch *ds, int port, u32 stringset,
750 		       uint8_t *data)
751 {
752 	if (stringset != ETH_SS_STATS)
753 		return;
754 
755 	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
756 		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
757 
758 		if (desc->name)
759 			ethtool_puts(&data, desc->name);
760 	}
761 }
762 
763 static void
764 yt921x_dsa_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
765 {
766 	struct yt921x_priv *priv = to_yt921x_priv(ds);
767 	struct yt921x_port *pp = &priv->ports[port];
768 	struct yt921x_mib *mib = &pp->mib;
769 	size_t j;
770 
771 	mutex_lock(&priv->reg_lock);
772 	yt921x_read_mib(priv, port);
773 	mutex_unlock(&priv->reg_lock);
774 
775 	j = 0;
776 	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
777 		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
778 
779 		if (!desc->name)
780 			continue;
781 
782 		data[j] = ((u64 *)mib)[i];
783 		j++;
784 	}
785 }
786 
787 static int yt921x_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset)
788 {
789 	int cnt = 0;
790 
791 	if (sset != ETH_SS_STATS)
792 		return 0;
793 
794 	for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
795 		const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
796 
797 		if (desc->name)
798 			cnt++;
799 	}
800 
801 	return cnt;
802 }
803 
/* Standard IEEE 802.3 MAC stats, mapped from the cached MIB snapshot.
 * Commented-out fields have no matching hardware counter.
 */
static void
yt921x_dsa_get_eth_mac_stats(struct dsa_switch *ds, int port,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	/* Refresh the snapshot from hardware first */
	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	mac_stats->FramesTransmittedOK = pp->tx_frames;
	mac_stats->SingleCollisionFrames = mib->tx_single_collisions;
	mac_stats->MultipleCollisionFrames = mib->tx_multiple_collisions;
	mac_stats->FramesReceivedOK = pp->rx_frames;
	mac_stats->FrameCheckSequenceErrors = mib->rx_crc_errors;
	mac_stats->AlignmentErrors = mib->rx_alignment_errors;
	mac_stats->OctetsTransmittedOK = mib->tx_good_bytes;
	mac_stats->FramesWithDeferredXmissions = mib->tx_deferred;
	mac_stats->LateCollisions = mib->tx_late_collisions;
	mac_stats->FramesAbortedDueToXSColls = mib->tx_aborted_errors;
	/* mac_stats->FramesLostDueToIntMACXmitError */
	/* mac_stats->CarrierSenseErrors */
	mac_stats->OctetsReceivedOK = mib->rx_good_bytes;
	/* mac_stats->FramesLostDueToIntMACRcvError */
	mac_stats->MulticastFramesXmittedOK = mib->tx_multicast;
	mac_stats->BroadcastFramesXmittedOK = mib->tx_broadcast;
	/* mac_stats->FramesWithExcessiveDeferral */
	mac_stats->MulticastFramesReceivedOK = mib->rx_multicast;
	mac_stats->BroadcastFramesReceivedOK = mib->rx_broadcast;
	/* mac_stats->InRangeLengthErrors */
	/* mac_stats->OutOfRangeLengthField */
	mac_stats->FrameTooLongErrors = mib->rx_oversize_errors;
}
839 
/* MAC control (pause) frame stats from the cached MIB snapshot */
static void
yt921x_dsa_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	/* Refresh the snapshot from hardware first */
	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	ctrl_stats->MACControlFramesTransmitted = mib->tx_pause;
	ctrl_stats->MACControlFramesReceived = mib->rx_pause;
	/* ctrl_stats->UnsupportedOpcodesReceived */
}
856 
/* Packet-size buckets; entries must line up with the hist[]/hist_tx[]
 * assignments in yt921x_dsa_get_rmon_stats().
 */
static const struct ethtool_rmon_hist_range yt921x_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, YT921X_FRAME_SIZE_MAX },
	{}
};
867 
/* RMON stats: error counters plus per-size-bucket histograms, bucket
 * boundaries as published in yt921x_rmon_ranges.
 */
static void
yt921x_dsa_get_rmon_stats(struct dsa_switch *ds, int port,
			  struct ethtool_rmon_stats *rmon_stats,
			  const struct ethtool_rmon_hist_range **ranges)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	/* Refresh the snapshot from hardware first */
	mutex_lock(&priv->reg_lock);
	yt921x_read_mib(priv, port);
	mutex_unlock(&priv->reg_lock);

	*ranges = yt921x_rmon_ranges;

	rmon_stats->undersize_pkts = mib->rx_undersize_errors;
	rmon_stats->oversize_pkts = mib->rx_oversize_errors;
	rmon_stats->fragments = mib->rx_alignment_errors;
	/* rmon_stats->jabbers */

	rmon_stats->hist[0] = mib->rx_64byte;
	rmon_stats->hist[1] = mib->rx_65_127byte;
	rmon_stats->hist[2] = mib->rx_128_255byte;
	rmon_stats->hist[3] = mib->rx_256_511byte;
	rmon_stats->hist[4] = mib->rx_512_1023byte;
	rmon_stats->hist[5] = mib->rx_1024_1518byte;
	rmon_stats->hist[6] = mib->rx_jumbo;

	rmon_stats->hist_tx[0] = mib->tx_64byte;
	rmon_stats->hist_tx[1] = mib->tx_65_127byte;
	rmon_stats->hist_tx[2] = mib->tx_128_255byte;
	rmon_stats->hist_tx[3] = mib->tx_256_511byte;
	rmon_stats->hist_tx[4] = mib->tx_512_1023byte;
	rmon_stats->hist_tx[5] = mib->tx_1024_1518byte;
	rmon_stats->hist_tx[6] = mib->tx_jumbo;
}
904 
/* Fill stats64 from the cached MIB snapshot maintained by yt921x_poll_mib().
 * No register access here — this callback cannot sleep (see the comment at
 * YT921X_STATS_INTERVAL_JIFFIES), so only cached counters are read.
 */
static void
yt921x_dsa_get_stats64(struct dsa_switch *ds, int port,
		       struct rtnl_link_stats64 *stats)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct yt921x_port *pp = &priv->ports[port];
	struct yt921x_mib *mib = &pp->mib;

	stats->rx_length_errors = mib->rx_undersize_errors +
				  mib->rx_fragment_errors;
	stats->rx_over_errors = mib->rx_oversize_errors;
	stats->rx_crc_errors = mib->rx_crc_errors;
	stats->rx_frame_errors = mib->rx_alignment_errors;
	/* stats->rx_fifo_errors */
	/* stats->rx_missed_errors */

	stats->tx_aborted_errors = mib->tx_aborted_errors;
	/* stats->tx_carrier_errors */
	stats->tx_fifo_errors = mib->tx_undersize_errors;
	/* stats->tx_heartbeat_errors */
	stats->tx_window_errors = mib->tx_late_collisions;

	stats->rx_packets = pp->rx_frames;
	stats->tx_packets = pp->tx_frames;
	/* The good-bytes counters include the FCS of each frame; strip it */
	stats->rx_bytes = mib->rx_good_bytes - ETH_FCS_LEN * stats->rx_packets;
	stats->tx_bytes = mib->tx_good_bytes - ETH_FCS_LEN * stats->tx_packets;
	stats->rx_errors = stats->rx_length_errors + stats->rx_over_errors +
			   stats->rx_crc_errors + stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_fifo_errors +
			   stats->tx_window_errors;
	stats->rx_dropped = mib->rx_dropped;
	/* stats->tx_dropped */
	stats->multicast = mib->rx_multicast;
	stats->collisions = mib->tx_collisions;
}
940 
941 static void
942 yt921x_dsa_get_pause_stats(struct dsa_switch *ds, int port,
943 			   struct ethtool_pause_stats *pause_stats)
944 {
945 	struct yt921x_priv *priv = to_yt921x_priv(ds);
946 	struct yt921x_port *pp = &priv->ports[port];
947 	struct yt921x_mib *mib = &pp->mib;
948 
949 	mutex_lock(&priv->reg_lock);
950 	yt921x_read_mib(priv, port);
951 	mutex_unlock(&priv->reg_lock);
952 
953 	pause_stats->tx_pause_frames = mib->tx_pause;
954 	pause_stats->rx_pause_frames = mib->rx_pause;
955 }
956 
/* Apply the EEE configuration for @port: tracks a per-port enable mask and
 * flips a global strap whenever the "any port uses EEE" state changes.
 */
static int
yt921x_set_eee(struct yt921x_priv *priv, int port, struct ethtool_keee *e)
{
	/* Poor datasheet for EEE operations; don't ask if you are confused */

	bool enable = e->eee_enabled;
	u16 new_mask;
	int res;

	/* Enable / disable global EEE */
	new_mask = priv->eee_ports_mask;
	new_mask &= ~BIT(port);
	new_mask |= !enable ? 0 : BIT(port);

	/* Touch the global strap only on a 0 <-> non-zero transition of the
	 * port mask.
	 */
	if (!!new_mask != !!priv->eee_ports_mask) {
		res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_FUNC,
					     YT921X_PON_STRAP_EEE, !!new_mask);
		if (res)
			return res;
		res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_VAL,
					     YT921X_PON_STRAP_EEE, !!new_mask);
		if (res)
			return res;
	}

	priv->eee_ports_mask = new_mask;

	/* Enable / disable port EEE */
	res = yt921x_reg_toggle_bits(priv, YT921X_EEE_CTRL,
				     YT921X_EEE_CTRL_ENn(port), enable);
	if (res)
		return res;
	res = yt921x_reg_toggle_bits(priv, YT921X_EEEn_VAL(port),
				     YT921X_EEE_VAL_DATA, enable);
	if (res)
		return res;

	return 0;
}
996 
997 static int
998 yt921x_dsa_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
999 {
1000 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1001 	int res;
1002 
1003 	mutex_lock(&priv->reg_lock);
1004 	res = yt921x_set_eee(priv, port, e);
1005 	mutex_unlock(&priv->reg_lock);
1006 
1007 	return res;
1008 }
1009 
static int
yt921x_dsa_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	/* Only serves as packet filter, since the frame size is always set to
	 * maximum after reset
	 */

	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct dsa_port *dp = dsa_to_port(ds, port);
	int frame_size;
	int res;

	/* MTU is L2 payload; add header and FCS to get the frame size */
	frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	/* Frames on the CPU port additionally carry the DSA tag */
	if (dsa_port_is_cpu(dp))
		frame_size += YT921X_TAG_LEN;

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_update_bits(priv, YT921X_MACn_FRAME(port),
				     YT921X_MAC_FRAME_SIZE_M,
				     YT921X_MAC_FRAME_SIZE(frame_size));
	mutex_unlock(&priv->reg_lock);

	return res;
}
1034 
1035 static int yt921x_dsa_port_max_mtu(struct dsa_switch *ds, int port)
1036 {
1037 	/* Only called for user ports, exclude tag len here */
1038 	return YT921X_FRAME_SIZE_MAX - ETH_HLEN - ETH_FCS_LEN - YT921X_TAG_LEN;
1039 }
1040 
1041 static int
1042 yt921x_mirror_del(struct yt921x_priv *priv, int port, bool ingress)
1043 {
1044 	u32 mask;
1045 
1046 	if (ingress)
1047 		mask = YT921X_MIRROR_IGR_PORTn(port);
1048 	else
1049 		mask = YT921X_MIRROR_EGR_PORTn(port);
1050 	return yt921x_reg_clear_bits(priv, YT921X_MIRROR, mask);
1051 }
1052 
/* Add @port as a mirror source and point the single global sniffer port at
 * @to_local_port.  Fails with -EBUSY if other mirror rules already target a
 * different destination.
 */
static int
yt921x_mirror_add(struct yt921x_priv *priv, int port, bool ingress,
		  int to_local_port, struct netlink_ext_ack *extack)
{
	u32 srcs;
	u32 ctrl;
	u32 val;
	u32 dst;
	int res;

	srcs = ingress ? YT921X_MIRROR_IGR_PORTn(port) :
			 YT921X_MIRROR_EGR_PORTn(port);
	dst = YT921X_MIRROR_PORT(to_local_port);

	res = yt921x_reg_read(priv, YT921X_MIRROR, &val);
	if (res)
		return res;

	/* other mirror tasks & different dst port -> conflict */
	if ((val & ~srcs & (YT921X_MIRROR_EGR_PORTS_M |
			    YT921X_MIRROR_IGR_PORTS_M)) &&
	    (val & YT921X_MIRROR_PORT_M) != dst) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Sniffer port is already configured, delete existing rules & retry");
		return -EBUSY;
	}

	/* Replace the destination, keep existing sources, add ours */
	ctrl = val & ~YT921X_MIRROR_PORT_M;
	ctrl |= srcs;
	ctrl |= dst;

	if (ctrl == val)
		return 0;

	return yt921x_reg_write(priv, YT921X_MIRROR, ctrl);
}
1091 
1092 static void
1093 yt921x_dsa_port_mirror_del(struct dsa_switch *ds, int port,
1094 			   struct dsa_mall_mirror_tc_entry *mirror)
1095 {
1096 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1097 	struct device *dev = to_device(priv);
1098 	int res;
1099 
1100 	mutex_lock(&priv->reg_lock);
1101 	res = yt921x_mirror_del(priv, port, mirror->ingress);
1102 	mutex_unlock(&priv->reg_lock);
1103 
1104 	if (res)
1105 		dev_err(dev, "Failed to %s port %d: %i\n", "unmirror",
1106 			port, res);
1107 }
1108 
1109 static int
1110 yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port,
1111 			   struct dsa_mall_mirror_tc_entry *mirror,
1112 			   bool ingress, struct netlink_ext_ack *extack)
1113 {
1114 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1115 	int res;
1116 
1117 	mutex_lock(&priv->reg_lock);
1118 	res = yt921x_mirror_add(priv, port, ingress,
1119 				mirror->to_local_port, extack);
1120 	mutex_unlock(&priv->reg_lock);
1121 
1122 	return res;
1123 }
1124 
1125 static int yt921x_lag_hash(struct yt921x_priv *priv, u32 ctrl, bool unique_lag,
1126 			   struct netlink_ext_ack *extack)
1127 {
1128 	u32 val;
1129 	int res;
1130 
1131 	/* Hash Mode is global. Make sure the same Hash Mode is set to all the
1132 	 * 2 possible lags.
1133 	 * If we are the unique LAG we can set whatever hash mode we want.
1134 	 * To change hash mode it's needed to remove all LAG and change the mode
1135 	 * with the latest.
1136 	 */
1137 	if (unique_lag) {
1138 		res = yt921x_reg_write(priv, YT921X_LAG_HASH, ctrl);
1139 		if (res)
1140 			return res;
1141 	} else {
1142 		res = yt921x_reg_read(priv, YT921X_LAG_HASH, &val);
1143 		if (res)
1144 			return res;
1145 
1146 		if (val != ctrl) {
1147 			NL_SET_ERR_MSG_MOD(extack,
1148 					   "Mismatched Hash Mode across different lags is not supported");
1149 			return -EOPNOTSUPP;
1150 		}
1151 	}
1152 
1153 	return 0;
1154 }
1155 
1156 static int yt921x_lag_set(struct yt921x_priv *priv, u8 index, u16 ports_mask)
1157 {
1158 	unsigned long targets_mask = ports_mask;
1159 	unsigned int cnt;
1160 	u32 ctrl;
1161 	int port;
1162 	int res;
1163 
1164 	cnt = 0;
1165 	for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
1166 		ctrl = YT921X_LAG_MEMBER_PORT(port);
1167 		res = yt921x_reg_write(priv, YT921X_LAG_MEMBERnm(index, cnt),
1168 				       ctrl);
1169 		if (res)
1170 			return res;
1171 
1172 		cnt++;
1173 	}
1174 
1175 	ctrl = YT921X_LAG_GROUP_PORTS(ports_mask) |
1176 	       YT921X_LAG_GROUP_MEMBER_NUM(cnt);
1177 	return yt921x_reg_write(priv, YT921X_LAG_GROUPn(index), ctrl);
1178 }
1179 
1180 static int
1181 yt921x_dsa_port_lag_leave(struct dsa_switch *ds, int port, struct dsa_lag lag)
1182 {
1183 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1184 	struct dsa_port *dp;
1185 	u32 ctrl;
1186 	int res;
1187 
1188 	if (!lag.id)
1189 		return -EINVAL;
1190 
1191 	ctrl = 0;
1192 	dsa_lag_foreach_port(dp, ds->dst, &lag)
1193 		ctrl |= BIT(dp->index);
1194 
1195 	mutex_lock(&priv->reg_lock);
1196 	res = yt921x_lag_set(priv, lag.id - 1, ctrl);
1197 	mutex_unlock(&priv->reg_lock);
1198 
1199 	return res;
1200 }
1201 
1202 static int
1203 yt921x_dsa_port_lag_check(struct dsa_switch *ds, struct dsa_lag lag,
1204 			  struct netdev_lag_upper_info *info,
1205 			  struct netlink_ext_ack *extack)
1206 {
1207 	unsigned int members;
1208 	struct dsa_port *dp;
1209 
1210 	if (!lag.id)
1211 		return -EINVAL;
1212 
1213 	members = 0;
1214 	dsa_lag_foreach_port(dp, ds->dst, &lag)
1215 		/* Includes the port joining the LAG */
1216 		members++;
1217 
1218 	if (members > YT921X_LAG_PORT_NUM) {
1219 		NL_SET_ERR_MSG_MOD(extack,
1220 				   "Cannot offload more than 4 LAG ports");
1221 		return -EOPNOTSUPP;
1222 	}
1223 
1224 	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
1225 		NL_SET_ERR_MSG_MOD(extack,
1226 				   "Can only offload LAG using hash TX type");
1227 		return -EOPNOTSUPP;
1228 	}
1229 
1230 	if (info->hash_type != NETDEV_LAG_HASH_L2 &&
1231 	    info->hash_type != NETDEV_LAG_HASH_L23 &&
1232 	    info->hash_type != NETDEV_LAG_HASH_L34) {
1233 		NL_SET_ERR_MSG_MOD(extack,
1234 				   "Can only offload L2 or L2+L3 or L3+L4 TX hash");
1235 		return -EOPNOTSUPP;
1236 	}
1237 
1238 	return 0;
1239 }
1240 
/* Offload joining @port to @lag: program the (global) hash policy and the
 * per-group member table.
 */
static int
yt921x_dsa_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
			 struct netdev_lag_upper_info *info,
			 struct netlink_ext_ack *extack)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct dsa_port *dp;
	bool unique_lag;
	unsigned int i;
	u32 ctrl;
	int res;

	/* Validate member count, TX type and hash policy first */
	res = yt921x_dsa_port_lag_check(ds, lag, info, extack);
	if (res)
		return res;

	/* Map the requested balancing policy onto hash-key selector bits */
	ctrl = 0;
	switch (info->hash_type) {
	case NETDEV_LAG_HASH_L34:
		ctrl |= YT921X_LAG_HASH_IP_DST;
		ctrl |= YT921X_LAG_HASH_IP_SRC;
		ctrl |= YT921X_LAG_HASH_IP_PROTO;

		ctrl |= YT921X_LAG_HASH_L4_DPORT;
		ctrl |= YT921X_LAG_HASH_L4_SPORT;
		break;
	case NETDEV_LAG_HASH_L23:
		ctrl |= YT921X_LAG_HASH_MAC_DA;
		ctrl |= YT921X_LAG_HASH_MAC_SA;

		ctrl |= YT921X_LAG_HASH_IP_DST;
		ctrl |= YT921X_LAG_HASH_IP_SRC;
		ctrl |= YT921X_LAG_HASH_IP_PROTO;
		break;
	case NETDEV_LAG_HASH_L2:
		ctrl |= YT921X_LAG_HASH_MAC_DA;
		ctrl |= YT921X_LAG_HASH_MAC_SA;
		break;
	default:
		/* Already rejected by yt921x_dsa_port_lag_check() */
		return -EOPNOTSUPP;
	}

	/* Check if we are the unique configured LAG */
	unique_lag = true;
	dsa_lags_foreach_id(i, ds->dst)
		if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
			unique_lag = false;
			break;
		}

	mutex_lock(&priv->reg_lock);
	do {
		/* Hash mode is global; fails if another LAG disagrees */
		res = yt921x_lag_hash(priv, ctrl, unique_lag, extack);
		if (res)
			break;

		/* Collect the member ports, including the joining one */
		ctrl = 0;
		dsa_lag_foreach_port(dp, ds->dst, &lag)
			ctrl |= BIT(dp->index);
		res = yt921x_lag_set(priv, lag.id - 1, ctrl);
	} while (0);
	mutex_unlock(&priv->reg_lock);

	return res;
}
1306 
1307 static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp)
1308 {
1309 	struct device *dev = to_device(priv);
1310 	u32 val = YT921X_FDB_RESULT_DONE;
1311 	int res;
1312 
1313 	res = yt921x_reg_wait(priv, YT921X_FDB_RESULT, YT921X_FDB_RESULT_DONE,
1314 			      &val);
1315 	if (res) {
1316 		dev_err(dev, "FDB probably stuck\n");
1317 		return res;
1318 	}
1319 
1320 	*valp = val;
1321 	return 0;
1322 }
1323 
1324 static int
1325 yt921x_fdb_in01(struct yt921x_priv *priv, const unsigned char *addr,
1326 		u16 vid, u32 ctrl1)
1327 {
1328 	u32 ctrl;
1329 	int res;
1330 
1331 	ctrl = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
1332 	res = yt921x_reg_write(priv, YT921X_FDB_IN0, ctrl);
1333 	if (res)
1334 		return res;
1335 
1336 	ctrl = ctrl1 | YT921X_FDB_IO1_FID(vid) | (addr[4] << 8) | addr[5];
1337 	return yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
1338 }
1339 
/* Look up @addr + @vid in the FDB.  On success *@indexp holds the matching
 * entry's index, or YT921X_FDB_NUM if no such entry exists.  The entry
 * remains latched in the FDB_OUT* registers for the caller to read.
 */
static int
yt921x_fdb_has(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
	       u16 *indexp)
{
	u32 ctrl;
	u32 val;
	int res;

	/* Load the MAC + FID key into IN0/IN1 */
	res = yt921x_fdb_in01(priv, addr, vid, 0);
	if (res)
		return res;

	ctrl = 0;
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	/* Start a GET_ONE lookup and wait for its completion */
	ctrl = YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;

	res = yt921x_fdb_wait(priv, &val);
	if (res)
		return res;
	if (val & YT921X_FDB_RESULT_NOTFOUND) {
		/* Signal "not found" with an out-of-range index */
		*indexp = YT921X_FDB_NUM;
		return 0;
	}

	*indexp = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
	return 0;
}
1373 
/* Read back the entry produced by a previously started FDB operation.
 * *@ports_maskp is set to 0 when the operation found nothing or the slot
 * holds no valid entry; otherwise the MAC address, FID, slot index, egress
 * port mask and entry status are all filled in.
 */
static int
yt921x_fdb_read(struct yt921x_priv *priv, unsigned char *addr, u16 *vidp,
		u16 *ports_maskp, u16 *indexp, u8 *statusp)
{
	struct device *dev = to_device(priv);
	u16 index;
	u32 data0;
	u32 data1;
	u32 data2;
	u32 val;
	int res;

	/* Wait for the pending operation to finish */
	res = yt921x_fdb_wait(priv, &val);
	if (res)
		return res;
	if (val & YT921X_FDB_RESULT_NOTFOUND) {
		*ports_maskp = 0;
		return 0;
	}
	index = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);

	res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &data1);
	if (res)
		return res;
	if ((data1 & YT921X_FDB_IO1_STATUS_M) ==
	    YT921X_FDB_IO1_STATUS_INVALID) {
		/* Slot exists but holds no valid entry */
		*ports_maskp = 0;
		return 0;
	}

	res = yt921x_reg_read(priv, YT921X_FDB_OUT0, &data0);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &data2);
	if (res)
		return res;

	/* MAC bytes 0..3 live in OUT0, bytes 4..5 in the low half of OUT1;
	 * mirrors the layout written by yt921x_fdb_in01()
	 */
	addr[0] = data0 >> 24;
	addr[1] = data0 >> 16;
	addr[2] = data0 >> 8;
	addr[3] = data0;
	addr[4] = data1 >> 8;
	addr[5] = data1;
	*vidp = FIELD_GET(YT921X_FDB_IO1_FID_M, data1);
	*indexp = index;
	*ports_maskp = FIELD_GET(YT921X_FDB_IO2_EGR_PORTS_M, data2);
	*statusp = FIELD_GET(YT921X_FDB_IO1_STATUS_M, data1);

	dev_dbg(dev,
		"%s: index 0x%x, mac %02x:%02x:%02x:%02x:%02x:%02x, vid %d, ports 0x%x, status %d\n",
		__func__, *indexp, addr[0], addr[1], addr[2], addr[3],
		addr[4], addr[5], *vidp, *ports_maskp, *statusp);
	return 0;
}
1428 
/* Walk the hardware FDB and report, via @cb, every unicast entry whose
 * egress port set intersects @ports_mask.
 */
static int
yt921x_fdb_dump(struct yt921x_priv *priv, u16 ports_mask,
		dsa_fdb_dump_cb_t *cb, void *data)
{
	unsigned char addr[ETH_ALEN];
	u8 status;
	u16 pmask;
	u16 index;
	u32 ctrl;
	u16 vid;
	int res;

	/* Fetch slot 0 explicitly; the GET_NEXT loop below starts from
	 * index 0 and so apparently only returns later slots
	 */
	ctrl = YT921X_FDB_OP_INDEX(0) | YT921X_FDB_OP_MODE_INDEX |
	       YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;
	res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, &status);
	if (res)
		return res;
	if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
		res = cb(addr, vid,
			 status == YT921X_FDB_ENTRY_STATUS_STATIC, data);
		if (res)
			return res;
	}

	/* Restrict GET_NEXT matching to the ports of interest */
	ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	index = 0;
	do {
		ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
		       YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT |
		       YT921X_FDB_OP_OP_GET_NEXT | YT921X_FDB_OP_START;
		res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
		if (res)
			return res;

		res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index,
				      &status);
		if (res)
			return res;
		if (!pmask)
			/* No further matching entries */
			break;

		if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
			res = cb(addr, vid,
				 status == YT921X_FDB_ENTRY_STATUS_STATIC,
				 data);
			if (res)
				return res;
		}

		/* Never call GET_NEXT with 4095, otherwise it will hang
		 * forever until a reset!
		 */
	} while (index < YT921X_FDB_NUM - 1);

	return 0;
}
1492 
/* Flush FDB entries whose egress set intersects @ports_mask.  A @vid of
 * 4096 or more flushes regardless of VID, otherwise only entries in @vid.
 * Static entries survive unless @flush_static is set.
 */
static int
yt921x_fdb_flush_raw(struct yt921x_priv *priv, u16 ports_mask, u16 vid,
		     bool flush_static)
{
	u32 ctrl;
	u32 val;
	int res;

	/* A valid VID restricts the flush to that FID */
	if (vid < 4096) {
		ctrl = YT921X_FDB_IO1_FID(vid);
		res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
		if (res)
			return res;
	}

	ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	/* Start the flush and wait for it to complete */
	ctrl = YT921X_FDB_OP_OP_FLUSH | YT921X_FDB_OP_START;
	if (vid >= 4096)
		ctrl |= YT921X_FDB_OP_FLUSH_PORT;
	else
		ctrl |= YT921X_FDB_OP_FLUSH_PORT_VID;
	if (flush_static)
		ctrl |= YT921X_FDB_OP_FLUSH_STATIC;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;

	res = yt921x_fdb_wait(priv, &val);
	if (res)
		return res;

	return 0;
}
1530 
1531 static int
1532 yt921x_fdb_flush_port(struct yt921x_priv *priv, int port, bool flush_static)
1533 {
1534 	return yt921x_fdb_flush_raw(priv, BIT(port), 4096, flush_static);
1535 }
1536 
1537 static int
1538 yt921x_fdb_add_index_in12(struct yt921x_priv *priv, u16 index, u16 ctrl1,
1539 			  u16 ctrl2)
1540 {
1541 	u32 ctrl;
1542 	u32 val;
1543 	int res;
1544 
1545 	res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl1);
1546 	if (res)
1547 		return res;
1548 	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl2);
1549 	if (res)
1550 		return res;
1551 
1552 	ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
1553 	       YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
1554 	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
1555 	if (res)
1556 		return res;
1557 
1558 	return yt921x_fdb_wait(priv, &val);
1559 }
1560 
/* Install a static FDB entry mapping @addr + @vid to the egress ports in
 * @ports_mask, letting the hardware choose the slot (no explicit index).
 */
static int
yt921x_fdb_add(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
	       u16 ports_mask)
{
	u32 ctrl;
	u32 val;
	int res;

	/* Key plus static status into IN0/IN1 */
	ctrl = YT921X_FDB_IO1_STATUS_STATIC;
	res = yt921x_fdb_in01(priv, addr, vid, ctrl);
	if (res)
		return res;

	/* Egress port set into IN2 */
	ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
	if (res)
		return res;

	/* Start the ADD and wait for completion */
	ctrl = YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
	res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
	if (res)
		return res;

	return yt921x_fdb_wait(priv, &val);
}
1586 
/* Remove @ports_mask from the egress set of the FDB entry matching
 * @addr + @vid.  Deletes the entry outright once no egress port remains.
 * No-op if the entry does not exist or none of the ports are members.
 */
static int
yt921x_fdb_leave(struct yt921x_priv *priv, const unsigned char *addr,
		 u16 vid, u16 ports_mask)
{
	u16 index;
	u32 ctrl1;
	u32 ctrl2;
	u32 ctrl;
	u32 val2;
	u32 val;
	int res;

	/* Check for presence */
	res = yt921x_fdb_has(priv, addr, vid, &index);
	if (res)
		return res;
	if (index >= YT921X_FDB_NUM)
		return 0;

	/* Check if action required; the lookup above latched the entry in
	 * the FDB_OUT* registers
	 */
	res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
	if (res)
		return res;

	ctrl2 = val2 & ~YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	if (ctrl2 == val2)
		return 0;
	if (!(ctrl2 & YT921X_FDB_IO2_EGR_PORTS_M)) {
		/* Last egress port gone: delete the whole entry */
		ctrl = YT921X_FDB_OP_OP_DEL | YT921X_FDB_OP_START;
		res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
		if (res)
			return res;

		return yt921x_fdb_wait(priv, &val);
	}

	/* Otherwise rewrite the entry in place with the reduced port set */
	res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &ctrl1);
	if (res)
		return res;

	return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
}
1629 
/* Add @ports_mask to the egress set of the static FDB entry for
 * @addr + @vid, creating the entry if it does not exist yet.  An existing
 * entry is forced to static status.
 */
static int
yt921x_fdb_join(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
		u16 ports_mask)
{
	u16 index;
	u32 ctrl1;
	u32 ctrl2;
	u32 val1;
	u32 val2;
	int res;

	/* Check for presence */
	res = yt921x_fdb_has(priv, addr, vid, &index);
	if (res)
		return res;
	if (index >= YT921X_FDB_NUM)
		return yt921x_fdb_add(priv, addr, vid, ports_mask);

	/* Check if action required; the lookup above latched the entry in
	 * the FDB_OUT* registers
	 */
	res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &val1);
	if (res)
		return res;
	res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
	if (res)
		return res;

	/* Force static status and merge the new egress ports */
	ctrl1 = val1 & ~YT921X_FDB_IO1_STATUS_M;
	ctrl1 |= YT921X_FDB_IO1_STATUS_STATIC;
	ctrl2 = val2 | YT921X_FDB_IO2_EGR_PORTS(ports_mask);
	if (ctrl1 == val1 && ctrl2 == val2)
		return 0;

	return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
}
1664 
1665 static int
1666 yt921x_dsa_port_fdb_dump(struct dsa_switch *ds, int port,
1667 			 dsa_fdb_dump_cb_t *cb, void *data)
1668 {
1669 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1670 	int res;
1671 
1672 	mutex_lock(&priv->reg_lock);
1673 	/* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
1674 	 * only wants to see unicast
1675 	 */
1676 	res = yt921x_fdb_dump(priv, BIT(port), cb, data);
1677 	mutex_unlock(&priv->reg_lock);
1678 
1679 	return res;
1680 }
1681 
1682 static void yt921x_dsa_port_fast_age(struct dsa_switch *ds, int port)
1683 {
1684 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1685 	struct device *dev = to_device(priv);
1686 	int res;
1687 
1688 	mutex_lock(&priv->reg_lock);
1689 	res = yt921x_fdb_flush_port(priv, port, false);
1690 	mutex_unlock(&priv->reg_lock);
1691 
1692 	if (res)
1693 		dev_err(dev, "Failed to %s port %d: %i\n", "clear FDB for",
1694 			port, res);
1695 }
1696 
1697 static int
1698 yt921x_dsa_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
1699 {
1700 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1701 	u32 ctrl;
1702 	int res;
1703 
1704 	/* AGEING reg is set in 5s step */
1705 	ctrl = clamp(msecs / 5000, 1, U16_MAX);
1706 
1707 	mutex_lock(&priv->reg_lock);
1708 	res = yt921x_reg_write(priv, YT921X_AGEING, ctrl);
1709 	mutex_unlock(&priv->reg_lock);
1710 
1711 	return res;
1712 }
1713 
1714 static int
1715 yt921x_dsa_port_fdb_del(struct dsa_switch *ds, int port,
1716 			const unsigned char *addr, u16 vid, struct dsa_db db)
1717 {
1718 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1719 	int res;
1720 
1721 	mutex_lock(&priv->reg_lock);
1722 	res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
1723 	mutex_unlock(&priv->reg_lock);
1724 
1725 	return res;
1726 }
1727 
1728 static int
1729 yt921x_dsa_port_fdb_add(struct dsa_switch *ds, int port,
1730 			const unsigned char *addr, u16 vid, struct dsa_db db)
1731 {
1732 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1733 	int res;
1734 
1735 	mutex_lock(&priv->reg_lock);
1736 	res = yt921x_fdb_join(priv, addr, vid, BIT(port));
1737 	mutex_unlock(&priv->reg_lock);
1738 
1739 	return res;
1740 }
1741 
1742 static int
1743 yt921x_dsa_port_mdb_del(struct dsa_switch *ds, int port,
1744 			const struct switchdev_obj_port_mdb *mdb,
1745 			struct dsa_db db)
1746 {
1747 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1748 	const unsigned char *addr = mdb->addr;
1749 	u16 vid = mdb->vid;
1750 	int res;
1751 
1752 	mutex_lock(&priv->reg_lock);
1753 	res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
1754 	mutex_unlock(&priv->reg_lock);
1755 
1756 	return res;
1757 }
1758 
1759 static int
1760 yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port,
1761 			const struct switchdev_obj_port_mdb *mdb,
1762 			struct dsa_db db)
1763 {
1764 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1765 	const unsigned char *addr = mdb->addr;
1766 	u16 vid = mdb->vid;
1767 	int res;
1768 
1769 	mutex_lock(&priv->reg_lock);
1770 	res = yt921x_fdb_join(priv, addr, vid, BIT(port));
1771 	mutex_unlock(&priv->reg_lock);
1772 
1773 	return res;
1774 }
1775 
1776 static int
1777 yt921x_vlan_aware_set(struct yt921x_priv *priv, int port, bool vlan_aware)
1778 {
1779 	u32 ctrl;
1780 
1781 	/* Abuse SVLAN for PCP parsing without polluting the FDB - it just works
1782 	 * despite YT921X_VLAN_CTRL_SVLAN_EN never being set
1783 	 */
1784 	if (!vlan_aware)
1785 		ctrl = YT921X_PORT_IGR_TPIDn_STAG(0);
1786 	else
1787 		ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0);
1788 	return yt921x_reg_write(priv, YT921X_PORTn_IGR_TPID(port), ctrl);
1789 }
1790 
1791 static int
1792 yt921x_port_set_pvid(struct yt921x_priv *priv, int port, u16 vid)
1793 {
1794 	u32 mask;
1795 	u32 ctrl;
1796 
1797 	mask = YT921X_PORT_VLAN_CTRL_CVID_M;
1798 	ctrl = YT921X_PORT_VLAN_CTRL_CVID(vid);
1799 	return yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL(port),
1800 				      mask, ctrl);
1801 }
1802 
1803 static int
1804 yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering)
1805 {
1806 	struct dsa_port *dp = dsa_to_port(&priv->ds, port);
1807 	struct net_device *bdev;
1808 	u16 pvid;
1809 	u32 mask;
1810 	u32 ctrl;
1811 	int res;
1812 
1813 	bdev = dsa_port_bridge_dev_get(dp);
1814 
1815 	if (!bdev || !vlan_filtering)
1816 		pvid = YT921X_VID_UNWARE;
1817 	else
1818 		br_vlan_get_pvid(bdev, &pvid);
1819 	res = yt921x_port_set_pvid(priv, port, pvid);
1820 	if (res)
1821 		return res;
1822 
1823 	mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED |
1824 	       YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
1825 	ctrl = 0;
1826 	/* Do not drop tagged frames here; let VLAN_IGR_FILTER do it */
1827 	if (vlan_filtering && !pvid)
1828 		ctrl |= YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
1829 	res = yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
1830 				     mask, ctrl);
1831 	if (res)
1832 		return res;
1833 
1834 	res = yt921x_reg_toggle_bits(priv, YT921X_VLAN_IGR_FILTER,
1835 				     YT921X_VLAN_IGR_FILTER_PORTn(port),
1836 				     vlan_filtering);
1837 	if (res)
1838 		return res;
1839 
1840 	res = yt921x_vlan_aware_set(priv, port, vlan_filtering);
1841 	if (res)
1842 		return res;
1843 
1844 	return 0;
1845 }
1846 
1847 static int
1848 yt921x_vlan_del(struct yt921x_priv *priv, int port, u16 vid)
1849 {
1850 	u64 mask64;
1851 
1852 	mask64 = YT921X_VLAN_CTRL_PORTS(port) |
1853 		 YT921X_VLAN_CTRL_UNTAG_PORTn(port);
1854 
1855 	return yt921x_reg64_clear_bits(priv, YT921X_VLANn_CTRL(vid), mask64);
1856 }
1857 
1858 static int
1859 yt921x_vlan_add(struct yt921x_priv *priv, int port, u16 vid, bool untagged)
1860 {
1861 	u64 mask64;
1862 	u64 ctrl64;
1863 
1864 	mask64 = YT921X_VLAN_CTRL_PORTn(port) |
1865 		 YT921X_VLAN_CTRL_PORTS(priv->cpu_ports_mask);
1866 	ctrl64 = mask64;
1867 
1868 	mask64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
1869 	if (untagged)
1870 		ctrl64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
1871 
1872 	return yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(vid),
1873 					mask64, ctrl64);
1874 }
1875 
1876 static int
1877 yt921x_pvid_clear(struct yt921x_priv *priv, int port)
1878 {
1879 	struct dsa_port *dp = dsa_to_port(&priv->ds, port);
1880 	bool vlan_filtering;
1881 	u32 mask;
1882 	int res;
1883 
1884 	vlan_filtering = dsa_port_is_vlan_filtering(dp);
1885 
1886 	res = yt921x_port_set_pvid(priv, port,
1887 				   vlan_filtering ? 0 : YT921X_VID_UNWARE);
1888 	if (res)
1889 		return res;
1890 
1891 	if (vlan_filtering) {
1892 		mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
1893 		res = yt921x_reg_set_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
1894 					  mask);
1895 		if (res)
1896 			return res;
1897 	}
1898 
1899 	return 0;
1900 }
1901 
1902 static int
1903 yt921x_pvid_set(struct yt921x_priv *priv, int port, u16 vid)
1904 {
1905 	struct dsa_port *dp = dsa_to_port(&priv->ds, port);
1906 	bool vlan_filtering;
1907 	u32 mask;
1908 	int res;
1909 
1910 	vlan_filtering = dsa_port_is_vlan_filtering(dp);
1911 
1912 	if (vlan_filtering) {
1913 		res = yt921x_port_set_pvid(priv, port, vid);
1914 		if (res)
1915 			return res;
1916 	}
1917 
1918 	mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
1919 	res = yt921x_reg_clear_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), mask);
1920 	if (res)
1921 		return res;
1922 
1923 	return 0;
1924 }
1925 
1926 static int
1927 yt921x_dsa_port_vlan_filtering(struct dsa_switch *ds, int port,
1928 			       bool vlan_filtering,
1929 			       struct netlink_ext_ack *extack)
1930 {
1931 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1932 	int res;
1933 
1934 	if (dsa_is_cpu_port(ds, port))
1935 		return 0;
1936 
1937 	mutex_lock(&priv->reg_lock);
1938 	res = yt921x_vlan_filtering(priv, port, vlan_filtering);
1939 	mutex_unlock(&priv->reg_lock);
1940 
1941 	return res;
1942 }
1943 
1944 static int
1945 yt921x_dsa_port_vlan_del(struct dsa_switch *ds, int port,
1946 			 const struct switchdev_obj_port_vlan *vlan)
1947 {
1948 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1949 	u16 vid = vlan->vid;
1950 	u16 pvid;
1951 	int res;
1952 
1953 	if (dsa_is_cpu_port(ds, port))
1954 		return 0;
1955 
1956 	mutex_lock(&priv->reg_lock);
1957 	do {
1958 		struct dsa_port *dp = dsa_to_port(ds, port);
1959 		struct net_device *bdev;
1960 
1961 		res = yt921x_vlan_del(priv, port, vid);
1962 		if (res)
1963 			break;
1964 
1965 		bdev = dsa_port_bridge_dev_get(dp);
1966 		if (bdev) {
1967 			br_vlan_get_pvid(bdev, &pvid);
1968 			if (pvid == vid)
1969 				res = yt921x_pvid_clear(priv, port);
1970 		}
1971 	} while (0);
1972 	mutex_unlock(&priv->reg_lock);
1973 
1974 	return res;
1975 }
1976 
1977 static int
1978 yt921x_dsa_port_vlan_add(struct dsa_switch *ds, int port,
1979 			 const struct switchdev_obj_port_vlan *vlan,
1980 			 struct netlink_ext_ack *extack)
1981 {
1982 	struct yt921x_priv *priv = to_yt921x_priv(ds);
1983 	u16 vid = vlan->vid;
1984 	u16 pvid;
1985 	int res;
1986 
1987 	/* CPU port is supposed to be a member of every VLAN; see
1988 	 * yt921x_vlan_add() and yt921x_port_setup()
1989 	 */
1990 	if (dsa_is_cpu_port(ds, port))
1991 		return 0;
1992 
1993 	mutex_lock(&priv->reg_lock);
1994 	do {
1995 		struct dsa_port *dp = dsa_to_port(ds, port);
1996 		struct net_device *bdev;
1997 
1998 		res = yt921x_vlan_add(priv, port, vid,
1999 				      vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
2000 		if (res)
2001 			break;
2002 
2003 		bdev = dsa_port_bridge_dev_get(dp);
2004 		if (bdev) {
2005 			if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
2006 				res = yt921x_pvid_set(priv, port, vid);
2007 			} else {
2008 				br_vlan_get_pvid(bdev, &pvid);
2009 				if (pvid == vid)
2010 					res = yt921x_pvid_clear(priv, port);
2011 			}
2012 		}
2013 	} while (0);
2014 	mutex_unlock(&priv->reg_lock);
2015 
2016 	return res;
2017 }
2018 
2019 static int yt921x_userport_standalone(struct yt921x_priv *priv, int port)
2020 {
2021 	u32 mask;
2022 	u32 ctrl;
2023 	int res;
2024 
2025 	ctrl = ~priv->cpu_ports_mask;
2026 	res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), ctrl);
2027 	if (res)
2028 		return res;
2029 
2030 	/* Turn off FDB learning to prevent FDB pollution */
2031 	mask = YT921X_PORT_LEARN_DIS;
2032 	res = yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port), mask);
2033 	if (res)
2034 		return res;
2035 
2036 	/* Turn off VLAN awareness */
2037 	res = yt921x_vlan_aware_set(priv, port, false);
2038 	if (res)
2039 		return res;
2040 
2041 	/* Unrelated since learning is off and all packets are trapped;
2042 	 * set it anyway
2043 	 */
2044 	res = yt921x_port_set_pvid(priv, port, YT921X_VID_UNWARE);
2045 	if (res)
2046 		return res;
2047 
2048 	return 0;
2049 }
2050 
2051 static int yt921x_userport_bridge(struct yt921x_priv *priv, int port)
2052 {
2053 	u32 mask;
2054 	int res;
2055 
2056 	mask = YT921X_PORT_LEARN_DIS;
2057 	res = yt921x_reg_clear_bits(priv, YT921X_PORTn_LEARN(port), mask);
2058 	if (res)
2059 		return res;
2060 
2061 	return 0;
2062 }
2063 
2064 static int yt921x_isolate(struct yt921x_priv *priv, int port)
2065 {
2066 	u32 mask;
2067 	int res;
2068 
2069 	mask = BIT(port);
2070 	for (int i = 0; i < YT921X_PORT_NUM; i++) {
2071 		if ((BIT(i) & priv->cpu_ports_mask) || i == port)
2072 			continue;
2073 
2074 		res = yt921x_reg_set_bits(priv, YT921X_PORTn_ISOLATION(i),
2075 					  mask);
2076 		if (res)
2077 			return res;
2078 	}
2079 
2080 	return 0;
2081 }
2082 
2083 /* Make sure to include the CPU port in ports_mask, or your bridge will
2084  * not have it.
2085  */
2086 static int yt921x_bridge(struct yt921x_priv *priv, u16 ports_mask)
2087 {
2088 	unsigned long targets_mask = ports_mask & ~priv->cpu_ports_mask;
2089 	u32 isolated_mask;
2090 	u32 ctrl;
2091 	int port;
2092 	int res;
2093 
2094 	isolated_mask = 0;
2095 	for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
2096 		struct yt921x_port *pp = &priv->ports[port];
2097 
2098 		if (pp->isolated)
2099 			isolated_mask |= BIT(port);
2100 	}
2101 
2102 	/* Block from non-cpu bridge ports ... */
2103 	for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
2104 		struct yt921x_port *pp = &priv->ports[port];
2105 
2106 		/* to non-bridge ports */
2107 		ctrl = ~ports_mask;
2108 		/* to isolated ports when isolated */
2109 		if (pp->isolated)
2110 			ctrl |= isolated_mask;
2111 		/* to itself when non-hairpin */
2112 		if (!pp->hairpin)
2113 			ctrl |= BIT(port);
2114 		else
2115 			ctrl &= ~BIT(port);
2116 
2117 		res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
2118 				       ctrl);
2119 		if (res)
2120 			return res;
2121 	}
2122 
2123 	return 0;
2124 }
2125 
static int yt921x_bridge_leave(struct yt921x_priv *priv, int port)
{
	int res;

	/* Back to standalone mode: CPU-only egress, learning disabled */
	res = yt921x_userport_standalone(priv, port);
	if (res)
		return res;

	/* And re-block this port on every other user port */
	return yt921x_isolate(priv, port);
}
2140 
2141 static int
2142 yt921x_bridge_join(struct yt921x_priv *priv, int port, u16 ports_mask)
2143 {
2144 	int res;
2145 
2146 	res = yt921x_userport_bridge(priv, port);
2147 	if (res)
2148 		return res;
2149 
2150 	res = yt921x_bridge(priv, ports_mask);
2151 	if (res)
2152 		return res;
2153 
2154 	return 0;
2155 }
2156 
2157 static u32
2158 dsa_bridge_ports(struct dsa_switch *ds, const struct net_device *bdev)
2159 {
2160 	struct dsa_port *dp;
2161 	u32 mask = 0;
2162 
2163 	dsa_switch_for_each_user_port(dp, ds)
2164 		if (dsa_port_offloads_bridge_dev(dp, bdev))
2165 			mask |= BIT(dp->index);
2166 
2167 	return mask;
2168 }
2169 
/* Apply offloadable bridge port flags to @port; flags rejected by
 * yt921x_dsa_port_pre_bridge_flags() are not expected here.
 */
static int
yt921x_bridge_flags(struct yt921x_priv *priv, int port,
		    struct switchdev_brport_flags flags)
{
	struct yt921x_port *pp = &priv->ports[port];
	bool do_flush;
	u32 mask;
	int res;

	if (flags.mask & BR_LEARNING) {
		bool learning = flags.val & BR_LEARNING;

		/* LEARN_DIS is the inverse of BR_LEARNING */
		mask = YT921X_PORT_LEARN_DIS;
		res = yt921x_reg_toggle_bits(priv, YT921X_PORTn_LEARN(port),
					     mask, !learning);
		if (res)
			return res;
	}

	/* BR_FLOOD, BR_MCAST_FLOOD: see the comment where ACT_UNK_ACTn_TRAP
	 * is set
	 */

	/* BR_BCAST_FLOOD: we can filter bcast, but cannot trap them */

	/* Hairpin and isolation are realized via the isolation matrix, so
	 * the whole bridge's matrix is rebuilt when either changes
	 */
	do_flush = false;
	if (flags.mask & BR_HAIRPIN_MODE) {
		pp->hairpin = flags.val & BR_HAIRPIN_MODE;
		do_flush = true;
	}
	if (flags.mask & BR_ISOLATED) {
		pp->isolated = flags.val & BR_ISOLATED;
		do_flush = true;
	}
	if (do_flush) {
		struct dsa_switch *ds = &priv->ds;
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *bdev;

		bdev = dsa_port_bridge_dev_get(dp);
		if (bdev) {
			u32 ports_mask;

			ports_mask = dsa_bridge_ports(ds, bdev);
			ports_mask |= priv->cpu_ports_mask;
			res = yt921x_bridge(priv, ports_mask);
			if (res)
				return res;
		}
	}

	return 0;
}
2223 
2224 static int
2225 yt921x_dsa_port_pre_bridge_flags(struct dsa_switch *ds, int port,
2226 				 struct switchdev_brport_flags flags,
2227 				 struct netlink_ext_ack *extack)
2228 {
2229 	if (flags.mask & ~(BR_HAIRPIN_MODE | BR_LEARNING | BR_FLOOD |
2230 			   BR_MCAST_FLOOD | BR_ISOLATED))
2231 		return -EINVAL;
2232 	return 0;
2233 }
2234 
2235 static int
2236 yt921x_dsa_port_bridge_flags(struct dsa_switch *ds, int port,
2237 			     struct switchdev_brport_flags flags,
2238 			     struct netlink_ext_ack *extack)
2239 {
2240 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2241 	int res;
2242 
2243 	if (dsa_is_cpu_port(ds, port))
2244 		return 0;
2245 
2246 	mutex_lock(&priv->reg_lock);
2247 	res = yt921x_bridge_flags(priv, port, flags);
2248 	mutex_unlock(&priv->reg_lock);
2249 
2250 	return res;
2251 }
2252 
2253 static void
2254 yt921x_dsa_port_bridge_leave(struct dsa_switch *ds, int port,
2255 			     struct dsa_bridge bridge)
2256 {
2257 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2258 	struct device *dev = to_device(priv);
2259 	int res;
2260 
2261 	if (dsa_is_cpu_port(ds, port))
2262 		return;
2263 
2264 	mutex_lock(&priv->reg_lock);
2265 	res = yt921x_bridge_leave(priv, port);
2266 	mutex_unlock(&priv->reg_lock);
2267 
2268 	if (res)
2269 		dev_err(dev, "Failed to %s port %d: %i\n", "unbridge",
2270 			port, res);
2271 }
2272 
2273 static int
2274 yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port,
2275 			    struct dsa_bridge bridge, bool *tx_fwd_offload,
2276 			    struct netlink_ext_ack *extack)
2277 {
2278 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2279 	u16 ports_mask;
2280 	int res;
2281 
2282 	if (dsa_is_cpu_port(ds, port))
2283 		return 0;
2284 
2285 	ports_mask = dsa_bridge_ports(ds, bridge.dev);
2286 	ports_mask |= priv->cpu_ports_mask;
2287 
2288 	mutex_lock(&priv->reg_lock);
2289 	res = yt921x_bridge_join(priv, port, ports_mask);
2290 	mutex_unlock(&priv->reg_lock);
2291 
2292 	return res;
2293 }
2294 
2295 static int
2296 yt921x_dsa_port_mst_state_set(struct dsa_switch *ds, int port,
2297 			      const struct switchdev_mst_state *st)
2298 {
2299 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2300 	u32 mask;
2301 	u32 ctrl;
2302 	int res;
2303 
2304 	mask = YT921X_STP_PORTn_M(port);
2305 	switch (st->state) {
2306 	case BR_STATE_DISABLED:
2307 		ctrl = YT921X_STP_PORTn_DISABLED(port);
2308 		break;
2309 	case BR_STATE_LISTENING:
2310 	case BR_STATE_LEARNING:
2311 		ctrl = YT921X_STP_PORTn_LEARNING(port);
2312 		break;
2313 	case BR_STATE_FORWARDING:
2314 	default:
2315 		ctrl = YT921X_STP_PORTn_FORWARD(port);
2316 		break;
2317 	case BR_STATE_BLOCKING:
2318 		ctrl = YT921X_STP_PORTn_BLOCKING(port);
2319 		break;
2320 	}
2321 
2322 	mutex_lock(&priv->reg_lock);
2323 	res = yt921x_reg_update_bits(priv, YT921X_STPn(st->msti), mask, ctrl);
2324 	mutex_unlock(&priv->reg_lock);
2325 
2326 	return res;
2327 }
2328 
2329 static int
2330 yt921x_dsa_vlan_msti_set(struct dsa_switch *ds, struct dsa_bridge bridge,
2331 			 const struct switchdev_vlan_msti *msti)
2332 {
2333 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2334 	u64 mask64;
2335 	u64 ctrl64;
2336 	int res;
2337 
2338 	if (!msti->vid)
2339 		return -EINVAL;
2340 	if (!msti->msti || msti->msti >= YT921X_MSTI_NUM)
2341 		return -EINVAL;
2342 
2343 	mask64 = YT921X_VLAN_CTRL_STP_ID_M;
2344 	ctrl64 = YT921X_VLAN_CTRL_STP_ID(msti->msti);
2345 
2346 	mutex_lock(&priv->reg_lock);
2347 	res = yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(msti->vid),
2348 				       mask64, ctrl64);
2349 	mutex_unlock(&priv->reg_lock);
2350 
2351 	return res;
2352 }
2353 
2354 static void
2355 yt921x_dsa_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
2356 {
2357 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2358 	struct dsa_port *dp = dsa_to_port(ds, port);
2359 	struct device *dev = to_device(priv);
2360 	bool learning;
2361 	u32 mask;
2362 	u32 ctrl;
2363 	int res;
2364 
2365 	mask = YT921X_STP_PORTn_M(port);
2366 	learning = false;
2367 	switch (state) {
2368 	case BR_STATE_DISABLED:
2369 		ctrl = YT921X_STP_PORTn_DISABLED(port);
2370 		break;
2371 	case BR_STATE_LISTENING:
2372 		ctrl = YT921X_STP_PORTn_LEARNING(port);
2373 		break;
2374 	case BR_STATE_LEARNING:
2375 		ctrl = YT921X_STP_PORTn_LEARNING(port);
2376 		learning = dp->learning;
2377 		break;
2378 	case BR_STATE_FORWARDING:
2379 	default:
2380 		ctrl = YT921X_STP_PORTn_FORWARD(port);
2381 		learning = dp->learning;
2382 		break;
2383 	case BR_STATE_BLOCKING:
2384 		ctrl = YT921X_STP_PORTn_BLOCKING(port);
2385 		break;
2386 	}
2387 
2388 	mutex_lock(&priv->reg_lock);
2389 	do {
2390 		res = yt921x_reg_update_bits(priv, YT921X_STPn(0), mask, ctrl);
2391 		if (res)
2392 			break;
2393 
2394 		mask = YT921X_PORT_LEARN_DIS;
2395 		ctrl = !learning ? YT921X_PORT_LEARN_DIS : 0;
2396 		res = yt921x_reg_update_bits(priv, YT921X_PORTn_LEARN(port),
2397 					     mask, ctrl);
2398 	} while (0);
2399 	mutex_unlock(&priv->reg_lock);
2400 
2401 	if (res)
2402 		dev_err(dev, "Failed to %s port %d: %i\n", "set STP state for",
2403 			port, res);
2404 }
2405 
2406 static int __maybe_unused
2407 yt921x_dsa_port_get_default_prio(struct dsa_switch *ds, int port)
2408 {
2409 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2410 	u32 val;
2411 	int res;
2412 
2413 	mutex_lock(&priv->reg_lock);
2414 	res = yt921x_reg_read(priv, YT921X_PORTn_QOS(port), &val);
2415 	mutex_unlock(&priv->reg_lock);
2416 
2417 	if (res)
2418 		return res;
2419 
2420 	return FIELD_GET(YT921X_PORT_QOS_PRIO_M, val);
2421 }
2422 
2423 static int __maybe_unused
2424 yt921x_dsa_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
2425 {
2426 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2427 	u32 mask;
2428 	u32 ctrl;
2429 	int res;
2430 
2431 	if (prio >= YT921X_PRIO_NUM)
2432 		return -EINVAL;
2433 
2434 	mutex_lock(&priv->reg_lock);
2435 	mask = YT921X_PORT_QOS_PRIO_M | YT921X_PORT_QOS_PRIO_EN;
2436 	ctrl = YT921X_PORT_QOS_PRIO(prio) | YT921X_PORT_QOS_PRIO_EN;
2437 	res = yt921x_reg_update_bits(priv, YT921X_PORTn_QOS(port), mask, ctrl);
2438 	mutex_unlock(&priv->reg_lock);
2439 
2440 	return res;
2441 }
2442 
2443 static int __maybe_unused appprios_cmp(const void *a, const void *b)
2444 {
2445 	return ((const u8 *)b)[1] - ((const u8 *)a)[1];
2446 }
2447 
/* Report which apptrust selectors are active on this port, most trusted
 * first, by decoding the per-port priority-order register.
 */
static int __maybe_unused
yt921x_dsa_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel,
			     int *nselp)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	/* [i][0] = DCB selector id, [i][1] = hardware precedence (0 means
	 * the selector is disabled)
	 */
	u8 appprios[2][2] = {};
	int nsel;
	u32 val;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_read(priv, YT921X_PORTn_PRIO_ORD(port), &val);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	/* Each app source occupies a 3-bit precedence field in the register */
	appprios[0][0] = IEEE_8021QAZ_APP_SEL_DSCP;
	appprios[0][1] = (val >> (3 * YT921X_APP_SEL_DSCP)) & 7;
	appprios[1][0] = DCB_APP_SEL_PCP;
	appprios[1][1] = (val >> (3 * YT921X_APP_SEL_CVLAN_PCP)) & 7;
	/* Order by precedence, highest first (see appprios_cmp) */
	sort(appprios, ARRAY_SIZE(appprios), sizeof(appprios[0]), appprios_cmp,
	     NULL);

	/* Emit selectors until the first disabled (precedence 0) entry */
	nsel = 0;
	for (int i = 0; i < ARRAY_SIZE(appprios) && appprios[i][1]; i++) {
		sel[nsel] = appprios[i][0];
		nsel++;
	}
	*nselp = nsel;

	return 0;
}
2481 
/* Program the apptrust selector order for a port. sel[] is ordered most
 * trusted first; only DSCP and PCP selectors are supported.
 */
static int __maybe_unused
yt921x_dsa_port_set_apptrust(struct dsa_switch *ds, int port, const u8 *sel,
			     int nsel)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	u32 ctrl;
	int res;

	if (nsel > YT921X_APP_SEL_NUM)
		return -EINVAL;

	/* Earlier entries in sel[] get a higher hardware precedence value
	 * (7 - i); unset sources keep precedence 0, i.e. disabled.
	 */
	ctrl = 0;
	for (int i = 0; i < nsel; i++) {
		switch (sel[i]) {
		case IEEE_8021QAZ_APP_SEL_DSCP:
			ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_DSCP,
							  7 - i);
			break;
		case DCB_APP_SEL_PCP:
			/* PCP trust covers both C-VLAN and S-VLAN tags */
			ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_CVLAN_PCP,
							  7 - i);
			ctrl |= YT921X_PORT_PRIO_ORD_APPm(YT921X_APP_SEL_SVLAN_PCP,
							  7 - i);
			break;
		default:
			dev_err(dev,
				"Invalid apptrust selector (at %d-th). Supported: dscp, pcp\n",
				i + 1);
			return -EOPNOTSUPP;
		}
	}

	mutex_lock(&priv->reg_lock);
	res = yt921x_reg_write(priv, YT921X_PORTn_PRIO_ORD(port), ctrl);
	mutex_unlock(&priv->reg_lock);

	return res;
}
2521 
2522 static int yt921x_port_down(struct yt921x_priv *priv, int port)
2523 {
2524 	u32 mask;
2525 	int res;
2526 
2527 	mask = YT921X_PORT_LINK | YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
2528 	res = yt921x_reg_clear_bits(priv, YT921X_PORTn_CTRL(port), mask);
2529 	if (res)
2530 		return res;
2531 
2532 	if (yt921x_port_is_external(port)) {
2533 		mask = YT921X_SERDES_LINK;
2534 		res = yt921x_reg_clear_bits(priv, YT921X_SERDESn(port), mask);
2535 		if (res)
2536 			return res;
2537 
2538 		mask = YT921X_XMII_LINK;
2539 		res = yt921x_reg_clear_bits(priv, YT921X_XMIIn(port), mask);
2540 		if (res)
2541 			return res;
2542 	}
2543 
2544 	return 0;
2545 }
2546 
2547 static int
2548 yt921x_port_up(struct yt921x_priv *priv, int port, unsigned int mode,
2549 	       phy_interface_t interface, int speed, int duplex,
2550 	       bool tx_pause, bool rx_pause)
2551 {
2552 	u32 mask;
2553 	u32 ctrl;
2554 	int res;
2555 
2556 	switch (speed) {
2557 	case SPEED_10:
2558 		ctrl = YT921X_PORT_SPEED_10;
2559 		break;
2560 	case SPEED_100:
2561 		ctrl = YT921X_PORT_SPEED_100;
2562 		break;
2563 	case SPEED_1000:
2564 		ctrl = YT921X_PORT_SPEED_1000;
2565 		break;
2566 	case SPEED_2500:
2567 		ctrl = YT921X_PORT_SPEED_2500;
2568 		break;
2569 	case SPEED_10000:
2570 		ctrl = YT921X_PORT_SPEED_10000;
2571 		break;
2572 	default:
2573 		return -EINVAL;
2574 	}
2575 	if (duplex == DUPLEX_FULL)
2576 		ctrl |= YT921X_PORT_DUPLEX_FULL;
2577 	if (tx_pause)
2578 		ctrl |= YT921X_PORT_TX_PAUSE;
2579 	if (rx_pause)
2580 		ctrl |= YT921X_PORT_RX_PAUSE;
2581 	ctrl |= YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
2582 	res = yt921x_reg_write(priv, YT921X_PORTn_CTRL(port), ctrl);
2583 	if (res)
2584 		return res;
2585 
2586 	if (yt921x_port_is_external(port)) {
2587 		mask = YT921X_SERDES_SPEED_M;
2588 		switch (speed) {
2589 		case SPEED_10:
2590 			ctrl = YT921X_SERDES_SPEED_10;
2591 			break;
2592 		case SPEED_100:
2593 			ctrl = YT921X_SERDES_SPEED_100;
2594 			break;
2595 		case SPEED_1000:
2596 			ctrl = YT921X_SERDES_SPEED_1000;
2597 			break;
2598 		case SPEED_2500:
2599 			ctrl = YT921X_SERDES_SPEED_2500;
2600 			break;
2601 		case SPEED_10000:
2602 			ctrl = YT921X_SERDES_SPEED_10000;
2603 			break;
2604 		default:
2605 			return -EINVAL;
2606 		}
2607 		mask |= YT921X_SERDES_DUPLEX_FULL;
2608 		if (duplex == DUPLEX_FULL)
2609 			ctrl |= YT921X_SERDES_DUPLEX_FULL;
2610 		mask |= YT921X_SERDES_TX_PAUSE;
2611 		if (tx_pause)
2612 			ctrl |= YT921X_SERDES_TX_PAUSE;
2613 		mask |= YT921X_SERDES_RX_PAUSE;
2614 		if (rx_pause)
2615 			ctrl |= YT921X_SERDES_RX_PAUSE;
2616 		mask |= YT921X_SERDES_LINK;
2617 		ctrl |= YT921X_SERDES_LINK;
2618 		res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
2619 					     mask, ctrl);
2620 		if (res)
2621 			return res;
2622 
2623 		mask = YT921X_XMII_LINK;
2624 		res = yt921x_reg_set_bits(priv, YT921X_XMIIn(port), mask);
2625 		if (res)
2626 			return res;
2627 
2628 		switch (speed) {
2629 		case SPEED_10:
2630 			ctrl = YT921X_MDIO_POLLING_SPEED_10;
2631 			break;
2632 		case SPEED_100:
2633 			ctrl = YT921X_MDIO_POLLING_SPEED_100;
2634 			break;
2635 		case SPEED_1000:
2636 			ctrl = YT921X_MDIO_POLLING_SPEED_1000;
2637 			break;
2638 		case SPEED_2500:
2639 			ctrl = YT921X_MDIO_POLLING_SPEED_2500;
2640 			break;
2641 		case SPEED_10000:
2642 			ctrl = YT921X_MDIO_POLLING_SPEED_10000;
2643 			break;
2644 		default:
2645 			return -EINVAL;
2646 		}
2647 		if (duplex == DUPLEX_FULL)
2648 			ctrl |= YT921X_MDIO_POLLING_DUPLEX_FULL;
2649 		ctrl |= YT921X_MDIO_POLLING_LINK;
2650 		res = yt921x_reg_write(priv, YT921X_MDIO_POLLINGn(port), ctrl);
2651 		if (res)
2652 			return res;
2653 	}
2654 
2655 	return 0;
2656 }
2657 
/* Select SERDES vs XMII operation for a port according to the phylink
 * interface mode. Internal ports accept only PHY_INTERFACE_MODE_INTERNAL;
 * XMII modes are not implemented yet. Caller holds priv->reg_lock.
 */
static int
yt921x_port_config(struct yt921x_priv *priv, int port, unsigned int mode,
		   phy_interface_t interface)
{
	struct device *dev = to_device(priv);
	u32 mask;
	u32 ctrl;
	int res;

	if (!yt921x_port_is_external(port)) {
		if (interface != PHY_INTERFACE_MODE_INTERNAL) {
			dev_err(dev, "Wrong mode %d on port %d\n",
				interface, port);
			return -EINVAL;
		}
		return 0;
	}

	switch (interface) {
	/* SERDES */
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_100BASEX:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		/* Route this port to the SERDES block ... */
		mask = YT921X_SERDES_CTRL_PORTn(port);
		res = yt921x_reg_set_bits(priv, YT921X_SERDES_CTRL, mask);
		if (res)
			return res;

		/* ... and away from the XMII block */
		mask = YT921X_XMII_CTRL_PORTn(port);
		res = yt921x_reg_clear_bits(priv, YT921X_XMII_CTRL, mask);
		if (res)
			return res;

		/* Then program the SERDES protocol */
		mask = YT921X_SERDES_MODE_M;
		switch (interface) {
		case PHY_INTERFACE_MODE_SGMII:
			ctrl = YT921X_SERDES_MODE_SGMII;
			break;
		case PHY_INTERFACE_MODE_100BASEX:
			ctrl = YT921X_SERDES_MODE_100BASEX;
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
			ctrl = YT921X_SERDES_MODE_1000BASEX;
			break;
		case PHY_INTERFACE_MODE_2500BASEX:
			ctrl = YT921X_SERDES_MODE_2500BASEX;
			break;
		default:
			/* Unreachable: outer switch admits only the above */
			return -EINVAL;
		}
		res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
					     mask, ctrl);
		if (res)
			return res;

		break;
	/* add XMII support here */
	default:
		return -EINVAL;
	}

	return 0;
}
2722 
2723 static void
2724 yt921x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
2725 			     phy_interface_t interface)
2726 {
2727 	struct dsa_port *dp = dsa_phylink_to_port(config);
2728 	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
2729 	int port = dp->index;
2730 	int res;
2731 
2732 	/* No need to sync; port control block is hold until device remove */
2733 	cancel_delayed_work(&priv->ports[port].mib_read);
2734 
2735 	mutex_lock(&priv->reg_lock);
2736 	res = yt921x_port_down(priv, port);
2737 	mutex_unlock(&priv->reg_lock);
2738 
2739 	if (res)
2740 		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring down",
2741 			port, res);
2742 }
2743 
2744 static void
2745 yt921x_phylink_mac_link_up(struct phylink_config *config,
2746 			   struct phy_device *phydev, unsigned int mode,
2747 			   phy_interface_t interface, int speed, int duplex,
2748 			   bool tx_pause, bool rx_pause)
2749 {
2750 	struct dsa_port *dp = dsa_phylink_to_port(config);
2751 	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
2752 	int port = dp->index;
2753 	int res;
2754 
2755 	mutex_lock(&priv->reg_lock);
2756 	res = yt921x_port_up(priv, port, mode, interface, speed, duplex,
2757 			     tx_pause, rx_pause);
2758 	mutex_unlock(&priv->reg_lock);
2759 
2760 	if (res)
2761 		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring up",
2762 			port, res);
2763 
2764 	schedule_delayed_work(&priv->ports[port].mib_read, 0);
2765 }
2766 
2767 static void
2768 yt921x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
2769 			  const struct phylink_link_state *state)
2770 {
2771 	struct dsa_port *dp = dsa_phylink_to_port(config);
2772 	struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
2773 	int port = dp->index;
2774 	int res;
2775 
2776 	mutex_lock(&priv->reg_lock);
2777 	res = yt921x_port_config(priv, port, mode, state->interface);
2778 	mutex_unlock(&priv->reg_lock);
2779 
2780 	if (res)
2781 		dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "config",
2782 			port, res);
2783 }
2784 
/* Declare per-port MAC capabilities and supported interface modes based on
 * the chip variant's internal/external port masks.
 */
static void
yt921x_dsa_phylink_get_caps(struct dsa_switch *ds, int port,
			    struct phylink_config *config)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	const struct yt921x_info *info = priv->info;

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000;

	if (info->internal_mask & BIT(port)) {
		/* Port 10 for MCU should probably go here too. But since that
		 * is untested yet, turn it down for the moment by letting it
		 * fall to the default branch.
		 */
		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
			  config->supported_interfaces);
	} else if (info->external_mask & BIT(port)) {
		/* TODO: external ports may support SERDES only, XMII only, or
		 * SERDES + XMII depending on the chip. However, we can't get
		 * the accurate config table due to lack of document, thus
		 * we simply declare SERDES + XMII and rely on the correctness
		 * of devicetree for now.
		 */

		/* SERDES */
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  config->supported_interfaces);
		/* REVSGMII (SGMII in PHY role) should go here, once
		 * PHY_INTERFACE_MODE_REVSGMII is introduced.
		 */
		__set_bit(PHY_INTERFACE_MODE_100BASEX,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  config->supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_2500BASEX,
			  config->supported_interfaces);
		config->mac_capabilities |= MAC_2500FD;

		/* XMII */

		/* Not tested. To add support for XMII:
		 *   - Add proper interface modes below
		 *   - Handle them in yt921x_port_config()
		 */
	}
	/* no such port: empty supported_interfaces causes phylink to turn it
	 * down
	 */
}
2835 
2836 static int yt921x_port_setup(struct yt921x_priv *priv, int port)
2837 {
2838 	struct dsa_switch *ds = &priv->ds;
2839 	u32 ctrl;
2840 	int res;
2841 
2842 	res = yt921x_userport_standalone(priv, port);
2843 	if (res)
2844 		return res;
2845 
2846 	/* Clear prio order (even if DCB is not enabled) to avoid unsolicited
2847 	 * priorities
2848 	 */
2849 	res = yt921x_reg_write(priv, YT921X_PORTn_PRIO_ORD(port), 0);
2850 	if (res)
2851 		return res;
2852 
2853 	if (dsa_is_cpu_port(ds, port)) {
2854 		/* Egress of CPU port is supposed to be completely controlled
2855 		 * via tagging, so set to oneway isolated (drop all packets
2856 		 * without tag).
2857 		 */
2858 		ctrl = ~(u32)0;
2859 		res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
2860 				       ctrl);
2861 		if (res)
2862 			return res;
2863 
2864 		/* To simplify FDB "isolation" simulation, we also disable
2865 		 * learning on the CPU port, and let software identify packets
2866 		 * towarding CPU (either trapped or a static FDB entry is
2867 		 * matched, no matter which bridge that entry is for), which is
2868 		 * already done by yt921x_userport_standalone(). As a result,
2869 		 * VLAN-awareness becomes unrelated on the CPU port (set to
2870 		 * VLAN-unaware by the way).
2871 		 */
2872 	}
2873 
2874 	return 0;
2875 }
2876 
/* The switch only supports its own proprietary CPU tag format */
static enum dsa_tag_protocol
yt921x_dsa_get_tag_protocol(struct dsa_switch *ds, int port,
			    enum dsa_tag_protocol m)
{
	return DSA_TAG_PROTO_YT921X;
}
2883 
2884 static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port)
2885 {
2886 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2887 	int res;
2888 
2889 	mutex_lock(&priv->reg_lock);
2890 	res = yt921x_port_setup(priv, port);
2891 	mutex_unlock(&priv->reg_lock);
2892 
2893 	return res;
2894 }
2895 
2896 /* Not "port" - DSCP mapping is global */
2897 static int __maybe_unused
2898 yt921x_dsa_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
2899 {
2900 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2901 	u32 val;
2902 	int res;
2903 
2904 	mutex_lock(&priv->reg_lock);
2905 	res = yt921x_reg_read(priv, YT921X_IPM_DSCPn(dscp), &val);
2906 	mutex_unlock(&priv->reg_lock);
2907 
2908 	if (res)
2909 		return res;
2910 
2911 	return FIELD_GET(YT921X_IPM_PRIO_M, val);
2912 }
2913 
2914 static int __maybe_unused
2915 yt921x_dsa_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
2916 {
2917 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2918 	u32 val;
2919 	int res;
2920 
2921 	mutex_lock(&priv->reg_lock);
2922 	/* During a "dcb app replace" command, the new app table entry will be
2923 	 * added first, then the old one will be deleted. But the hardware only
2924 	 * supports one QoS class per DSCP value (duh), so if we blindly delete
2925 	 * the app table entry for this DSCP value, we end up deleting the
2926 	 * entry with the new priority. Avoid that by checking whether user
2927 	 * space wants to delete the priority which is currently configured, or
2928 	 * something else which is no longer current.
2929 	 */
2930 	res = yt921x_reg_read(priv, YT921X_IPM_DSCPn(dscp), &val);
2931 	if (!res && FIELD_GET(YT921X_IPM_PRIO_M, val) == prio)
2932 		res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
2933 				       YT921X_IPM_PRIO(IEEE8021Q_TT_BK));
2934 	mutex_unlock(&priv->reg_lock);
2935 
2936 	return res;
2937 }
2938 
2939 static int __maybe_unused
2940 yt921x_dsa_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
2941 {
2942 	struct yt921x_priv *priv = to_yt921x_priv(ds);
2943 	int res;
2944 
2945 	if (prio >= YT921X_PRIO_NUM)
2946 		return -EINVAL;
2947 
2948 	mutex_lock(&priv->reg_lock);
2949 	res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
2950 			       YT921X_IPM_PRIO(prio));
2951 	mutex_unlock(&priv->reg_lock);
2952 
2953 	return res;
2954 }
2955 
2956 static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp)
2957 {
2958 	u32 val = YT921X_EDATA_DATA_IDLE;
2959 	int res;
2960 
2961 	res = yt921x_reg_wait(priv, YT921X_EDATA_DATA,
2962 			      YT921X_EDATA_DATA_STATUS_M, &val);
2963 	if (res)
2964 		return res;
2965 
2966 	*valp = val;
2967 	return 0;
2968 }
2969 
2970 static int
2971 yt921x_edata_read_cont(struct yt921x_priv *priv, u8 addr, u8 *valp)
2972 {
2973 	u32 ctrl;
2974 	u32 val;
2975 	int res;
2976 
2977 	ctrl = YT921X_EDATA_CTRL_ADDR(addr) | YT921X_EDATA_CTRL_READ;
2978 	res = yt921x_reg_write(priv, YT921X_EDATA_CTRL, ctrl);
2979 	if (res)
2980 		return res;
2981 	res = yt921x_edata_wait(priv, &val);
2982 	if (res)
2983 		return res;
2984 
2985 	*valp = FIELD_GET(YT921X_EDATA_DATA_DATA_M, val);
2986 	return 0;
2987 }
2988 
/* Read one edata byte, first making sure no previous operation is still in
 * flight. The value from the initial wait is intentionally discarded; it
 * only confirms the engine is idle before a new command is issued.
 */
static int yt921x_edata_read(struct yt921x_priv *priv, u8 addr, u8 *valp)
{
	u32 val;
	int res;

	res = yt921x_edata_wait(priv, &val);
	if (res)
		return res;
	return yt921x_edata_read_cont(priv, addr, valp);
}
2999 
/* Identify the exact chip variant and set priv->info.
 *
 * The lookup runs in two passes over yt921x_infos: first match on the chip
 * ID major alone (to tell "unknown chip" from "known chip in an unsupported
 * mode"), then continue from that entry matching mode and extmode as well.
 */
static int yt921x_chip_detect(struct yt921x_priv *priv)
{
	struct device *dev = to_device(priv);
	const struct yt921x_info *info;
	u8 extmode;
	u32 chipid;
	u32 major;
	u32 mode;
	int res;

	res = yt921x_reg_read(priv, YT921X_CHIP_ID, &chipid);
	if (res)
		return res;

	major = FIELD_GET(YT921X_CHIP_ID_MAJOR, chipid);

	/* Pass 1: is the major ID known at all? */
	for (info = yt921x_infos; info->name; info++)
		if (info->major == major)
			break;
	if (!info->name) {
		dev_err(dev, "Unexpected chipid 0x%x\n", chipid);
		return -ENODEV;
	}

	res = yt921x_reg_read(priv, YT921X_CHIP_MODE, &mode);
	if (res)
		return res;
	res = yt921x_edata_read(priv, YT921X_EDATA_EXTMODE, &extmode);
	if (res)
		return res;

	/* Pass 2: refine to the exact (major, mode, extmode) variant */
	for (; info->name; info++)
		if (info->major == major && info->mode == mode &&
		    info->extmode == extmode)
			break;
	if (!info->name) {
		dev_err(dev,
			"Unsupported chipid 0x%x with chipmode 0x%x 0x%x\n",
			chipid, mode, extmode);
		return -ENODEV;
	}

	/* Print chipid here since we are interested in lower 16 bits */
	dev_info(dev,
		 "Motorcomm %s ethernet switch, chipid: 0x%x, chipmode: 0x%x 0x%x\n",
		 info->name, chipid, mode, extmode);

	priv->info = info;
	return 0;
}
3050 
/* Detect the chip, perform a full hardware reset, and sanity-check that the
 * CPU tag EtherType matches what the tagger expects.
 */
static int yt921x_chip_reset(struct yt921x_priv *priv)
{
	struct device *dev = to_device(priv);
	u16 eth_p_tag;
	u32 val;
	int res;

	res = yt921x_chip_detect(priv);
	if (res)
		return res;

	/* Reset */
	res = yt921x_reg_write(priv, YT921X_RST, YT921X_RST_HW);
	if (res)
		return res;

	/* RST_HW is almost same as GPIO hard reset, so we need this delay. */
	fsleep(YT921X_RST_DELAY_US);

	/* Wait for the reset register to read back as all-zero */
	val = 0;
	res = yt921x_reg_wait(priv, YT921X_RST, ~0, &val);
	if (res)
		return res;

	/* Check for tag EtherType; do it after reset in case you messed it up
	 * before.
	 */
	res = yt921x_reg_read(priv, YT921X_CPU_TAG_TPID, &val);
	if (res)
		return res;
	eth_p_tag = FIELD_GET(YT921X_CPU_TAG_TPID_TPID_M, val);
	if (eth_p_tag != ETH_P_YT921X) {
		dev_err(dev, "Tag type 0x%x != 0x%x\n", eth_p_tag,
			ETH_P_YT921X);
		/* Despite being possible, we choose not to set CPU_TAG_TPID,
		 * since there is no way it can be different unless you have the
		 * wrong chip.
		 */
		return -EINVAL;
	}

	return 0;
}
3094 
/* One-time DSA-related chip setup: enable the external CPU port with
 * tagging, route trapped traffic to it, and configure the unknown-unicast/
 * multicast handling needed to emulate bridging via port isolation.
 */
static int yt921x_chip_setup_dsa(struct yt921x_priv *priv)
{
	struct dsa_switch *ds = &priv->ds;
	unsigned long cpu_ports_mask;
	u64 ctrl64;
	u32 ctrl;
	int port;
	int res;

	/* Enable DSA */
	priv->cpu_ports_mask = dsa_cpu_ports(ds);

	/* Declare the first CPU port as the tagged "external CPU" port */
	ctrl = YT921X_EXT_CPU_PORT_TAG_EN | YT921X_EXT_CPU_PORT_PORT_EN |
	       YT921X_EXT_CPU_PORT_PORT(__ffs(priv->cpu_ports_mask));
	res = yt921x_reg_write(priv, YT921X_EXT_CPU_PORT, ctrl);
	if (res)
		return res;

	/* Setup software switch */
	ctrl = YT921X_CPU_COPY_TO_EXT_CPU;
	res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl);
	if (res)
		return res;

	/* Enable unknown-unicast/multicast filtering on all 11 ports */
	ctrl = GENMASK(10, 0);
	res = yt921x_reg_write(priv, YT921X_FILTER_UNK_UCAST, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_FILTER_UNK_MCAST, ctrl);
	if (res)
		return res;

	/* YT921x does not support native DSA port bridging, so we use port
	 * isolation to emulate it. However, be especially careful that port
	 * isolation takes _after_ FDB lookups, i.e. if an FDB entry (from
	 * another bridge) is matched and the destination port (in another
	 * bridge) is blocked, the packet will be dropped instead of flooding to
	 * the "bridged" ports, thus we need to trap and handle those packets by
	 * software.
	 *
	 * If there is no more than one bridge, we might be able to drop them
	 * directly given some conditions are met, but we trap them in all cases
	 * for now.
	 */
	ctrl = 0;
	for (int i = 0; i < YT921X_PORT_NUM; i++)
		ctrl |= YT921X_ACT_UNK_ACTn_TRAP(i);
	/* Except for CPU ports, if any packets are sent via CPU ports without
	 * tag, they should be dropped.
	 */
	cpu_ports_mask = priv->cpu_ports_mask;
	for_each_set_bit(port, &cpu_ports_mask, YT921X_PORT_NUM) {
		ctrl &= ~YT921X_ACT_UNK_ACTn_M(port);
		ctrl |= YT921X_ACT_UNK_ACTn_DROP(port);
	}
	res = yt921x_reg_write(priv, YT921X_ACT_UNK_UCAST, ctrl);
	if (res)
		return res;
	res = yt921x_reg_write(priv, YT921X_ACT_UNK_MCAST, ctrl);
	if (res)
		return res;

	/* Tagged VID 0 should be treated as untagged, which confuses the
	 * hardware a lot
	 */
	ctrl64 = YT921X_VLAN_CTRL_LEARN_DIS | YT921X_VLAN_CTRL_PORTS_M;
	res = yt921x_reg64_write(priv, YT921X_VLANn_CTRL(0), ctrl64);
	if (res)
		return res;

	return 0;
}
3167 
/* Program default QoS maps: DSCP and 802.1Q PCP/DEI to internal priorities
 * and drop colors.
 */
static int __maybe_unused yt921x_chip_setup_qos(struct yt921x_priv *priv)
{
	u32 ctrl;
	int res;

	/* DSCP to internal priorities */
	for (u8 dscp = 0; dscp < DSCP_MAX; dscp++) {
		int prio = ietf_dscp_to_ieee8021q_tt(dscp);

		if (prio < 0)
			return prio;

		res = yt921x_reg_write(priv, YT921X_IPM_DSCPn(dscp),
				       YT921X_IPM_PRIO(prio));
		if (res)
			return res;
	}

	/* 802.1Q QoS to internal priorities */
	for (u8 pcp = 0; pcp < 8; pcp++)
		for (u8 dei = 0; dei < 2; dei++) {
			/* PCP maps 1:1 to the internal priority */
			ctrl = YT921X_IPM_PRIO(pcp);
			if (dei)
				/* "Red" almost means drop, so it's not that
				 * useful. Note that tc police does not support
				 * Three-Color very well
				 */
				ctrl |= YT921X_IPM_COLOR_YELLOW;

			/* Apply the same mapping to C-VLAN and S-VLAN tags */
			for (u8 svlan = 0; svlan < 2; svlan++) {
				u32 reg = YT921X_IPM_PCPn(svlan, dei, pcp);

				res = yt921x_reg_write(priv, reg, ctrl);
				if (res)
					return res;
			}
		}

	return 0;
}
3208 
3209 static int yt921x_chip_setup(struct yt921x_priv *priv)
3210 {
3211 	u32 ctrl;
3212 	int res;
3213 
3214 	ctrl = YT921X_FUNC_MIB;
3215 	res = yt921x_reg_set_bits(priv, YT921X_FUNC, ctrl);
3216 	if (res)
3217 		return res;
3218 
3219 	res = yt921x_chip_setup_dsa(priv);
3220 	if (res)
3221 		return res;
3222 
3223 #if IS_ENABLED(CONFIG_DCB)
3224 	res = yt921x_chip_setup_qos(priv);
3225 	if (res)
3226 		return res;
3227 #endif
3228 
3229 	/* Clear MIB */
3230 	ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
3231 	res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
3232 	if (res)
3233 		return res;
3234 
3235 	/* Miscellaneous */
3236 	res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP);
3237 	if (res)
3238 		return res;
3239 
3240 	return 0;
3241 }
3242 
/* DSA .setup callback: reset the chip, register the internal (and optional
 * external) MDIO buses from the devicetree, then run global chip setup.
 */
static int yt921x_dsa_setup(struct dsa_switch *ds)
{
	struct yt921x_priv *priv = to_yt921x_priv(ds);
	struct device *dev = to_device(priv);
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int res;

	mutex_lock(&priv->reg_lock);
	res = yt921x_chip_reset(priv);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	/* Register the internal mdio bus. Nodes for internal ports should have
	 * proper phy-handle pointing to their PHYs. Not enabling the internal
	 * bus is possible, though pretty weird, if internal ports are not used.
	 */
	child = of_get_child_by_name(np, "mdio");
	if (child) {
		res = yt921x_mbus_int_init(priv, child);
		of_node_put(child);
		if (res)
			return res;
	}

	/* External mdio bus is optional */
	child = of_get_child_by_name(np, "mdio-external");
	if (child) {
		res = yt921x_mbus_ext_init(priv, child);
		of_node_put(child);
		if (res)
			return res;

		/* Bail out for now: this path has never been exercised */
		dev_err(dev, "Untested external mdio bus\n");
		return -ENODEV;
	}

	mutex_lock(&priv->reg_lock);
	res = yt921x_chip_setup(priv);
	mutex_unlock(&priv->reg_lock);

	if (res)
		return res;

	return 0;
}
3291 
/* phylink MAC operations: per-port link bring-up/down and interface config */
static const struct phylink_mac_ops yt921x_phylink_mac_ops = {
	.mac_link_down	= yt921x_phylink_mac_link_down,
	.mac_link_up	= yt921x_phylink_mac_link_up,
	.mac_config	= yt921x_phylink_mac_config,
};
3297 
/* DSA switch operations table; grouped by feature area. DCB/DSCP callbacks
 * are only wired up when CONFIG_DCB is enabled.
 */
static const struct dsa_switch_ops yt921x_dsa_switch_ops = {
	/* mib */
	.get_strings		= yt921x_dsa_get_strings,
	.get_ethtool_stats	= yt921x_dsa_get_ethtool_stats,
	.get_sset_count		= yt921x_dsa_get_sset_count,
	.get_eth_mac_stats	= yt921x_dsa_get_eth_mac_stats,
	.get_eth_ctrl_stats	= yt921x_dsa_get_eth_ctrl_stats,
	.get_rmon_stats		= yt921x_dsa_get_rmon_stats,
	.get_stats64		= yt921x_dsa_get_stats64,
	.get_pause_stats	= yt921x_dsa_get_pause_stats,
	/* eee */
	.support_eee		= dsa_supports_eee,
	.set_mac_eee		= yt921x_dsa_set_mac_eee,
	/* mtu */
	.port_change_mtu	= yt921x_dsa_port_change_mtu,
	.port_max_mtu		= yt921x_dsa_port_max_mtu,
	/* hsr */
	.port_hsr_leave		= dsa_port_simple_hsr_leave,
	.port_hsr_join		= dsa_port_simple_hsr_join,
	/* mirror */
	.port_mirror_del	= yt921x_dsa_port_mirror_del,
	.port_mirror_add	= yt921x_dsa_port_mirror_add,
	/* lag */
	.port_lag_leave		= yt921x_dsa_port_lag_leave,
	.port_lag_join		= yt921x_dsa_port_lag_join,
	/* fdb */
	.port_fdb_dump		= yt921x_dsa_port_fdb_dump,
	.port_fast_age		= yt921x_dsa_port_fast_age,
	.set_ageing_time	= yt921x_dsa_set_ageing_time,
	.port_fdb_del		= yt921x_dsa_port_fdb_del,
	.port_fdb_add		= yt921x_dsa_port_fdb_add,
	.port_mdb_del		= yt921x_dsa_port_mdb_del,
	.port_mdb_add		= yt921x_dsa_port_mdb_add,
	/* vlan */
	.port_vlan_filtering	= yt921x_dsa_port_vlan_filtering,
	.port_vlan_del		= yt921x_dsa_port_vlan_del,
	.port_vlan_add		= yt921x_dsa_port_vlan_add,
	/* bridge */
	.port_pre_bridge_flags	= yt921x_dsa_port_pre_bridge_flags,
	.port_bridge_flags	= yt921x_dsa_port_bridge_flags,
	.port_bridge_leave	= yt921x_dsa_port_bridge_leave,
	.port_bridge_join	= yt921x_dsa_port_bridge_join,
	/* mst */
	.port_mst_state_set	= yt921x_dsa_port_mst_state_set,
	.vlan_msti_set		= yt921x_dsa_vlan_msti_set,
	.port_stp_state_set	= yt921x_dsa_port_stp_state_set,
#if IS_ENABLED(CONFIG_DCB)
	/* dcb */
	.port_get_default_prio	= yt921x_dsa_port_get_default_prio,
	.port_set_default_prio	= yt921x_dsa_port_set_default_prio,
	.port_get_apptrust	= yt921x_dsa_port_get_apptrust,
	.port_set_apptrust	= yt921x_dsa_port_set_apptrust,
#endif
	/* port */
	.get_tag_protocol	= yt921x_dsa_get_tag_protocol,
	.phylink_get_caps	= yt921x_dsa_phylink_get_caps,
	.port_setup		= yt921x_dsa_port_setup,
#if IS_ENABLED(CONFIG_DCB)
	/* dscp */
	.port_get_dscp_prio	= yt921x_dsa_port_get_dscp_prio,
	.port_del_dscp_prio	= yt921x_dsa_port_del_dscp_prio,
	.port_add_dscp_prio	= yt921x_dsa_port_add_dscp_prio,
#endif
	/* chip */
	.setup			= yt921x_dsa_setup,
};
3364 
3365 static void yt921x_mdio_shutdown(struct mdio_device *mdiodev)
3366 {
3367 	struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);
3368 
3369 	if (!priv)
3370 		return;
3371 
3372 	dsa_switch_shutdown(&priv->ds);
3373 }
3374 
3375 static void yt921x_mdio_remove(struct mdio_device *mdiodev)
3376 {
3377 	struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);
3378 
3379 	if (!priv)
3380 		return;
3381 
3382 	for (size_t i = ARRAY_SIZE(priv->ports); i-- > 0; ) {
3383 		struct yt921x_port *pp = &priv->ports[i];
3384 
3385 		disable_delayed_work_sync(&pp->mib_read);
3386 	}
3387 
3388 	dsa_unregister_switch(&priv->ds);
3389 
3390 	mutex_destroy(&priv->reg_lock);
3391 }
3392 
3393 static int yt921x_mdio_probe(struct mdio_device *mdiodev)
3394 {
3395 	struct device *dev = &mdiodev->dev;
3396 	struct yt921x_reg_mdio *mdio;
3397 	struct yt921x_priv *priv;
3398 	struct dsa_switch *ds;
3399 
3400 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
3401 	if (!priv)
3402 		return -ENOMEM;
3403 
3404 	mdio = devm_kzalloc(dev, sizeof(*mdio), GFP_KERNEL);
3405 	if (!mdio)
3406 		return -ENOMEM;
3407 
3408 	mdio->bus = mdiodev->bus;
3409 	mdio->addr = mdiodev->addr;
3410 	mdio->switchid = 0;
3411 
3412 	mutex_init(&priv->reg_lock);
3413 
3414 	priv->reg_ops = &yt921x_reg_ops_mdio;
3415 	priv->reg_ctx = mdio;
3416 
3417 	for (size_t i = 0; i < ARRAY_SIZE(priv->ports); i++) {
3418 		struct yt921x_port *pp = &priv->ports[i];
3419 
3420 		pp->index = i;
3421 		INIT_DELAYED_WORK(&pp->mib_read, yt921x_poll_mib);
3422 	}
3423 
3424 	ds = &priv->ds;
3425 	ds->dev = dev;
3426 	ds->assisted_learning_on_cpu_port = true;
3427 	ds->dscp_prio_mapping_is_global = true;
3428 	ds->priv = priv;
3429 	ds->ops = &yt921x_dsa_switch_ops;
3430 	ds->ageing_time_min = 1 * 5000;
3431 	ds->ageing_time_max = U16_MAX * 5000;
3432 	ds->phylink_mac_ops = &yt921x_phylink_mac_ops;
3433 	ds->num_lag_ids = YT921X_LAG_NUM;
3434 	ds->num_ports = YT921X_PORT_NUM;
3435 
3436 	mdiodev_set_drvdata(mdiodev, priv);
3437 
3438 	return dsa_register_switch(ds);
3439 }
3440 
/* Devicetree match table; only yt9215 is bound here (see file header note
 * about other chips in the family being untested).
 */
static const struct of_device_id yt921x_of_match[] = {
	{ .compatible = "motorcomm,yt9215" },
	{}
};
3445 MODULE_DEVICE_TABLE(of, yt921x_of_match);
3446 
3447 static struct mdio_driver yt921x_mdio_driver = {
3448 	.probe = yt921x_mdio_probe,
3449 	.remove = yt921x_mdio_remove,
3450 	.shutdown = yt921x_mdio_shutdown,
3451 	.mdiodrv.driver = {
3452 		.name = YT921X_NAME,
3453 		.of_match_table = yt921x_of_match,
3454 	},
3455 };
3456 
/* Register the driver with the MDIO core and emit module metadata. */
mdio_module_driver(yt921x_mdio_driver);

MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
MODULE_DESCRIPTION("Driver for Motorcomm YT921x Switch");
MODULE_LICENSE("GPL");
3462