xref: /linux/drivers/net/dsa/microchip/ksz9477.c (revision ea23fbd2a8f7dadfa9cd9b9d73f3b8a69eec0671)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Microchip KSZ9477 switch driver main logic
4  *
5  * Copyright (C) 2017-2019 Microchip Technology Inc.
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/iopoll.h>
11 #include <linux/platform_data/microchip-ksz.h>
12 #include <linux/phy.h>
13 #include <linux/if_bridge.h>
14 #include <linux/if_vlan.h>
15 #include <net/dsa.h>
16 #include <net/switchdev.h>
17 
18 #include "ksz9477_reg.h"
19 #include "ksz_common.h"
20 #include "ksz9477.h"
21 
22 static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
23 {
24 	regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
25 }
26 
27 static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
28 			 bool set)
29 {
30 	regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
31 			   bits, set ? bits : 0);
32 }
33 
34 static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
35 {
36 	regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
37 }
38 
39 static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
40 			       u32 bits, bool set)
41 {
42 	regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
43 			   bits, set ? bits : 0);
44 }
45 
46 int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
47 {
48 	u16 frame_size;
49 
50 	if (!dsa_is_cpu_port(dev->ds, port))
51 		return 0;
52 
53 	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
54 
55 	return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
56 				  REG_SW_MTU_MASK, frame_size);
57 }
58 
59 /**
60  * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
61  * @dev: The device structure.
62  * @port: The port number.
63  *
64  * This function reads the PME (Power Management Event) status register of a
65  * specified port to determine the wake reason. If there is no wake event, it
66  * returns early. Otherwise, it logs the wake reason which could be due to a
67  * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
68  * is then cleared to acknowledge the handling of the wake event.
69  *
70  * Return: 0 on success, or an error code on failure.
71  */
72 static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
73 {
74 	u8 pme_status;
75 	int ret;
76 
77 	ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
78 	if (ret)
79 		return ret;
80 
81 	if (!pme_status)
82 		return 0;
83 
84 	dev_dbg(dev->dev, "Wake event on port %d due to:%s%s\n", port,
85 		pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
86 		pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
87 
88 	return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
89 }
90 
91 /**
92  * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
93  * @dev: The device structure.
94  * @port: The port number.
95  * @wol: Pointer to ethtool Wake-on-LAN settings structure.
96  *
97  * This function reports WAKE_PHY as supported when the switch is configured
98  * as a wakeup source, and marks it as active if the link-up or energy-detect
99  * wake triggers are enabled in the port's PME Control Register.
100  */
101 void ksz9477_get_wol(struct ksz_device *dev, int port,
102 		     struct ethtool_wolinfo *wol)
103 {
104 	u8 pme_ctrl;
105 	int ret;
106 
107 	if (!dev->wakeup_source)
108 		return;
109 
110 	wol->supported = WAKE_PHY;
111 
112 	ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
113 	if (ret)
114 		return;
115 
116 	if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
117 		wol->wolopts |= WAKE_PHY;
118 }
119 
120 /**
121  * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
122  * @dev: The device structure.
123  * @port: The port number.
124  * @wol: Pointer to ethtool Wake-on-LAN settings structure.
125  *
126  * This function configures Wake-on-LAN (WoL) settings for a specified port.
127  * It validates the provided WoL options, checks that the switch is
128  * configured as a wakeup source, clears any previous wake reasons, and
129  * enables the link-up and energy-detect wake triggers in the port's PME
130  * Control Register when WAKE_PHY is requested.
131  *
132  * Return: 0 on success, or other error codes on failure.
133  */
134 int ksz9477_set_wol(struct ksz_device *dev, int port,
135 		    struct ethtool_wolinfo *wol)
136 {
137 	u8 pme_ctrl = 0;
138 	int ret;
139 
140 	if (wol->wolopts & ~WAKE_PHY)
141 		return -EINVAL;
142 
143 	if (!dev->wakeup_source)
144 		return -EOPNOTSUPP;
145 
146 	ret = ksz9477_handle_wake_reason(dev, port);
147 	if (ret)
148 		return ret;
149 
150 	if (wol->wolopts & WAKE_PHY)
151 		pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
152 
153 	return ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
154 }
155 
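/* Poll until the hardware clears VLAN_START, i.e. the previously
 * triggered VLAN table read/write has completed.
 */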
156 static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
157 {
158 	unsigned int val;
159 
160 	return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
161 					val, !(val & VLAN_START), 10, 1000);
162 }
163 
164 static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
165 				  u32 *vlan_table)
166 {
167 	int ret;
168 
169 	mutex_lock(&dev->vlan_mutex);
170 
171 	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
172 	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
173 
174 	/* wait to be cleared */
175 	ret = ksz9477_wait_vlan_ctrl_ready(dev);
176 	if (ret) {
177 		dev_dbg(dev->dev, "Failed to read vlan table\n");
178 		goto exit;
179 	}
180 
181 	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
182 	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
183 	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
184 
185 	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
186 
187 exit:
188 	mutex_unlock(&dev->vlan_mutex);
189 
190 	return ret;
191 }
192 
193 static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
194 				  u32 *vlan_table)
195 {
196 	int ret;
197 
198 	mutex_lock(&dev->vlan_mutex);
199 
200 	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
201 	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
202 	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
203 
204 	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
205 	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
206 
207 	/* wait to be cleared */
208 	ret = ksz9477_wait_vlan_ctrl_ready(dev);
209 	if (ret) {
210 		dev_dbg(dev->dev, "Failed to write vlan table\n");
211 		goto exit;
212 	}
213 
214 	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
215 
216 	/* update vlan cache table */
217 	dev->vlan_cache[vid].table[0] = vlan_table[0];
218 	dev->vlan_cache[vid].table[1] = vlan_table[1];
219 	dev->vlan_cache[vid].table[2] = vlan_table[2];
220 
221 exit:
222 	mutex_unlock(&dev->vlan_mutex);
223 
224 	return ret;
225 }
226 
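/* Fetch one 4-word ALU/static table entry from the access registers
 * REG_SW_ALU_VAL_A..D after a lookup operation has completed.
 */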
227 static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
228 {
229 	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
230 	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
231 	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
232 	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
233 }
234 
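/* Load one 4-word ALU/static table entry into the access registers
 * REG_SW_ALU_VAL_A..D, ready to be committed with a write operation.
 */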
235 static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
236 {
237 	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
238 	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
239 	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
240 	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
241 }
242 
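/* Wait for a pending ALU table operation to finish; the hardware
 * clears ALU_START in REG_SW_ALU_CTRL__4 when it is done.
 */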
243 static int ksz9477_wait_alu_ready(struct ksz_device *dev)
244 {
245 	unsigned int val;
246 
247 	return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
248 					val, !(val & ALU_START), 10, 1000);
249 }
250 
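/* Same as above, but for the static/reserved multicast table, which is
 * driven through REG_SW_ALU_STAT_CTRL__4 and ALU_STAT_START.
 */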
251 static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
252 {
253 	unsigned int val;
254 
255 	return regmap_read_poll_timeout(ksz_regmap_32(dev),
256 					REG_SW_ALU_STAT_CTRL__4,
257 					val, !(val & ALU_STAT_START),
258 					10, 1000);
259 }
260 
261 int ksz9477_reset_switch(struct ksz_device *dev)
262 {
263 	u8 data8;
264 	u32 data32;
265 
266 	/* reset switch */
267 	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
268 
269 	/* turn off SPI DO Edge select */
270 	regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
271 			   SPI_AUTO_EDGE_DETECTION, 0);
272 
273 	/* default configuration */
274 	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
275 	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
276 	      SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
277 	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
278 
279 	/* disable interrupts */
280 	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
281 	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
282 	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
283 
284 	/* KSZ9893 compatible chips do not support refclk configuration */
285 	if (dev->chip_id == KSZ9893_CHIP_ID ||
286 	    dev->chip_id == KSZ8563_CHIP_ID ||
287 	    dev->chip_id == KSZ9563_CHIP_ID)
288 		return 0;
289 
290 	data8 = SW_ENABLE_REFCLKO;
291 	if (dev->synclko_disable)
292 		data8 = 0;
293 	else if (dev->synclko_125)
294 		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
295 	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
296 
297 	return 0;
298 }
299 
300 void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
301 {
302 	struct ksz_port *p = &dev->ports[port];
303 	unsigned int val;
304 	u32 data;
305 	int ret;
306 
307 	/* retain the flush/freeze bit */
308 	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
309 	data |= MIB_COUNTER_READ;
310 	data |= (addr << MIB_COUNTER_INDEX_S);
311 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
312 
313 	ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
314 			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
315 			val, !(val & MIB_COUNTER_READ), 10, 1000);
316 	/* failed to read the MIB counter; give up */
317 	if (ret) {
318 		dev_dbg(dev->dev, "Failed to get MIB\n");
319 		return;
320 	}
321 
322 	/* count resets upon read */
323 	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
324 	*cnt += data;
325 }
326 
327 void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
328 		       u64 *dropped, u64 *cnt)
329 {
330 	addr = dev->info->mib_names[addr].index;
331 	ksz9477_r_mib_cnt(dev, port, addr, cnt);
332 }
333 
334 void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
335 {
336 	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
337 	struct ksz_port *p = &dev->ports[port];
338 
339 	/* enable/disable the port for flush/freeze function */
340 	mutex_lock(&p->mib.cnt_mutex);
341 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);
342 
343 	/* used by MIB counter reading code to know freeze is enabled */
344 	p->freeze = freeze;
345 	mutex_unlock(&p->mib.cnt_mutex);
346 }
347 
348 void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
349 {
350 	struct ksz_port_mib *mib = &dev->ports[port].mib;
351 
352 	/* flush all enabled port MIB counters */
353 	mutex_lock(&mib->cnt_mutex);
354 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
355 		     MIB_COUNTER_FLUSH_FREEZE);
356 	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
357 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
358 	mutex_unlock(&mib->cnt_mutex);
359 }
360 
361 static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
362 				 u16 *data)
363 {
364 	/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
365 	 * and BMSR_ERCAP bits are set.
366 	 */
367 	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
368 		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
369 }
370 
371 int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
372 {
373 	u16 val = 0xffff;
374 	int ret;
375 
376 	/* No real PHY after this. Simulate the PHY.
377 	 * A fixed PHY can be set up in the device tree, but this function is
378 	 * still called for that port during initialization.
379 	 * An RGMII PHY cannot be accessed at all, so a fixed PHY should be
380 	 * used.  For SGMII PHYs the supporting code will be added later.
381 	 */
382 	if (!dev->info->internal_phy[addr]) {
383 		struct ksz_port *p = &dev->ports[addr];
384 
385 		switch (reg) {
386 		case MII_BMCR:
387 			val = 0x1140;
388 			break;
389 		case MII_BMSR:
390 			val = 0x796d;
391 			break;
392 		case MII_PHYSID1:
393 			val = 0x0022;
394 			break;
395 		case MII_PHYSID2:
396 			val = 0x1631;
397 			break;
398 		case MII_ADVERTISE:
399 			val = 0x05e1;
400 			break;
401 		case MII_LPA:
402 			val = 0xc5e1;
403 			break;
404 		case MII_CTRL1000:
405 			val = 0x0700;
406 			break;
407 		case MII_STAT1000:
408 			if (p->phydev.speed == SPEED_1000)
409 				val = 0x3800;
410 			else
411 				val = 0;
412 			break;
413 		}
414 	} else {
415 		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
416 		if (ret)
417 			return ret;
418 
419 		ksz9477_r_phy_quirks(dev, addr, reg, &val);
420 	}
421 
422 	*data = val;
423 
424 	return 0;
425 }
426 
427 int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
428 {
429 	u32 mask, val32;
430 
431 	/* No real PHY after this. */
432 	if (!dev->info->internal_phy[addr])
433 		return 0;
434 
435 	if (reg < 0x10)
436 		return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
437 
438 	/* Errata: When using SPI, I2C, or in-band register access,
439 	 * writes to certain PHY registers should be performed as
440 	 * 32-bit writes instead of 16-bit writes.
441 	 */
442 	val32 = val;
443 	mask = 0xffff;
444 	if ((reg & 1) == 0) {
445 		val32 <<= 16;
446 		mask <<= 16;
447 	}
448 	reg &= ~1;
449 	return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
450 }
451 
452 void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
453 {
454 	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
455 }
456 
457 void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
458 {
459 	const u16 *regs = dev->info->regs;
460 	u8 data;
461 
462 	regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
463 			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
464 			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
465 
466 	if (port < dev->info->port_cnt) {
467 		/* flush individual port */
468 		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
469 		if (!(data & PORT_LEARN_DISABLE))
470 			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
471 				    data | PORT_LEARN_DISABLE);
472 		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
473 		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
474 	} else {
475 		/* flush all */
476 		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
477 	}
478 }
479 
480 int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
481 				bool flag, struct netlink_ext_ack *extack)
482 {
483 	if (flag) {
484 		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
485 			     PORT_VLAN_LOOKUP_VID_0, true);
486 		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
487 	} else {
488 		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
489 		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
490 			     PORT_VLAN_LOOKUP_VID_0, false);
491 	}
492 
493 	return 0;
494 }
495 
496 int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
497 			  const struct switchdev_obj_port_vlan *vlan,
498 			  struct netlink_ext_ack *extack)
499 {
500 	u32 vlan_table[3];
501 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
502 	int err;
503 
504 	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
505 	if (err) {
506 		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
507 		return err;
508 	}
509 
510 	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
511 	if (untagged)
512 		vlan_table[1] |= BIT(port);
513 	else
514 		vlan_table[1] &= ~BIT(port);
515 	vlan_table[1] &= ~(BIT(dev->cpu_port));
516 
517 	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
518 
519 	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
520 	if (err) {
521 		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
522 		return err;
523 	}
524 
525 	/* change PVID */
526 	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
527 		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);
528 
529 	return 0;
530 }
531 
532 int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
533 			  const struct switchdev_obj_port_vlan *vlan)
534 {
535 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
536 	u32 vlan_table[3];
537 	u16 pvid;
538 
539 	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
540 	pvid = pvid & 0xFFF;
541 
542 	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
543 		dev_dbg(dev->dev, "Failed to get vlan table\n");
544 		return -ETIMEDOUT;
545 	}
546 
547 	vlan_table[2] &= ~BIT(port);
548 
549 	if (pvid == vlan->vid)
550 		pvid = 1;
551 
552 	if (untagged)
553 		vlan_table[1] &= ~BIT(port);
554 
555 	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
556 		dev_dbg(dev->dev, "Failed to set vlan table\n");
557 		return -ETIMEDOUT;
558 	}
559 
560 	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
561 
562 	return 0;
563 }
564 
565 int ksz9477_fdb_add(struct ksz_device *dev, int port,
566 		    const unsigned char *addr, u16 vid, struct dsa_db db)
567 {
568 	u32 alu_table[4];
569 	u32 data;
570 	int ret = 0;
571 
572 	mutex_lock(&dev->alu_mutex);
573 
574 	/* find any entry with mac & vid */
575 	data = vid << ALU_FID_INDEX_S;
576 	data |= ((addr[0] << 8) | addr[1]);
577 	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
578 
579 	data = ((addr[2] << 24) | (addr[3] << 16));
580 	data |= ((addr[4] << 8) | addr[5]);
581 	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
582 
583 	/* start read operation */
584 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
585 
586 	/* wait to be finished */
587 	ret = ksz9477_wait_alu_ready(dev);
588 	if (ret) {
589 		dev_dbg(dev->dev, "Failed to read ALU\n");
590 		goto exit;
591 	}
592 
593 	/* read ALU entry */
594 	ksz9477_read_table(dev, alu_table);
595 
596 	/* update ALU entry */
597 	alu_table[0] = ALU_V_STATIC_VALID;
598 	alu_table[1] |= BIT(port);
599 	if (vid)
600 		alu_table[1] |= ALU_V_USE_FID;
601 	alu_table[2] = (vid << ALU_V_FID_S);
602 	alu_table[2] |= ((addr[0] << 8) | addr[1]);
603 	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
604 	alu_table[3] |= ((addr[4] << 8) | addr[5]);
605 
606 	ksz9477_write_table(dev, alu_table);
607 
608 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
609 
610 	/* wait to be finished */
611 	ret = ksz9477_wait_alu_ready(dev);
612 	if (ret)
613 		dev_dbg(dev->dev, "Failed to write ALU\n");
614 
615 exit:
616 	mutex_unlock(&dev->alu_mutex);
617 
618 	return ret;
619 }
620 
621 int ksz9477_fdb_del(struct ksz_device *dev, int port,
622 		    const unsigned char *addr, u16 vid, struct dsa_db db)
623 {
624 	u32 alu_table[4];
625 	u32 data;
626 	int ret = 0;
627 
628 	mutex_lock(&dev->alu_mutex);
629 
630 	/* read any entry with mac & vid */
631 	data = vid << ALU_FID_INDEX_S;
632 	data |= ((addr[0] << 8) | addr[1]);
633 	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
634 
635 	data = ((addr[2] << 24) | (addr[3] << 16));
636 	data |= ((addr[4] << 8) | addr[5]);
637 	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
638 
639 	/* start read operation */
640 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
641 
642 	/* wait to be finished */
643 	ret = ksz9477_wait_alu_ready(dev);
644 	if (ret) {
645 		dev_dbg(dev->dev, "Failed to read ALU\n");
646 		goto exit;
647 	}
648 
649 	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
650 	if (alu_table[0] & ALU_V_STATIC_VALID) {
651 		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
652 		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
653 		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
654 
655 		/* clear forwarding port */
656 		alu_table[1] &= ~BIT(port);
657 
658 		/* if there is no port to forward, clear table */
659 		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
660 			alu_table[0] = 0;
661 			alu_table[1] = 0;
662 			alu_table[2] = 0;
663 			alu_table[3] = 0;
664 		}
665 	} else {
666 		alu_table[0] = 0;
667 		alu_table[1] = 0;
668 		alu_table[2] = 0;
669 		alu_table[3] = 0;
670 	}
671 
672 	ksz9477_write_table(dev, alu_table);
673 
674 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
675 
676 	/* wait to be finished */
677 	ret = ksz9477_wait_alu_ready(dev);
678 	if (ret)
679 		dev_dbg(dev->dev, "Failed to write ALU\n");
680 
681 exit:
682 	mutex_unlock(&dev->alu_mutex);
683 
684 	return ret;
685 }
686 
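/* Unpack a raw 4-word ALU entry into the generic struct alu_struct
 * representation used by the FDB dump code.
 */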
687 static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
688 {
689 	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
690 	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
691 	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
692 	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
693 			ALU_V_PRIO_AGE_CNT_M;
694 	alu->mstp = alu_table[0] & ALU_V_MSTP_M;
695 
696 	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
697 	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
698 	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
699 
700 	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
701 
702 	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
703 	alu->mac[1] = alu_table[2] & 0xFF;
704 	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
705 	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
706 	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
707 	alu->mac[5] = alu_table[3] & 0xFF;
708 }
709 
710 int ksz9477_fdb_dump(struct ksz_device *dev, int port,
711 		     dsa_fdb_dump_cb_t *cb, void *data)
712 {
713 	int ret = 0;
714 	u32 ksz_data;
715 	u32 alu_table[4];
716 	struct alu_struct alu;
717 	int timeout;
718 
719 	mutex_lock(&dev->alu_mutex);
720 
721 	/* start ALU search */
722 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
723 
724 	do {
725 		timeout = 1000;
726 		do {
727 			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
728 			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
729 				break;
730 			usleep_range(1, 10);
731 		} while (timeout-- > 0);
732 
733 		if (timeout < 0) {
734 			dev_dbg(dev->dev, "Failed to search ALU\n");
735 			ret = -ETIMEDOUT;
736 			goto exit;
737 		}
738 
739 		if (!(ksz_data & ALU_VALID))
740 			continue;
741 
742 		/* read ALU table */
743 		ksz9477_read_table(dev, alu_table);
744 
745 		ksz9477_convert_alu(&alu, alu_table);
746 
747 		if (alu.port_forward & BIT(port)) {
748 			ret = cb(alu.mac, alu.fid, alu.is_static, data);
749 			if (ret)
750 				goto exit;
751 		}
752 	} while (ksz_data & ALU_START);
753 
754 exit:
755 
756 	/* stop ALU search */
757 	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
758 
759 	mutex_unlock(&dev->alu_mutex);
760 
761 	return ret;
762 }
763 
764 int ksz9477_mdb_add(struct ksz_device *dev, int port,
765 		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
766 {
767 	u32 static_table[4];
768 	const u8 *shifts;
769 	const u32 *masks;
770 	u32 data;
771 	int index;
772 	u32 mac_hi, mac_lo;
773 	int err = 0;
774 
775 	shifts = dev->info->shifts;
776 	masks = dev->info->masks;
777 
778 	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
779 	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
780 	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
781 
782 	mutex_lock(&dev->alu_mutex);
783 
784 	for (index = 0; index < dev->info->num_statics; index++) {
785 		/* find an empty slot or an existing matching entry */
786 		data = (index << shifts[ALU_STAT_INDEX]) |
787 			masks[ALU_STAT_READ] | ALU_STAT_START;
788 		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
789 
790 		/* wait to be finished */
791 		err = ksz9477_wait_alu_sta_ready(dev);
792 		if (err) {
793 			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
794 			goto exit;
795 		}
796 
797 		/* read ALU static table */
798 		ksz9477_read_table(dev, static_table);
799 
800 		if (static_table[0] & ALU_V_STATIC_VALID) {
801 			/* check this has same vid & mac address */
802 			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
803 			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
804 			    static_table[3] == mac_lo) {
805 				/* found matching one */
806 				break;
807 			}
808 		} else {
809 			/* found empty one */
810 			break;
811 		}
812 	}
813 
814 	/* no available entry */
815 	if (index == dev->info->num_statics) {
816 		err = -ENOSPC;
817 		goto exit;
818 	}
819 
820 	/* add entry */
821 	static_table[0] = ALU_V_STATIC_VALID;
822 	static_table[1] |= BIT(port);
823 	if (mdb->vid)
824 		static_table[1] |= ALU_V_USE_FID;
825 	static_table[2] = (mdb->vid << ALU_V_FID_S);
826 	static_table[2] |= mac_hi;
827 	static_table[3] = mac_lo;
828 
829 	ksz9477_write_table(dev, static_table);
830 
831 	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
832 	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
833 
834 	/* wait to be finished */
835 	if (ksz9477_wait_alu_sta_ready(dev))
836 		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");
837 
838 exit:
839 	mutex_unlock(&dev->alu_mutex);
840 	return err;
841 }
842 
843 int ksz9477_mdb_del(struct ksz_device *dev, int port,
844 		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
845 {
846 	u32 static_table[4];
847 	const u8 *shifts;
848 	const u32 *masks;
849 	u32 data;
850 	int index;
851 	int ret = 0;
852 	u32 mac_hi, mac_lo;
853 
854 	shifts = dev->info->shifts;
855 	masks = dev->info->masks;
856 
857 	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
858 	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
859 	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
860 
861 	mutex_lock(&dev->alu_mutex);
862 
863 	for (index = 0; index < dev->info->num_statics; index++) {
864 		/* look for the matching entry */
865 		data = (index << shifts[ALU_STAT_INDEX]) |
866 			masks[ALU_STAT_READ] | ALU_STAT_START;
867 		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
868 
869 		/* wait to be finished */
870 		ret = ksz9477_wait_alu_sta_ready(dev);
871 		if (ret) {
872 			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
873 			goto exit;
874 		}
875 
876 		/* read ALU static table */
877 		ksz9477_read_table(dev, static_table);
878 
879 		if (static_table[0] & ALU_V_STATIC_VALID) {
880 			/* check this has same vid & mac address */
882 			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
883 			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
884 			    static_table[3] == mac_lo) {
885 				/* found matching one */
886 				break;
887 			}
888 		}
889 	}
890 
891 	/* no matching entry found */
892 	if (index == dev->info->num_statics)
893 		goto exit;
894 
895 	/* clear port */
896 	static_table[1] &= ~BIT(port);
897 
898 	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
899 		/* delete entry */
900 		static_table[0] = 0;
901 		static_table[1] = 0;
902 		static_table[2] = 0;
903 		static_table[3] = 0;
904 	}
905 
906 	ksz9477_write_table(dev, static_table);
907 
908 	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
909 	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
910 
911 	/* wait to be finished */
912 	ret = ksz9477_wait_alu_sta_ready(dev);
913 	if (ret)
914 		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");
915 
916 exit:
917 	mutex_unlock(&dev->alu_mutex);
918 
919 	return ret;
920 }
921 
922 int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
923 			    struct dsa_mall_mirror_tc_entry *mirror,
924 			    bool ingress, struct netlink_ext_ack *extack)
925 {
926 	u8 data;
927 	int p;
928 
929 	/* Limit to one sniffer port.
930 	 * Check if any of the ports is already set up for sniffing;
931 	 * if so, instruct the user to remove the previous entry and exit.
932 	 */
933 	for (p = 0; p < dev->info->port_cnt; p++) {
934 		/* Skip the current sniffing port */
935 		if (p == mirror->to_local_port)
936 			continue;
937 
938 		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
939 
940 		if (data & PORT_MIRROR_SNIFFER) {
941 			NL_SET_ERR_MSG_MOD(extack,
942 					   "Sniffer port is already configured, delete existing rules & retry");
943 			return -EBUSY;
944 		}
945 	}
946 
947 	if (ingress)
948 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
949 	else
950 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
951 
952 	/* configure mirror port */
953 	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
954 		     PORT_MIRROR_SNIFFER, true);
955 
956 	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
957 
958 	return 0;
959 }
960 
961 void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
962 			     struct dsa_mall_mirror_tc_entry *mirror)
963 {
964 	bool in_use = false;
965 	u8 data;
966 	int p;
967 
968 	if (mirror->ingress)
969 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
970 	else
971 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
972 
974 	/* Check if any of the ports still refers to the sniffer port */
975 	for (p = 0; p < dev->info->port_cnt; p++) {
976 		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
977 
978 		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
979 			in_use = true;
980 			break;
981 		}
982 	}
983 
984 	/* delete sniffing if there are no other mirroring rules */
985 	if (!in_use)
986 		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
987 			     PORT_MIRROR_SNIFFER, false);
988 }
989 
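/* Work out the interface mode of a MAC-only (no internal PHY) port by
 * reading back the gigabit and xMII mode settings from the hardware.
 */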
990 static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
991 {
992 	phy_interface_t interface;
993 	bool gbit;
994 
995 	if (dev->info->internal_phy[port])
996 		return PHY_INTERFACE_MODE_NA;
997 
998 	gbit = ksz_get_gbit(dev, port);
999 
1000 	interface = ksz_get_xmii(dev, port, gbit);
1001 
1002 	return interface;
1003 }
1004 
1005 void ksz9477_get_caps(struct ksz_device *dev, int port,
1006 		      struct phylink_config *config)
1007 {
1008 	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
1009 				   MAC_SYM_PAUSE;
1010 
1011 	if (dev->info->gbit_capable[port])
1012 		config->mac_capabilities |= MAC_1000FD;
1013 }
1014 
1015 int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
1016 {
1017 	u32 secs = msecs / 1000;
1018 	u8 value;
1019 	u8 data;
1020 	int ret;
1021 
1022 	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
1023 
1024 	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
1025 	if (ret < 0)
1026 		return ret;
1027 
1028 	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
1029 
1030 	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
1031 	if (ret < 0)
1032 		return ret;
1033 
1034 	value &= ~SW_AGE_CNT_M;
1035 	value |= FIELD_PREP(SW_AGE_CNT_M, data);
1036 
1037 	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
1038 }
1039 
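/* Configure how many egress priority queues the port uses, based on
 * the number of TX queues supported by this switch family.
 */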
1040 void ksz9477_port_queue_split(struct ksz_device *dev, int port)
1041 {
1042 	u8 data;
1043 
1044 	if (dev->info->num_tx_queues == 8)
1045 		data = PORT_EIGHT_QUEUE;
1046 	else if (dev->info->num_tx_queues == 4)
1047 		data = PORT_FOUR_QUEUE;
1048 	else if (dev->info->num_tx_queues == 2)
1049 		data = PORT_TWO_QUEUE;
1050 	else
1051 		data = PORT_SINGLE_QUEUE;
1052 
1053 	ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
1054 }
1055 
1056 void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
1057 {
1058 	struct dsa_switch *ds = dev->ds;
1059 	u16 data16;
1060 	u8 member;
1061 
1062 	/* enable tag tail for host port */
1063 	if (cpu_port)
1064 		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
1065 			     true);
1066 
1067 	ksz9477_port_queue_split(dev, port);
1068 
1069 	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
1070 
1071 	/* set back pressure */
1072 	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
1073 
1074 	/* enable broadcast storm limit */
1075 	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
1076 
1077 	/* disable DiffServ priority */
1078 	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
1079 
1080 	/* replace priority */
1081 	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
1082 		     false);
1083 	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
1084 			   MTI_PVID_REPLACE, false);
1085 
1086 	/* enable 802.1p priority */
1087 	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
1088 
1089 	/* force flow control for non-PHY ports only */
1090 	ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
1091 		     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
1092 		     !dev->info->internal_phy[port]);
1093 
1094 	if (cpu_port)
1095 		member = dsa_user_ports(ds);
1096 	else
1097 		member = BIT(dsa_upstream_port(ds, port));
1098 
1099 	ksz9477_cfg_port_member(dev, port, member);
1100 
1101 	/* clear pending interrupts */
1102 	if (dev->info->internal_phy[port])
1103 		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
1104 
1105 	ksz9477_port_acl_init(dev, port);
1106 
1107 	/* clear pending wake flags */
1108 	ksz9477_handle_wake_reason(dev, port);
1109 }
1110 
1111 void ksz9477_config_cpu_port(struct dsa_switch *ds)
1112 {
1113 	struct ksz_device *dev = ds->priv;
1114 	struct ksz_port *p;
1115 	int i;
1116 
1117 	for (i = 0; i < dev->info->port_cnt; i++) {
1118 		if (dsa_is_cpu_port(ds, i) &&
1119 		    (dev->info->cpu_ports & (1 << i))) {
1120 			phy_interface_t interface;
1121 			const char *prev_msg;
1122 			const char *prev_mode;
1123 
1124 			dev->cpu_port = i;
1125 			p = &dev->ports[i];
1126 
1127 			/* Read from XMII register to determine host port
1128 			 * interface.  If it is set specifically in the device
1129 			 * tree, note the difference to help debugging.
1130 			 */
1131 			interface = ksz9477_get_interface(dev, i);
1132 			if (!p->interface) {
1133 				if (dev->compat_interface) {
1134 					dev_warn(dev->dev,
1135 						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
1136 						 "Please update your device tree.\n",
1137 						 i);
1138 					p->interface = dev->compat_interface;
1139 				} else {
1140 					p->interface = interface;
1141 				}
1142 			}
1143 			if (interface && interface != p->interface) {
1144 				prev_msg = " instead of ";
1145 				prev_mode = phy_modes(interface);
1146 			} else {
1147 				prev_msg = "";
1148 				prev_mode = "";
1149 			}
1150 			dev_info(dev->dev,
1151 				 "Port%d: using phy mode %s%s%s\n",
1152 				 i,
1153 				 phy_modes(p->interface),
1154 				 prev_msg,
1155 				 prev_mode);
1156 
1157 			/* enable cpu port */
1158 			ksz9477_port_setup(dev, i, true);
1159 		}
1160 	}
1161 
1162 	for (i = 0; i < dev->info->port_cnt; i++) {
1163 		if (i == dev->cpu_port)
1164 			continue;
1165 		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
1166 	}
1167 }
1168 
1169 int ksz9477_enable_stp_addr(struct ksz_device *dev)
1170 {
1171 	const u32 *masks;
1172 	u32 data;
1173 	int ret;
1174 
1175 	masks = dev->info->masks;
1176 
1177 	/* Enable Reserved multicast table */
1178 	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
1179 
1180 	/* Set the Override bit for forwarding BPDU packet to CPU */
1181 	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
1182 			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
1183 	if (ret < 0)
1184 		return ret;
1185 
1186 	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
1187 
1188 	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
1189 	if (ret < 0)
1190 		return ret;
1191 
1192 	/* wait to be finished */
1193 	ret = ksz9477_wait_alu_sta_ready(dev);
1194 	if (ret < 0) {
1195 		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
1196 		return ret;
1197 	}
1198 
1199 	return 0;
1200 }
1201 
1202 int ksz9477_setup(struct dsa_switch *ds)
1203 {
1204 	struct ksz_device *dev = ds->priv;
1205 	int ret = 0;
1206 
1207 	ds->mtu_enforcement_ingress = true;
1208 
1209 	/* Required for port partitioning. */
1210 	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
1211 		      true);
1212 
1213 	/* The length check does not work correctly with tail tagging, so disable it. */
1214 	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
1215 
1216 	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
1217 	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
1218 
1219 	/* Now we can configure default MTU value */
1220 	ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
1221 				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
1222 	if (ret)
1223 		return ret;
1224 
1225 	/* queue based egress rate limit */
1226 	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
1227 
1228 	/* enable global MIB counter freeze function */
1229 	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
1230 
1231 	return 0;
1232 }
1233 
1234 u32 ksz9477_get_port_addr(int port, int offset)
1235 {
1236 	return PORT_CTRL_ADDR(port, offset);
1237 }
1238 
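/* Program the credit increment value used by the credit-based shaper.
 * The value is scaled down by 256 to fit the 16-bit
 * REG_PORT_MTI_CREDIT_INCREMENT register.
 */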
1239 int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
1240 {
1241 	val = val >> 8;
1242 
1243 	return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
1244 }
1245 
1246 /* The KSZ9477 provides the following HW features to accelerate
1247  * HSR frame handling:
1248  *
1249  * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH
1250  * 2. RX PACKET DUPLICATION DISCARDING
1251  * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING
1252  *
1253  * Only feature 1 has a corresponding NETIF_F_* flag available.
1254  *
1255  * Features 2 and 3 are "best effort" - i.e. they will work correctly
1256  * most of the time, but it may happen that some frames will not be
1257  * caught - to be more specific, there is a race condition in hardware
1258  * such that, when duplicate packets are received on member ports very
1259  * close in time to each other, the hardware fails to detect that they
1260  * are duplicates.
1261  *
1262  * Hence, the SW needs to handle those special cases. However, the
1263  * speedup is considerable when the above features are used.
1264  *
1265  * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames
1266  * can be forwarded in the switch fabric between HSR ports.
1267  */
1268 #define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD)
1269 
1270 void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
1271 {
1272 	struct ksz_device *dev = ds->priv;
1273 	struct net_device *user;
1274 	struct dsa_port *hsr_dp;
1275 	u8 data, hsr_ports = 0;
1276 
1277 	/* Program which port(s) shall support HSR */
1278 	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port));
1279 
1280 	/* Forward frames between HSR ports (i.e. bridge together HSR ports) */
1281 	if (dev->hsr_ports) {
1282 		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
1283 			hsr_ports |= BIT(hsr_dp->index);
1284 
1285 		hsr_ports |= BIT(dsa_upstream_port(ds, port));
1286 		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
1287 			ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports);
1288 	}
1289 
1290 	if (!dev->hsr_ports) {
1291 		/* Enable discarding of received HSR frames */
1292 		ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data);
1293 		data |= HSR_DUPLICATE_DISCARD;
1294 		data &= ~HSR_NODE_UNICAST;
1295 		ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data);
1296 	}
1297 
1298 	/* Enable per port self-address filtering.
1299 	 * The global self-address filtering has already been enabled in the
1300 	 * ksz9477_reset_switch() function.
1301 	 */
1302 	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);
1303 
1304 	/* Set up HW-supported features for LAN HSR ports */
1305 	user = dsa_to_port(ds, port)->user;
1306 	user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
1307 }
1308 
1309 void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
1310 {
1311 	struct ksz_device *dev = ds->priv;
1312 
1313 	/* Clear port HSR support */
1314 	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0);
1315 
1316 	/* Disable forwarding frames between HSR ports */
1317 	ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port)));
1318 
1319 	/* Disable per port self-address filtering */
1320 	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false);
1321 }
1322 
1323 int ksz9477_switch_init(struct ksz_device *dev)
1324 {
1325 	u8 data8;
1326 	int ret;
1327 
1328 	dev->port_mask = (1 << dev->info->port_cnt) - 1;
1329 
1330 	/* turn off SPI DO Edge select */
1331 	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
1332 	if (ret)
1333 		return ret;
1334 
1335 	data8 &= ~SPI_AUTO_EDGE_DETECTION;
1336 	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
1337 	if (ret)
1338 		return ret;
1339 
1340 	return 0;
1341 }
1342 
1343 void ksz9477_switch_exit(struct ksz_device *dev)
1344 {
1345 	ksz9477_reset_switch(dev);
1346 }
1347 
1348 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
1349 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
1350 MODULE_LICENSE("GPL");
1351