// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip KSZ9477 switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz9477_reg.h"
#include "ksz_common.h"
#include "ksz9477.h"

static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
	regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
}

static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
			 bool set)
{
	regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], addr, bits, set ? bits : 0);
}

static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
			       u32 bits, bool set)
{
	regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset),
			   bits, set ? bits : 0);
}

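/* The frame size limit is a single switch-wide register (REG_SW_MTU__2)
 * rather than a per-port setting, so only the CPU port programs it.  The
 * value includes the VLAN header and FCS overhead on top of the MTU.
 */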
int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
{
	u16 frame_size;

	if (!dsa_is_cpu_port(dev->ds, port))
		return 0;

	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
				  REG_SW_MTU_MASK, frame_size);
}

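/* Poll until the hardware clears VLAN_START, i.e. the previously triggered
 * VLAN table read or write has completed (10 us poll interval, 1 ms timeout).
 */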
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[0], REG_SW_VLAN_CTRL,
					val, !(val & VLAN_START), 10, 1000);
}

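/* VLAN table entries are accessed indirectly: select the entry by VID,
 * trigger a read or write through REG_SW_VLAN_CTRL, wait for completion and
 * then transfer the three 32-bit entry words.
 */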
static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read vlan table\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
				  u32 *vlan_table)
{
	int ret;

	mutex_lock(&dev->vlan_mutex);

	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);

	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);

	/* wait to be cleared */
	ret = ksz9477_wait_vlan_ctrl_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to write vlan table\n");
		goto exit;
	}

	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);

	/* update vlan cache table */
	dev->vlan_cache[vid].table[0] = vlan_table[0];
	dev->vlan_cache[vid].table[1] = vlan_table[1];
	dev->vlan_cache[vid].table[2] = vlan_table[2];

exit:
	mutex_unlock(&dev->vlan_mutex);

	return ret;
}

static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
{
	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
}

static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
{
	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
}

static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[2], REG_SW_ALU_CTRL__4,
					val, !(val & ALU_START), 10, 1000);
}

static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
	unsigned int val;

	return regmap_read_poll_timeout(dev->regmap[2],
					REG_SW_ALU_STAT_CTRL__4,
					val, !(val & ALU_STAT_START),
					10, 1000);
}

int ksz9477_reset_switch(struct ksz_device *dev)
{
	u8 data8;
	u32 data32;

	/* reset switch */
	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);

	/* turn off SPI DO Edge select */
	regmap_update_bits(dev->regmap[0], REG_SW_GLOBAL_SERIAL_CTRL_0,
			   SPI_AUTO_EDGE_DETECTION, 0);

	/* default configuration */
	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
	      SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);

	/* disable interrupts */
	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);

	/* KSZ9893 compatible chips do not support refclk configuration */
	if (dev->chip_id == KSZ9893_CHIP_ID ||
	    dev->chip_id == KSZ8563_CHIP_ID ||
	    dev->chip_id == KSZ9563_CHIP_ID)
		return 0;

	data8 = SW_ENABLE_REFCLKO;
	if (dev->synclko_disable)
		data8 = 0;
	else if (dev->synclko_125)
		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);

	return 0;
}

void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
	struct ksz_port *p = &dev->ports[port];
	unsigned int val;
	u32 data;
	int ret;

	/* retain the flush/freeze bit */
	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	data |= MIB_COUNTER_READ;
	data |= (addr << MIB_COUNTER_INDEX_S);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);

	ret = regmap_read_poll_timeout(dev->regmap[2],
			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
			val, !(val & MIB_COUNTER_READ), 10, 1000);
	/* failed to read MIB. get out of loop */
	if (ret) {
		dev_dbg(dev->dev, "Failed to get MIB\n");
		return;
	}

	/* count resets upon read */
	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
	*cnt += data;
}

void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
		       u64 *dropped, u64 *cnt)
{
	addr = dev->info->mib_names[addr].index;
	ksz9477_r_mib_cnt(dev, port, addr, cnt);
}

void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
	struct ksz_port *p = &dev->ports[port];

	/* enable/disable the port for flush/freeze function */
	mutex_lock(&p->mib.cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);

	/* used by MIB counter reading code to know freeze is enabled */
	p->freeze = freeze;
	mutex_unlock(&p->mib.cnt_mutex);
}

void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;

	/* flush all enabled port MIB counters */
	mutex_lock(&mib->cnt_mutex);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
		     MIB_COUNTER_FLUSH_FREEZE);
	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
	mutex_unlock(&mib->cnt_mutex);
}

static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
				 u16 *data)
{
	/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
	 * and BMSR_ERCAP bits are still reported as set.
	 */
	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}

int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
	u16 val = 0xffff;
	int ret;

	/* No real PHY after this, so simulate one.
	 * A fixed PHY can be set up in the device tree, but this function is
	 * still called for that port during initialization.
	 * An RGMII PHY cannot be accessed at all, so the fixed PHY should be
	 * used.  Supporting code for SGMII PHYs will be added later.
	 */
	if (!dev->info->internal_phy[addr]) {
		struct ksz_port *p = &dev->ports[addr];

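		/* Report fixed register values that emulate a link-up PHY
		 * with autonegotiation completed, so the MAC-only port is
		 * accepted by the PHY library.
		 */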
		switch (reg) {
		case MII_BMCR:
			val = 0x1140;
			break;
		case MII_BMSR:
			val = 0x796d;
			break;
		case MII_PHYSID1:
			val = 0x0022;
			break;
		case MII_PHYSID2:
			val = 0x1631;
			break;
		case MII_ADVERTISE:
			val = 0x05e1;
			break;
		case MII_LPA:
			val = 0xc5e1;
			break;
		case MII_CTRL1000:
			val = 0x0700;
			break;
		case MII_STAT1000:
			if (p->phydev.speed == SPEED_1000)
				val = 0x3800;
			else
				val = 0;
			break;
		}
	} else {
		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
		if (ret)
			return ret;

		ksz9477_r_phy_quirks(dev, addr, reg, &val);
	}

	*data = val;

	return 0;
}

int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
	/* No real PHY after this. */
	if (!dev->info->internal_phy[addr])
		return 0;

	return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}

void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}

void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
	const u16 *regs = dev->info->regs;
	u8 data;

	regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);

	if (port < dev->info->port_cnt) {
		/* flush individual port */
		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
		if (!(data & PORT_LEARN_DISABLE))
			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
				    data | PORT_LEARN_DISABLE);
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
	} else {
		/* flush all */
		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
	}
}

int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
				bool flag, struct netlink_ext_ack *extack)
{
	if (flag) {
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, true);
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
	} else {
		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
			     PORT_VLAN_LOOKUP_VID_0, false);
	}

	return 0;
}

int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan,
			  struct netlink_ext_ack *extack)
{
	u32 vlan_table[3];
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	int err;

	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
		return err;
	}

	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
	if (untagged)
		vlan_table[1] |= BIT(port);
	else
		vlan_table[1] &= ~BIT(port);
	vlan_table[1] &= ~(BIT(dev->cpu_port));

	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);

	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
		return err;
	}

	/* change PVID */
	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);

	return 0;
}

int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	u32 vlan_table[3];
	u16 pvid;

	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
	pvid = pvid & 0xFFF;

	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to get vlan table\n");
		return -ETIMEDOUT;
	}

	vlan_table[2] &= ~BIT(port);

	if (pvid == vlan->vid)
		pvid = 1;

	if (untagged)
		vlan_table[1] &= ~BIT(port);

	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
		dev_dbg(dev->dev, "Failed to set vlan table\n");
		return -ETIMEDOUT;
	}

	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);

	return 0;
}

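/* Adding an FDB entry goes through the indirect ALU (address lookup)
 * interface: look up the entry for this MAC/FID, add the port to its
 * forwarding map and write it back as a valid static entry.
 */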
int ksz9477_fdb_add(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* find any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	/* read ALU entry */
	ksz9477_read_table(dev, alu_table);

	/* update ALU entry */
	alu_table[0] = ALU_V_STATIC_VALID;
	alu_table[1] |= BIT(port);
	if (vid)
		alu_table[1] |= ALU_V_USE_FID;
	alu_table[2] = (vid << ALU_V_FID_S);
	alu_table[2] |= ((addr[0] << 8) | addr[1]);
	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
	alu_table[3] |= ((addr[4] << 8) | addr[5]);

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_fdb_del(struct ksz_device *dev, int port,
		    const unsigned char *addr, u16 vid, struct dsa_db db)
{
	u32 alu_table[4];
	u32 data;
	int ret = 0;

	mutex_lock(&dev->alu_mutex);

	/* read any entry with mac & vid */
	data = vid << ALU_FID_INDEX_S;
	data |= ((addr[0] << 8) | addr[1]);
	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);

	data = ((addr[2] << 24) | (addr[3] << 16));
	data |= ((addr[4] << 8) | addr[5]);
	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);

	/* start read operation */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret) {
		dev_dbg(dev->dev, "Failed to read ALU\n");
		goto exit;
	}

	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
	if (alu_table[0] & ALU_V_STATIC_VALID) {
		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);

		/* clear forwarding port */
		alu_table[1] &= ~BIT(port);

		/* if there is no port to forward, clear table */
		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
			alu_table[0] = 0;
			alu_table[1] = 0;
			alu_table[2] = 0;
			alu_table[3] = 0;
		}
	} else {
		alu_table[0] = 0;
		alu_table[1] = 0;
		alu_table[2] = 0;
		alu_table[3] = 0;
	}

	ksz9477_write_table(dev, alu_table);

	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);

	/* wait to be finished */
	ret = ksz9477_wait_alu_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to write ALU\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

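/* Unpack a raw four-word ALU entry into the generic struct alu_struct form. */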
static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
{
	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
			ALU_V_PRIO_AGE_CNT_M;
	alu->mstp = alu_table[0] & ALU_V_MSTP_M;

	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;

	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;

	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
	alu->mac[1] = alu_table[2] & 0xFF;
	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
	alu->mac[5] = alu_table[3] & 0xFF;
}

int ksz9477_fdb_dump(struct ksz_device *dev, int port,
		     dsa_fdb_dump_cb_t *cb, void *data)
{
	int ret = 0;
	u32 ksz_data;
	u32 alu_table[4];
	struct alu_struct alu;
	int timeout;

	mutex_lock(&dev->alu_mutex);

	/* start ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);

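	/* The ALU search walks the table by itself: each iteration waits for
	 * either a valid entry (ALU_VALID) or the end of the search
	 * (ALU_START cleared), and only entries forwarding to this port are
	 * reported to the caller.
	 */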
	do {
		timeout = 1000;
		do {
			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
				break;
			usleep_range(1, 10);
		} while (timeout-- > 0);

		if (!timeout) {
			dev_dbg(dev->dev, "Failed to search ALU\n");
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!(ksz_data & ALU_VALID))
			continue;

		/* read ALU table */
		ksz9477_read_table(dev, alu_table);

		ksz9477_convert_alu(&alu, alu_table);

		if (alu.port_forward & BIT(port)) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				goto exit;
		}
	} while (ksz_data & ALU_START);

exit:

	/* stop ALU search */
	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);

	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_mdb_add(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	u32 mac_hi, mac_lo;
	int err = 0;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* find empty slot first */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		err = ksz9477_wait_alu_sta_ready(dev);
		if (err) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */
			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		} else {
			/* found empty one */
			break;
		}
	}

	/* no available entry */
	if (index == dev->info->num_statics) {
		err = -ENOSPC;
		goto exit;
	}

	/* add entry */
	static_table[0] = ALU_V_STATIC_VALID;
	static_table[1] |= BIT(port);
	if (mdb->vid)
		static_table[1] |= ALU_V_USE_FID;
	static_table[2] = (mdb->vid << ALU_V_FID_S);
	static_table[2] |= mac_hi;
	static_table[3] = mac_lo;

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	if (ksz9477_wait_alu_sta_ready(dev))
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);
	return err;
}

int ksz9477_mdb_del(struct ksz_device *dev, int port,
		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
	u32 static_table[4];
	const u8 *shifts;
	const u32 *masks;
	u32 data;
	int index;
	int ret = 0;
	u32 mac_hi, mac_lo;

	shifts = dev->info->shifts;
	masks = dev->info->masks;

	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);

	mutex_lock(&dev->alu_mutex);

	for (index = 0; index < dev->info->num_statics; index++) {
		/* look for a matching entry */
		data = (index << shifts[ALU_STAT_INDEX]) |
			masks[ALU_STAT_READ] | ALU_STAT_START;
		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

		/* wait to be finished */
		ret = ksz9477_wait_alu_sta_ready(dev);
		if (ret) {
			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
			goto exit;
		}

		/* read ALU static table */
		ksz9477_read_table(dev, static_table);

		if (static_table[0] & ALU_V_STATIC_VALID) {
			/* check this has same vid & mac address */

			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
			    static_table[3] == mac_lo) {
				/* found matching one */
				break;
			}
		}
	}

	/* no matching entry found */
	if (index == dev->info->num_statics)
		goto exit;

	/* clear port */
	static_table[1] &= ~BIT(port);

	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
		/* delete entry */
		static_table[0] = 0;
		static_table[1] = 0;
		static_table[2] = 0;
		static_table[3] = 0;
	}

	ksz9477_write_table(dev, static_table);

	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret)
		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");

exit:
	mutex_unlock(&dev->alu_mutex);

	return ret;
}

int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
			    struct dsa_mall_mirror_tc_entry *mirror,
			    bool ingress, struct netlink_ext_ack *extack)
{
	u8 data;
	int p;

	/* Limit to one sniffer port.
	 * Check whether any of the ports is already set up for sniffing;
	 * if so, instruct the user to remove the previous entry and exit.
	 */
	for (p = 0; p < dev->info->port_cnt; p++) {
		/* Skip the current sniffing port */
		if (p == mirror->to_local_port)
			continue;

		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if (data & PORT_MIRROR_SNIFFER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Sniffer port is already configured, delete existing rules & retry");
			return -EBUSY;
		}
	}

	if (ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);

	/* configure mirror port */
	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
		     PORT_MIRROR_SNIFFER, true);

	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

	return 0;
}

void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
			     struct dsa_mall_mirror_tc_entry *mirror)
{
	bool in_use = false;
	u8 data;
	int p;

	if (mirror->ingress)
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
	else
		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);

	/* Check whether any of the ports still refers to the sniffer port */
	for (p = 0; p < dev->info->port_cnt; p++) {
		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);

		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
			in_use = true;
			break;
		}
	}

	/* delete sniffing if there are no other mirroring rules */
	if (!in_use)
		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
			     PORT_MIRROR_SNIFFER, false);
}

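/* Derive the interface mode of a MAC-only port from the XMII configuration
 * registers; ports with an internal PHY simply report PHY_INTERFACE_MODE_NA.
 */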
static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
	phy_interface_t interface;
	bool gbit;

	if (dev->info->internal_phy[port])
		return PHY_INTERFACE_MODE_NA;

	gbit = ksz_get_gbit(dev, port);

	interface = ksz_get_xmii(dev, port, gbit);

	return interface;
}

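/* Write a PHY MMD register through the indirect access registers: select the
 * MMD device and register address first, then switch to data mode and write
 * the value.
 */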
static void ksz9477_port_mmd_write(struct ksz_device *dev, int port,
				   u8 dev_addr, u16 reg_addr, u16 val)
{
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_INDEX, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg_addr);
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
		     MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, dev_addr));
	ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, val);
}

static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
{
	/* Apply PHY settings to address errata listed in
	 * KSZ9477, KSZ9897, KSZ9896, KSZ9567, KSZ8565
	 * Silicon Errata and Data Sheet Clarification documents:
	 *
	 * Register settings are needed to improve PHY receive performance
	 */
	ksz9477_port_mmd_write(dev, port, 0x01, 0x6f, 0xdd0b);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x8f, 0x6032);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x9d, 0x248c);
	ksz9477_port_mmd_write(dev, port, 0x01, 0x75, 0x0060);
	ksz9477_port_mmd_write(dev, port, 0x01, 0xd3, 0x7777);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x06, 0x3008);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x08, 0x2001);

	/* Transmit waveform amplitude can be improved
	 * (1000BASE-T, 100BASE-TX, 10BASE-Te)
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x04, 0x00d0);

	/* Energy Efficient Ethernet (EEE) feature select must
	 * be manually disabled (except on KSZ8565 which is 100Mbit)
	 */
	if (dev->info->gbit_capable[port])
		ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);

	/* Register settings are required to meet data sheet
	 * supply current specifications
	 */
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x13, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x14, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x15, 0x6eff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x16, 0xe6ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x17, 0x00ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x18, 0x43ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x19, 0xc3ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1a, 0x6fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1b, 0x07ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1c, 0x0fff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1d, 0xe7ff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x1e, 0xefff);
	ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}

void ksz9477_get_caps(struct ksz_device *dev, int port,
		      struct phylink_config *config)
{
	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
				   MAC_SYM_PAUSE;

	if (dev->info->gbit_capable[port])
		config->mac_capabilities |= MAC_1000FD;
}

int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
	u32 secs = msecs / 1000;
	u8 value;
	u8 data;
	int ret;

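	/* The ageing period is an 11-bit value in seconds: bits 7:0 live in
	 * REG_SW_LUE_CTRL_3 and bits 10:8 in the SW_AGE_CNT field of
	 * REG_SW_LUE_CTRL_0.
	 */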
	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);

	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
	if (ret < 0)
		return ret;

	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);

	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
	if (ret < 0)
		return ret;

	value &= ~SW_AGE_CNT_M;
	value |= FIELD_PREP(SW_AGE_CNT_M, data);

	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}

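/* Program how many egress queues the port uses, based on the per-chip
 * num_tx_queues value (8, 4, 2 or 1).
 */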
void ksz9477_port_queue_split(struct ksz_device *dev, int port)
{
	u8 data;

	if (dev->info->num_tx_queues == 8)
		data = PORT_EIGHT_QUEUE;
	else if (dev->info->num_tx_queues == 4)
		data = PORT_FOUR_QUEUE;
	else if (dev->info->num_tx_queues == 2)
		data = PORT_TWO_QUEUE;
	else
		data = PORT_SINGLE_QUEUE;

	ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
}

void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
	struct dsa_switch *ds = dev->ds;
	u16 data16;
	u8 member;

	/* enable tail tag for host port */
	if (cpu_port)
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
			     true);

	ksz9477_port_queue_split(dev, port);

	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);

	/* set back pressure */
	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);

	/* enable broadcast storm limit */
	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);

	/* disable DiffServ priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);

	/* replace priority */
	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
		     false);
	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
			   MTI_PVID_REPLACE, false);

	/* enable 802.1p priority */
	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);

	if (dev->info->internal_phy[port]) {
		/* do not force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     false);

		if (dev->info->phy_errata_9477)
			ksz9477_phy_errata_setup(dev, port);
	} else {
		/* force flow control */
		ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
			     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
			     true);
	}

	if (cpu_port)
		member = dsa_user_ports(ds);
	else
		member = BIT(dsa_upstream_port(ds, port));

	ksz9477_cfg_port_member(dev, port, member);

	/* clear pending interrupts */
	if (dev->info->internal_phy[port])
		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}

void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->info->port_cnt; i++) {
		if (dsa_is_cpu_port(ds, i) &&
		    (dev->info->cpu_ports & (1 << i))) {
			phy_interface_t interface;
			const char *prev_msg;
			const char *prev_mode;

			dev->cpu_port = i;
			p = &dev->ports[i];

			/* Read from the XMII register to determine the host
			 * port interface.  If it is set explicitly in the
			 * device tree, note the difference to help debugging.
			 */
			interface = ksz9477_get_interface(dev, i);
			if (!p->interface) {
				if (dev->compat_interface) {
					dev_warn(dev->dev,
						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
						 "Please update your device tree.\n",
						 i);
					p->interface = dev->compat_interface;
				} else {
					p->interface = interface;
				}
			}
			if (interface && interface != p->interface) {
				prev_msg = " instead of ";
				prev_mode = phy_modes(interface);
			} else {
				prev_msg = "";
				prev_mode = "";
			}
			dev_info(dev->dev,
				 "Port%d: using phy mode %s%s%s\n",
				 i,
				 phy_modes(p->interface),
				 prev_msg,
				 prev_mode);

			/* enable cpu port */
			ksz9477_port_setup(dev, i, true);
		}
	}

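	/* Start all non-host ports in the disabled STP state. */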
	for (i = 0; i < dev->info->port_cnt; i++) {
		if (i == dev->cpu_port)
			continue;
		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
	}
}

int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
	const u32 *masks;
	u32 data;
	int ret;

	masks = dev->info->masks;

	/* Enable Reserved multicast table */
	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);

	/* Set the Override bit for forwarding BPDU packets to the CPU */
	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
	if (ret < 0)
		return ret;

	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];

	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
	if (ret < 0)
		return ret;

	/* wait to be finished */
	ret = ksz9477_wait_alu_sta_ready(dev);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
		return ret;
	}

	return 0;
}

int ksz9477_setup(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;

	ds->mtu_enforcement_ingress = true;

	/* Required for port partitioning. */
	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
		      true);

	/* Length checking does not work correctly with tail tagging. */
	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);

	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);

	/* Now we can configure default MTU value */
	ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		return ret;

	/* queue based egress rate limit */
	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);

	/* enable global MIB counter freeze function */
	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);

	return 0;
}

u32 ksz9477_get_port_addr(int port, int offset)
{
	return PORT_CTRL_ADDR(port, offset);
}

int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
	val = val >> 8;

	return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
}

int ksz9477_switch_init(struct ksz_device *dev)
{
	u8 data8;
	int ret;

	dev->port_mask = (1 << dev->info->port_cnt) - 1;

	/* turn off SPI DO Edge select */
	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
	if (ret)
		return ret;

	data8 &= ~SPI_AUTO_EDGE_DETECTION;
	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
	if (ret)
		return ret;

	return 0;
}

void ksz9477_switch_exit(struct ksz_device *dev)
{
	ksz9477_reset_switch(dev);
}

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");