xref: /linux/drivers/net/dsa/microchip/ksz9477.c (revision aba74e639f8d76d29b94991615e33319d7371b63)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Microchip KSZ9477 switch driver main logic
4  *
5  * Copyright (C) 2017-2024 Microchip Technology Inc.
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/iopoll.h>
11 #include <linux/platform_data/microchip-ksz.h>
12 #include <linux/phy.h>
13 #include <linux/if_bridge.h>
14 #include <linux/if_vlan.h>
15 #include <net/dsa.h>
16 #include <net/switchdev.h>
17 
18 #include "ksz9477_reg.h"
19 #include "ksz_common.h"
20 #include "ksz9477.h"
21 
22 static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
23 {
24 	regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
25 }
26 
27 static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
28 			 bool set)
29 {
30 	regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
31 			   bits, set ? bits : 0);
32 }
33 
34 static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
35 {
36 	regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
37 }
38 
39 static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
40 			       u32 bits, bool set)
41 {
42 	regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
43 			   bits, set ? bits : 0);
44 }
45 
46 int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
47 {
48 	u16 frame_size;
49 
50 	if (!dsa_is_cpu_port(dev->ds, port))
51 		return 0;
52 
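	/* The switch is programmed with the full frame size, so account for a
	 * VLAN-tagged Ethernet header and the FCS on top of the MTU.
	 */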
53 	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
54 
55 	return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
56 				  REG_SW_MTU_MASK, frame_size);
57 }
58 
59 static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
60 {
61 	unsigned int val;
62 
63 	return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
64 					val, !(val & VLAN_START), 10, 1000);
65 }
66 
67 static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
68 				  u32 *vlan_table)
69 {
70 	int ret;
71 
72 	mutex_lock(&dev->vlan_mutex);
73 
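	/* Indirect table access: select the entry by VID, trigger a read and
	 * poll until the VLAN_START bit self-clears.
	 */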
74 	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
75 	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
76 
77 	/* wait to be cleared */
78 	ret = ksz9477_wait_vlan_ctrl_ready(dev);
79 	if (ret) {
80 		dev_dbg(dev->dev, "Failed to read vlan table\n");
81 		goto exit;
82 	}
83 
84 	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
85 	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
86 	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
87 
88 	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
89 
90 exit:
91 	mutex_unlock(&dev->vlan_mutex);
92 
93 	return ret;
94 }
95 
96 static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
97 				  u32 *vlan_table)
98 {
99 	int ret;
100 
101 	mutex_lock(&dev->vlan_mutex);
102 
103 	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
104 	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
105 	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
106 
107 	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
108 	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
109 
110 	/* wait to be cleared */
111 	ret = ksz9477_wait_vlan_ctrl_ready(dev);
112 	if (ret) {
113 		dev_dbg(dev->dev, "Failed to write vlan table\n");
114 		goto exit;
115 	}
116 
117 	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
118 
119 	/* update vlan cache table */
120 	dev->vlan_cache[vid].table[0] = vlan_table[0];
121 	dev->vlan_cache[vid].table[1] = vlan_table[1];
122 	dev->vlan_cache[vid].table[2] = vlan_table[2];
123 
124 exit:
125 	mutex_unlock(&dev->vlan_mutex);
126 
127 	return ret;
128 }
129 
130 static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
131 {
132 	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
133 	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
134 	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
135 	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
136 }
137 
138 static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
139 {
140 	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
141 	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
142 	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
143 	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
144 }
145 
146 static int ksz9477_wait_alu_ready(struct ksz_device *dev)
147 {
148 	unsigned int val;
149 
150 	return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
151 					val, !(val & ALU_START), 10, 1000);
152 }
153 
154 static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
155 {
156 	unsigned int val;
157 
158 	return regmap_read_poll_timeout(ksz_regmap_32(dev),
159 					REG_SW_ALU_STAT_CTRL__4,
160 					val, !(val & ALU_STAT_START),
161 					10, 1000);
162 }
163 
164 int ksz9477_reset_switch(struct ksz_device *dev)
165 {
166 	u8 data8;
167 	u32 data32;
168 
169 	/* reset switch */
170 	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
171 
172 	/* turn off SPI DO Edge select */
173 	regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
174 			   SPI_AUTO_EDGE_DETECTION, 0);
175 
176 	/* default configuration */
177 	ksz_write8(dev, REG_SW_LUE_CTRL_1,
178 		   SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
179 
180 	/* disable interrupts */
181 	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
182 	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
183 	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
184 
185 	/* KSZ9893 compatible chips do not support refclk configuration */
186 	if (dev->chip_id == KSZ9893_CHIP_ID ||
187 	    dev->chip_id == KSZ8563_CHIP_ID ||
188 	    dev->chip_id == KSZ9563_CHIP_ID)
189 		return 0;
190 
191 	data8 = SW_ENABLE_REFCLKO;
192 	if (dev->synclko_disable)
193 		data8 = 0;
194 	else if (dev->synclko_125)
195 		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
196 	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
197 
198 	return 0;
199 }
200 
201 void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
202 {
203 	struct ksz_port *p = &dev->ports[port];
204 	unsigned int val;
205 	u32 data;
206 	int ret;
207 
208 	/* retain the flush/freeze bit */
209 	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
210 	data |= MIB_COUNTER_READ;
211 	data |= (addr << MIB_COUNTER_INDEX_S);
212 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
213 
214 	ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
215 			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
216 			val, !(val & MIB_COUNTER_READ), 10, 1000);
217 	/* failed to read the MIB counter */
218 	if (ret) {
219 		dev_dbg(dev->dev, "Failed to get MIB\n");
220 		return;
221 	}
222 
223 	/* count resets upon read */
224 	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
225 	*cnt += data;
226 }
227 
228 void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
229 		       u64 *dropped, u64 *cnt)
230 {
231 	addr = dev->info->mib_names[addr].index;
232 	ksz9477_r_mib_cnt(dev, port, addr, cnt);
233 }
234 
235 void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
236 {
237 	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
238 	struct ksz_port *p = &dev->ports[port];
239 
240 	/* enable/disable the port for flush/freeze function */
241 	mutex_lock(&p->mib.cnt_mutex);
242 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);
243 
244 	/* used by MIB counter reading code to know freeze is enabled */
245 	p->freeze = freeze;
246 	mutex_unlock(&p->mib.cnt_mutex);
247 }
248 
249 static int ksz9477_half_duplex_monitor(struct ksz_device *dev, int port,
250 				       u64 tx_late_col)
251 {
252 	u8 lue_ctrl;
253 	u32 pmavbc;
254 	u16 pqm;
255 	int ret;
256 
257 	/* Errata DS80000754 recommends monitoring potential faults in
258 	 * half-duplex mode. The switch might not be able to communicate anymore
259 	 * in these states. If you see this message, please read the
260 	 * errata-sheet for more information:
261 	 * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
262 	 * To work around this issue, half-duplex mode should be avoided.
263 	 * A software reset could be implemented to recover from this state.
264 	 */
265 	dev_warn_once(dev->dev,
266 		      "Half-duplex detected on port %d, transmission halt may occur\n",
267 		      port);
268 	if (tx_late_col != 0) {
269 		/* Transmission halt with late collisions */
270 		dev_crit_once(dev->dev,
271 			      "TX late collisions detected, transmission may be halted on port %d\n",
272 			      port);
273 	}
274 	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &lue_ctrl);
275 	if (ret)
276 		return ret;
277 	if (lue_ctrl & SW_VLAN_ENABLE) {
278 		ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
279 		if (ret)
280 			return ret;
281 
282 		ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
283 		if (ret)
284 			return ret;
285 
286 		if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
287 		    (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
288 			/* Transmission halt with Half-Duplex and VLAN */
289 			dev_crit_once(dev->dev,
290 				      "resources out of limits, transmission may be halted\n");
291 		}
292 	}
293 
294 	return ret;
295 }
296 
297 int ksz9477_errata_monitor(struct ksz_device *dev, int port,
298 			   u64 tx_late_col)
299 {
300 	u8 status;
301 	int ret;
302 
303 	ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
304 	if (ret)
305 		return ret;
306 
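	/* Only monitor ports that report an interface speed and are running
	 * in half-duplex mode.
	 */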
307 	if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status)
308 	      == PORT_INTF_SPEED_NONE) &&
309 	    !(status & PORT_INTF_FULL_DUPLEX)) {
310 		ret = ksz9477_half_duplex_monitor(dev, port, tx_late_col);
311 	}
312 
313 	return ret;
314 }
315 
316 void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
317 {
318 	struct ksz_port_mib *mib = &dev->ports[port].mib;
319 
320 	/* flush all enabled port MIB counters */
321 	mutex_lock(&mib->cnt_mutex);
322 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
323 		     MIB_COUNTER_FLUSH_FREEZE);
324 	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
325 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
326 	mutex_unlock(&mib->cnt_mutex);
327 }
328 
329 static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
330 				 u16 *data)
331 {
332 	/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
333 	 * and BMSR_ERCAP bits are set.
334 	 */
335 	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
336 		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
337 }
338 
339 int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
340 {
341 	u16 val = 0xffff;
342 	int ret;
343 
344 	/* No real PHY after this. Simulate the PHY.
345 	 * A fixed PHY can be set up in the device tree, but this function is
346 	 * still called for that port during initialization.
347 	 * An external PHY on an RGMII port cannot be accessed this way, so a
348 	 * fixed PHY should be used.  Support for SGMII PHYs will be added later.
349 	 */
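	/* The fixed values below emulate a gigabit-capable PHY with
	 * autonegotiation enabled and completed, so that the MAC side sees a
	 * plausible link partner.
	 */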
350 	if (!dev->info->internal_phy[addr]) {
351 		struct ksz_port *p = &dev->ports[addr];
352 
353 		switch (reg) {
354 		case MII_BMCR:
355 			val = 0x1140;
356 			break;
357 		case MII_BMSR:
358 			val = 0x796d;
359 			break;
360 		case MII_PHYSID1:
361 			val = 0x0022;
362 			break;
363 		case MII_PHYSID2:
364 			val = 0x1631;
365 			break;
366 		case MII_ADVERTISE:
367 			val = 0x05e1;
368 			break;
369 		case MII_LPA:
370 			val = 0xc5e1;
371 			break;
372 		case MII_CTRL1000:
373 			val = 0x0700;
374 			break;
375 		case MII_STAT1000:
376 			if (p->phydev.speed == SPEED_1000)
377 				val = 0x3800;
378 			else
379 				val = 0;
380 			break;
381 		}
382 	} else {
383 		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
384 		if (ret)
385 			return ret;
386 
387 		ksz9477_r_phy_quirks(dev, addr, reg, &val);
388 	}
389 
390 	*data = val;
391 
392 	return 0;
393 }
394 
395 int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
396 {
397 	u32 mask, val32;
398 
399 	/* No real PHY after this. */
400 	if (!dev->info->internal_phy[addr])
401 		return 0;
402 
403 	if (reg < 0x10)
404 		return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
405 
406 	/* Errata: When using SPI, I2C, or in-band register access,
407 	 * writes to certain PHY registers should be performed as
408 	 * 32-bit writes instead of 16-bit writes.
409 	 */
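	/* Emulate the 16-bit write with a masked 32-bit read-modify-write:
	 * even-numbered registers map to the upper half-word of the 32-bit
	 * location, odd-numbered registers to the lower half-word.
	 */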
410 	val32 = val;
411 	mask = 0xffff;
412 	if ((reg & 1) == 0) {
413 		val32 <<= 16;
414 		mask <<= 16;
415 	}
416 	reg &= ~1;
417 	return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
418 }
419 
420 void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
421 {
422 	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
423 }
424 
425 void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
426 {
427 	const u16 *regs = dev->info->regs;
428 	u8 data;
429 
430 	regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
431 			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
432 			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
433 
434 	if (port < dev->info->port_cnt) {
435 		/* flush individual port */
436 		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
437 		if (!(data & PORT_LEARN_DISABLE))
438 			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
439 				    data | PORT_LEARN_DISABLE);
440 		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
441 		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
442 	} else {
443 		/* flush all */
444 		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
445 	}
446 }
447 
448 int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
449 				bool flag, struct netlink_ext_ack *extack)
450 {
451 	if (flag) {
452 		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
453 			     PORT_VLAN_LOOKUP_VID_0, true);
454 		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
455 	} else {
456 		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
457 		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
458 			     PORT_VLAN_LOOKUP_VID_0, false);
459 	}
460 
461 	return 0;
462 }
463 
464 int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
465 			  const struct switchdev_obj_port_vlan *vlan,
466 			  struct netlink_ext_ack *extack)
467 {
468 	u32 vlan_table[3];
469 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
470 	int err;
471 
472 	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
473 	if (err) {
474 		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
475 		return err;
476 	}
477 
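	/* vlan_table[0] holds the valid bit and FID, vlan_table[1] the
	 * untagged-port map and vlan_table[2] the port membership map.
	 */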
478 	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
479 	if (untagged)
480 		vlan_table[1] |= BIT(port);
481 	else
482 		vlan_table[1] &= ~BIT(port);
483 	vlan_table[1] &= ~(BIT(dev->cpu_port));
484 
485 	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
486 
487 	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
488 	if (err) {
489 		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
490 		return err;
491 	}
492 
493 	/* change PVID */
494 	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
495 		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);
496 
497 	return 0;
498 }
499 
500 int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
501 			  const struct switchdev_obj_port_vlan *vlan)
502 {
503 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
504 	u32 vlan_table[3];
505 	u16 pvid;
506 
507 	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
508 	pvid = pvid & 0xFFF;
509 
510 	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
511 		dev_dbg(dev->dev, "Failed to get vlan table\n");
512 		return -ETIMEDOUT;
513 	}
514 
515 	vlan_table[2] &= ~BIT(port);
516 
517 	if (pvid == vlan->vid)
518 		pvid = 1;
519 
520 	if (untagged)
521 		vlan_table[1] &= ~BIT(port);
522 
523 	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
524 		dev_dbg(dev->dev, "Failed to set vlan table\n");
525 		return -ETIMEDOUT;
526 	}
527 
528 	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
529 
530 	return 0;
531 }
532 
533 int ksz9477_fdb_add(struct ksz_device *dev, int port,
534 		    const unsigned char *addr, u16 vid, struct dsa_db db)
535 {
536 	u32 alu_table[4];
537 	u32 data;
538 	int ret = 0;
539 
540 	mutex_lock(&dev->alu_mutex);
541 
542 	/* find any entry with mac & vid */
543 	data = vid << ALU_FID_INDEX_S;
544 	data |= ((addr[0] << 8) | addr[1]);
545 	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
546 
547 	data = ((addr[2] << 24) | (addr[3] << 16));
548 	data |= ((addr[4] << 8) | addr[5]);
549 	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
550 
551 	/* start read operation */
552 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
553 
554 	/* wait to be finished */
555 	ret = ksz9477_wait_alu_ready(dev);
556 	if (ret) {
557 		dev_dbg(dev->dev, "Failed to read ALU\n");
558 		goto exit;
559 	}
560 
561 	/* read ALU entry */
562 	ksz9477_read_table(dev, alu_table);
563 
564 	/* update ALU entry */
565 	alu_table[0] = ALU_V_STATIC_VALID;
566 	alu_table[1] |= BIT(port);
567 	if (vid)
568 		alu_table[1] |= ALU_V_USE_FID;
569 	alu_table[2] = (vid << ALU_V_FID_S);
570 	alu_table[2] |= ((addr[0] << 8) | addr[1]);
571 	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
572 	alu_table[3] |= ((addr[4] << 8) | addr[5]);
573 
574 	ksz9477_write_table(dev, alu_table);
575 
576 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
577 
578 	/* wait to be finished */
579 	ret = ksz9477_wait_alu_ready(dev);
580 	if (ret)
581 		dev_dbg(dev->dev, "Failed to write ALU\n");
582 
583 exit:
584 	mutex_unlock(&dev->alu_mutex);
585 
586 	return ret;
587 }
588 
589 int ksz9477_fdb_del(struct ksz_device *dev, int port,
590 		    const unsigned char *addr, u16 vid, struct dsa_db db)
591 {
592 	u32 alu_table[4];
593 	u32 data;
594 	int ret = 0;
595 
596 	mutex_lock(&dev->alu_mutex);
597 
598 	/* read any entry with mac & vid */
599 	data = vid << ALU_FID_INDEX_S;
600 	data |= ((addr[0] << 8) | addr[1]);
601 	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
602 
603 	data = ((addr[2] << 24) | (addr[3] << 16));
604 	data |= ((addr[4] << 8) | addr[5]);
605 	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
606 
607 	/* start read operation */
608 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
609 
610 	/* wait to be finished */
611 	ret = ksz9477_wait_alu_ready(dev);
612 	if (ret) {
613 		dev_dbg(dev->dev, "Failed to read ALU\n");
614 		goto exit;
615 	}
616 
617 	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
618 	if (alu_table[0] & ALU_V_STATIC_VALID) {
619 		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
620 		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
621 		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
622 
623 		/* clear forwarding port */
624 		alu_table[1] &= ~BIT(port);
625 
626 		/* if there is no port to forward, clear table */
627 		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
628 			alu_table[0] = 0;
629 			alu_table[1] = 0;
630 			alu_table[2] = 0;
631 			alu_table[3] = 0;
632 		}
633 	} else {
634 		alu_table[0] = 0;
635 		alu_table[1] = 0;
636 		alu_table[2] = 0;
637 		alu_table[3] = 0;
638 	}
639 
640 	ksz9477_write_table(dev, alu_table);
641 
642 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
643 
644 	/* wait to be finished */
645 	ret = ksz9477_wait_alu_ready(dev);
646 	if (ret)
647 		dev_dbg(dev->dev, "Failed to write ALU\n");
648 
649 exit:
650 	mutex_unlock(&dev->alu_mutex);
651 
652 	return ret;
653 }
654 
655 static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
656 {
657 	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
658 	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
659 	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
660 	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
661 			ALU_V_PRIO_AGE_CNT_M;
662 	alu->mstp = alu_table[0] & ALU_V_MSTP_M;
663 
664 	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
665 	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
666 	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
667 
668 	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
669 
670 	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
671 	alu->mac[1] = alu_table[2] & 0xFF;
672 	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
673 	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
674 	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
675 	alu->mac[5] = alu_table[3] & 0xFF;
676 }
677 
678 int ksz9477_fdb_dump(struct ksz_device *dev, int port,
679 		     dsa_fdb_dump_cb_t *cb, void *data)
680 {
681 	int ret = 0;
682 	u32 ksz_data;
683 	u32 alu_table[4];
684 	struct alu_struct alu;
685 	int timeout;
686 
687 	mutex_lock(&dev->alu_mutex);
688 
689 	/* start ALU search */
690 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
691 
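	/* Walk the dynamic entries: each iteration waits for either the next
	 * valid entry (ALU_VALID) or completion of the search (ALU_START
	 * cleared by hardware).
	 */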
692 	do {
693 		timeout = 1000;
694 		do {
695 			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
696 			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
697 				break;
698 			usleep_range(1, 10);
699 		} while (timeout-- > 0);
700 
701 		if (!timeout) {
702 			dev_dbg(dev->dev, "Failed to search ALU\n");
703 			ret = -ETIMEDOUT;
704 			goto exit;
705 		}
706 
707 		if (!(ksz_data & ALU_VALID))
708 			continue;
709 
710 		/* read ALU table */
711 		ksz9477_read_table(dev, alu_table);
712 
713 		ksz9477_convert_alu(&alu, alu_table);
714 
715 		if (alu.port_forward & BIT(port)) {
716 			ret = cb(alu.mac, alu.fid, alu.is_static, data);
717 			if (ret)
718 				goto exit;
719 		}
720 	} while (ksz_data & ALU_START);
721 
722 exit:
723 
724 	/* stop ALU search */
725 	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
726 
727 	mutex_unlock(&dev->alu_mutex);
728 
729 	return ret;
730 }
731 
732 int ksz9477_mdb_add(struct ksz_device *dev, int port,
733 		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
734 {
735 	u32 static_table[4];
736 	const u8 *shifts;
737 	const u32 *masks;
738 	u32 data;
739 	int index;
740 	u32 mac_hi, mac_lo;
741 	int err = 0;
742 
743 	shifts = dev->info->shifts;
744 	masks = dev->info->masks;
745 
746 	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
747 	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
748 	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
749 
750 	mutex_lock(&dev->alu_mutex);
751 
752 	for (index = 0; index < dev->info->num_statics; index++) {
753 		/* look for a matching entry or an empty slot */
754 		data = (index << shifts[ALU_STAT_INDEX]) |
755 			masks[ALU_STAT_READ] | ALU_STAT_START;
756 		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
757 
758 		/* wait to be finished */
759 		err = ksz9477_wait_alu_sta_ready(dev);
760 		if (err) {
761 			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
762 			goto exit;
763 		}
764 
765 		/* read ALU static table */
766 		ksz9477_read_table(dev, static_table);
767 
768 		if (static_table[0] & ALU_V_STATIC_VALID) {
769 			/* check this has same vid & mac address */
770 			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
771 			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
772 			    static_table[3] == mac_lo) {
773 				/* found matching one */
774 				break;
775 			}
776 		} else {
777 			/* found empty one */
778 			break;
779 		}
780 	}
781 
782 	/* no available entry */
783 	if (index == dev->info->num_statics) {
784 		err = -ENOSPC;
785 		goto exit;
786 	}
787 
788 	/* add entry */
789 	static_table[0] = ALU_V_STATIC_VALID;
790 	static_table[1] |= BIT(port);
791 	if (mdb->vid)
792 		static_table[1] |= ALU_V_USE_FID;
793 	static_table[2] = (mdb->vid << ALU_V_FID_S);
794 	static_table[2] |= mac_hi;
795 	static_table[3] = mac_lo;
796 
797 	ksz9477_write_table(dev, static_table);
798 
799 	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
800 	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
801 
802 	/* wait to be finished */
803 	if (ksz9477_wait_alu_sta_ready(dev))
804 		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
805 
806 exit:
807 	mutex_unlock(&dev->alu_mutex);
808 	return err;
809 }
810 
811 int ksz9477_mdb_del(struct ksz_device *dev, int port,
812 		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
813 {
814 	u32 static_table[4];
815 	const u8 *shifts;
816 	const u32 *masks;
817 	u32 data;
818 	int index;
819 	int ret = 0;
820 	u32 mac_hi, mac_lo;
821 
822 	shifts = dev->info->shifts;
823 	masks = dev->info->masks;
824 
825 	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
826 	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
827 	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
828 
829 	mutex_lock(&dev->alu_mutex);
830 
831 	for (index = 0; index < dev->info->num_statics; index++) {
832 		/* look for the matching entry */
833 		data = (index << shifts[ALU_STAT_INDEX]) |
834 			masks[ALU_STAT_READ] | ALU_STAT_START;
835 		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
836 
837 		/* wait to be finished */
838 		ret = ksz9477_wait_alu_sta_ready(dev);
839 		if (ret) {
840 			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
841 			goto exit;
842 		}
843 
844 		/* read ALU static table */
845 		ksz9477_read_table(dev, static_table);
846 
847 		if (static_table[0] & ALU_V_STATIC_VALID) {
848 			/* check this has same vid & mac address */
849 
850 			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
851 			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
852 			    static_table[3] == mac_lo) {
853 				/* found matching one */
854 				break;
855 			}
856 		}
857 	}
858 
859 	/* no available entry */
860 	if (index == dev->info->num_statics)
861 		goto exit;
862 
863 	/* clear port */
864 	static_table[1] &= ~BIT(port);
865 
866 	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
867 		/* delete entry */
868 		static_table[0] = 0;
869 		static_table[1] = 0;
870 		static_table[2] = 0;
871 		static_table[3] = 0;
872 	}
873 
874 	ksz9477_write_table(dev, static_table);
875 
876 	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
877 	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
878 
879 	/* wait to be finished */
880 	ret = ksz9477_wait_alu_sta_ready(dev);
881 	if (ret)
882 		dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
883 
884 exit:
885 	mutex_unlock(&dev->alu_mutex);
886 
887 	return ret;
888 }
889 
890 int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
891 			    struct dsa_mall_mirror_tc_entry *mirror,
892 			    bool ingress, struct netlink_ext_ack *extack)
893 {
894 	u8 data;
895 	int p;
896 
897 	/* Limit to one sniffer port
898 	 * Check if any of the ports is already set up for sniffing.
899 	 * If yes, instruct the user to remove the previous entry and exit.
900 	 */
901 	for (p = 0; p < dev->info->port_cnt; p++) {
902 		/* Skip the current sniffing port */
903 		if (p == mirror->to_local_port)
904 			continue;
905 
906 		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
907 
908 		if (data & PORT_MIRROR_SNIFFER) {
909 			NL_SET_ERR_MSG_MOD(extack,
910 					   "Sniffer port is already configured, delete existing rules & retry");
911 			return -EBUSY;
912 		}
913 	}
914 
915 	if (ingress)
916 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
917 	else
918 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
919 
920 	/* configure mirror port */
921 	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
922 		     PORT_MIRROR_SNIFFER, true);
923 
924 	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
925 
926 	return 0;
927 }
928 
929 void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
930 			     struct dsa_mall_mirror_tc_entry *mirror)
931 {
932 	bool in_use = false;
933 	u8 data;
934 	int p;
935 
936 	if (mirror->ingress)
937 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
938 	else
939 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
940 
941 
942 	/* Check if any of the ports still refers to the sniffer port */
943 	for (p = 0; p < dev->info->port_cnt; p++) {
944 		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
945 
946 		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
947 			in_use = true;
948 			break;
949 		}
950 	}
951 
952 	/* delete sniffing if there are no other mirroring rules */
953 	if (!in_use)
954 		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
955 			     PORT_MIRROR_SNIFFER, false);
956 }
957 
958 static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
959 {
960 	phy_interface_t interface;
961 	bool gbit;
962 
963 	if (dev->info->internal_phy[port])
964 		return PHY_INTERFACE_MODE_NA;
965 
966 	gbit = ksz_get_gbit(dev, port);
967 
968 	interface = ksz_get_xmii(dev, port, gbit);
969 
970 	return interface;
971 }
972 
973 void ksz9477_get_caps(struct ksz_device *dev, int port,
974 		      struct phylink_config *config)
975 {
976 	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
977 				   MAC_SYM_PAUSE;
978 
979 	if (dev->info->gbit_capable[port])
980 		config->mac_capabilities |= MAC_1000FD;
981 }
982 
983 int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
984 {
985 	u32 secs = msecs / 1000;
986 	u8 data, mult, value;
987 	u32 max_val;
988 	int ret;
989 
990 #define MAX_TIMER_VAL	((1 << 8) - 1)
991 
992 	/* The aging timer comprises a 3-bit multiplier and an 8-bit second
993 	 * value.  Neither of them can be zero.  The maximum timer is then
994 	 * 7 * 255 = 1785 seconds.
995 	 */
996 	if (!secs)
997 		secs = 1;
998 
999 	/* Return error if too large. */
1000 	else if (secs > 7 * MAX_TIMER_VAL)
1001 		return -EINVAL;
1002 
1003 	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
1004 	if (ret < 0)
1005 		return ret;
1006 
1007 	/* Check whether there is need to update the multiplier. */
1008 	mult = FIELD_GET(SW_AGE_CNT_M, value);
1009 	max_val = MAX_TIMER_VAL;
1010 	if (mult > 0) {
1011 		/* Try to reuse the multiplier already in the register; for
1012 		 * example, the hardware default uses multiplier 4 with 75
1013 		 * seconds to get 300 seconds.
1014 		 */
1015 		max_val = DIV_ROUND_UP(secs, mult);
1016 		if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
1017 			max_val = MAX_TIMER_VAL;
1018 	}
1019 
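	/* Worked example: for secs = 300 with the default multiplier 4
	 * already programmed, max_val becomes 75, data stays 4 and the
	 * seconds register is written with 75 (4 * 75 = 300).
	 */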
1020 	data = DIV_ROUND_UP(secs, max_val);
1021 	if (mult != data) {
1022 		value &= ~SW_AGE_CNT_M;
1023 		value |= FIELD_PREP(SW_AGE_CNT_M, data);
1024 		ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
1025 		if (ret < 0)
1026 			return ret;
1027 	}
1028 
1029 	value = DIV_ROUND_UP(secs, data);
1030 	return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
1031 }
1032 
1033 void ksz9477_port_queue_split(struct ksz_device *dev, int port)
1034 {
1035 	u8 data;
1036 
1037 	if (dev->info->num_tx_queues == 8)
1038 		data = PORT_EIGHT_QUEUE;
1039 	else if (dev->info->num_tx_queues == 4)
1040 		data = PORT_FOUR_QUEUE;
1041 	else if (dev->info->num_tx_queues == 2)
1042 		data = PORT_TWO_QUEUE;
1043 	else
1044 		data = PORT_SINGLE_QUEUE;
1045 
1046 	ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
1047 }
1048 
1049 void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
1050 {
1051 	const u16 *regs = dev->info->regs;
1052 	struct dsa_switch *ds = dev->ds;
1053 	u16 data16;
1054 	u8 member;
1055 
1056 	/* enable tag tail for host port */
1057 	if (cpu_port)
1058 		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
1059 			     true);
1060 
1061 	ksz9477_port_queue_split(dev, port);
1062 
1063 	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
1064 
1065 	/* set back pressure */
1066 	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
1067 
1068 	/* enable broadcast storm limit */
1069 	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
1070 
1071 	/* replace priority */
1072 	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
1073 		     false);
1074 	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
1075 			   MTI_PVID_REPLACE, false);
1076 
1077 	/* force flow control for non-PHY ports only */
1078 	ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
1079 		     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
1080 		     !dev->info->internal_phy[port]);
1081 
1082 	if (cpu_port)
1083 		member = dsa_user_ports(ds);
1084 	else
1085 		member = BIT(dsa_upstream_port(ds, port));
1086 
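	/* The CPU port is a member of all user ports, while a user port
	 * initially forwards only towards its upstream (CPU) port.
	 */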
1087 	ksz9477_cfg_port_member(dev, port, member);
1088 
1089 	/* clear pending interrupts */
1090 	if (dev->info->internal_phy[port])
1091 		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
1092 
1093 	ksz9477_port_acl_init(dev, port);
1094 
1095 	/* clear pending wake flags */
1096 	ksz_handle_wake_reason(dev, port);
1097 
1098 	/* Disable all WoL options by default. Otherwise
1099 	 * ksz_switch_macaddr_get/put logic will not work properly.
1100 	 */
1101 	ksz_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
1102 }
1103 
1104 void ksz9477_config_cpu_port(struct dsa_switch *ds)
1105 {
1106 	struct ksz_device *dev = ds->priv;
1107 	struct ksz_port *p;
1108 	int i;
1109 
1110 	for (i = 0; i < dev->info->port_cnt; i++) {
1111 		if (dsa_is_cpu_port(ds, i) &&
1112 		    (dev->info->cpu_ports & (1 << i))) {
1113 			phy_interface_t interface;
1114 			const char *prev_msg;
1115 			const char *prev_mode;
1116 
1117 			dev->cpu_port = i;
1118 			p = &dev->ports[i];
1119 
1120 			/* Read from XMII register to determine host port
1121 			 * interface.  If it is set explicitly in the device tree,
1122 			 * note the difference to help debugging.
1123 			 */
1124 			interface = ksz9477_get_interface(dev, i);
1125 			if (!p->interface) {
1126 				if (dev->compat_interface) {
1127 					dev_warn(dev->dev,
1128 						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
1129 						 "Please update your device tree.\n",
1130 						 i);
1131 					p->interface = dev->compat_interface;
1132 				} else {
1133 					p->interface = interface;
1134 				}
1135 			}
1136 			if (interface && interface != p->interface) {
1137 				prev_msg = " instead of ";
1138 				prev_mode = phy_modes(interface);
1139 			} else {
1140 				prev_msg = "";
1141 				prev_mode = "";
1142 			}
1143 			dev_info(dev->dev,
1144 				 "Port%d: using phy mode %s%s%s\n",
1145 				 i,
1146 				 phy_modes(p->interface),
1147 				 prev_msg,
1148 				 prev_mode);
1149 
1150 			/* enable cpu port */
1151 			ksz9477_port_setup(dev, i, true);
1152 		}
1153 	}
1154 
1155 	for (i = 0; i < dev->info->port_cnt; i++) {
1156 		if (i == dev->cpu_port)
1157 			continue;
1158 		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
1159 
1160 		/* Power down the internal PHY if port is unused. */
1161 		if (dsa_is_unused_port(ds, i) && dev->info->internal_phy[i])
1162 			ksz_pwrite16(dev, i, 0x100, BMCR_PDOWN);
1163 	}
1164 }
1165 
1166 int ksz9477_enable_stp_addr(struct ksz_device *dev)
1167 {
1168 	const u32 *masks;
1169 	u32 data;
1170 	int ret;
1171 
1172 	masks = dev->info->masks;
1173 
1174 	/* Enable Reserved multicast table */
1175 	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
1176 
1177 	/* Set the Override bit for forwarding BPDU packet to CPU */
1178 	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
1179 			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
1180 	if (ret < 0)
1181 		return ret;
1182 
1183 	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
1184 
1185 	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
1186 	if (ret < 0)
1187 		return ret;
1188 
1189 	/* wait to be finished */
1190 	ret = ksz9477_wait_alu_sta_ready(dev);
1191 	if (ret < 0) {
1192 		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
1193 		return ret;
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 int ksz9477_setup(struct dsa_switch *ds)
1200 {
1201 	struct ksz_device *dev = ds->priv;
1202 	const u16 *regs = dev->info->regs;
1203 	int ret = 0;
1204 
1205 	ds->mtu_enforcement_ingress = true;
1206 
1207 	/* Required for port partitioning. */
1208 	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
1209 		      true);
1210 
1211 	/* Frame length checking does not work correctly with tail tagging. */
1212 	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
1213 
1214 	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
1215 	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
1216 
1217 	/* Use collision based back pressure mode. */
1218 	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
1219 		SW_BACK_PRESSURE_COLLISION);
1220 
1221 	/* Now we can configure default MTU value */
1222 	ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
1223 				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
1224 	if (ret)
1225 		return ret;
1226 
1227 	/* queue based egress rate limit */
1228 	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
1229 
1230 	/* enable global MIB counter freeze function */
1231 	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
1232 
1233 	/* Make sure PME (WoL) is not enabled. If requested, it will
1234 	 * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
1235 	 * do not like PME event changes before shutdown.
1236 	 */
1237 	return ksz_write8(dev, regs[REG_SW_PME_CTRL], 0);
1238 }
1239 
1240 u32 ksz9477_get_port_addr(int port, int offset)
1241 {
1242 	return PORT_CTRL_ADDR(port, offset);
1243 }
1244 
1245 int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
1246 {
1247 	val = val >> 8;
1248 
1249 	return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
1250 }
1251 
1252 /* The KSZ9477 provides the following HW features to accelerate
1253  * HSR frame handling:
1254  *
1255  * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH
1256  * 2. RX PACKET DUPLICATION DISCARDING
1257  * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING
1258  *
1259  * Only the feature from point 1 has a NETIF_F_* flag available.
1260  *
1261  * The features from points 2 and 3 are "best effort" - i.e. they will
1262  * work correctly most of the time, but it may happen that some
1263  * frames are not caught - to be more specific, there is a race
1264  * condition in the hardware such that, when duplicate packets are received
1265  * on member ports very close in time to each other, the hardware fails
1266  * to detect that they are duplicates.
1267  *
1268  * Hence, the SW needs to handle those special cases. However, the speed-up
1269  * gain is considerable when the above features are used.
1270  *
1271  * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames
1272  * can be forwarded in the switch fabric between HSR ports.
1273  */
1274 #define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD)
1275 
1276 void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
1277 {
1278 	struct ksz_device *dev = ds->priv;
1279 	struct net_device *user;
1280 	struct dsa_port *hsr_dp;
1281 	u8 data, hsr_ports = 0;
1282 
1283 	/* Program which port(s) shall support HSR */
1284 	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port));
1285 
1286 	/* Forward frames between HSR ports (i.e. bridge together HSR ports) */
1287 	if (dev->hsr_ports) {
1288 		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
1289 			hsr_ports |= BIT(hsr_dp->index);
1290 
1291 		hsr_ports |= BIT(dsa_upstream_port(ds, port));
1292 		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
1293 			ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports);
1294 	}
1295 
1296 	if (!dev->hsr_ports) {
1297 		/* Enable discarding of received HSR frames */
1298 		ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data);
1299 		data |= HSR_DUPLICATE_DISCARD;
1300 		data &= ~HSR_NODE_UNICAST;
1301 		ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data);
1302 	}
1303 
1304 	/* Enable per port self-address filtering.
1305 	 * The global self-address filtering has already been enabled in the
1306 	 * ksz9477_reset_switch() function.
1307 	 */
1308 	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);
1309 
1310 	/* Set up HW-supported features for lan HSR ports */
1311 	user = dsa_to_port(ds, port)->user;
1312 	user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
1313 }
1314 
1315 void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
1316 {
1317 	struct ksz_device *dev = ds->priv;
1318 
1319 	/* Clear port HSR support */
1320 	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0);
1321 
1322 	/* Disable forwarding frames between HSR ports */
1323 	ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port)));
1324 
1325 	/* Disable per port self-address filtering */
1326 	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false);
1327 }
1328 
1329 int ksz9477_switch_init(struct ksz_device *dev)
1330 {
1331 	u8 data8;
1332 	int ret;
1333 
1334 	dev->port_mask = (1 << dev->info->port_cnt) - 1;
1335 
1336 	/* turn off SPI DO Edge select */
1337 	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
1338 	if (ret)
1339 		return ret;
1340 
1341 	data8 &= ~SPI_AUTO_EDGE_DETECTION;
1342 	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
1343 	if (ret)
1344 		return ret;
1345 
1346 	return 0;
1347 }
1348 
1349 void ksz9477_switch_exit(struct ksz_device *dev)
1350 {
1351 	ksz9477_reset_switch(dev);
1352 }
1353 
1354 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
1355 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
1356 MODULE_LICENSE("GPL");
1357