xref: /linux/drivers/net/dsa/lantiq/lantiq_gswip_common.c (revision f2161d5f1aae21a42b0a64d87e10cb31db423f42)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Lantiq / Intel / MaxLinear GSWIP common function library
4  *
5  * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
6  * Copyright (C) 2023 - 2024 MaxLinear Inc.
7  * Copyright (C) 2022 Snap One, LLC.  All rights reserved.
8  * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
9  * Copyright (C) 2012 John Crispin <john@phrozen.org>
10  * Copyright (C) 2010 Lantiq Deutschland
11  *
 * The VLAN and bridge model the GSWIP hardware uses does not directly
 * match the model DSA uses.
14  *
15  * The hardware has 64 possible table entries for bridges with one VLAN
16  * ID, one flow id and a list of ports for each bridge. All entries which
17  * match the same flow ID are combined in the mac learning table, they
18  * act as one global bridge.
19  * The hardware does not support VLAN filter on the port, but on the
20  * bridge, this driver converts the DSA model to the hardware.
21  *
22  * The CPU gets all the exception frames which do not match any forwarding
23  * rule and the CPU port is also added to all bridges. This makes it possible
24  * to handle all the special cases easily in software.
25  * At the initialization the driver allocates one bridge table entry for
26  * each switch port which is used when the port is used without an
27  * explicit bridge. This prevents the frames from being forwarded
28  * between all LAN ports by default.
29  */
30 
31 #include "lantiq_gswip.h"
32 
33 #include <linux/delay.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_bridge.h>
36 #include <linux/if_vlan.h>
37 #include <linux/iopoll.h>
38 #include <linux/module.h>
39 #include <linux/of_mdio.h>
40 #include <linux/of_net.h>
41 #include <linux/phy.h>
42 #include <linux/phylink.h>
43 #include <linux/regmap.h>
44 #include <net/dsa.h>
45 
/* In-memory image of one PCE (Packet Classification Engine) table entry,
 * shuttled to/from the hardware table access registers by
 * gswip_pce_table_entry_read()/_write().
 */
struct gswip_pce_table_entry {
	u16 index;      // PCE_TBL_ADDR.ADDR = pData->table_index
	u16 table;      // PCE_TBL_CTRL.ADDR = pData->table
	u16 key[8];     /* GSWIP_PCE_TBL_KEY(0..7) registers */
	u16 val[5];     /* GSWIP_PCE_TBL_VAL(0..4) registers */
	u16 mask;       /* GSWIP_PCE_TBL_MASK register */
	u8 gmap;        /* GMAP field of PCE_TBL_CTRL (bits starting at 7) */
	bool type;      /* TYPE bit of PCE_TBL_CTRL */
	bool valid;     /* VLD bit of PCE_TBL_CTRL */
	bool key_mode;  /* true: key-based access (KSRD/KSWR),
			 * false: address/index-based access (ADRD/ADWR)
			 */
};
57 
/* Description of one RMON hardware counter. */
struct gswip_rmon_cnt_desc {
	unsigned int size;	/* counter width in 32-bit words; 2 for the
				 * byte counters in gswip_rmon_cnt[]
				 */
	unsigned int offset;	/* hardware counter offset (consumed by the
				 * stats readout code, not visible here)
				 */
	const char *name;	/* counter name string */
};
63 
#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}

/* RMON counter table: hardware offsets and names, in the order the
 * counters are reported.
 */
static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
	/** Receive Packet Count (only packets that are accepted and not discarded). */
	MIB_DESC(1, 0x1F, "RxGoodPkts"),
	MIB_DESC(1, 0x23, "RxUnicastPkts"),
	MIB_DESC(1, 0x22, "RxMulticastPkts"),
	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
	MIB_DESC(1, 0x12, "Rx64BytePkts"),
	MIB_DESC(1, 0x13, "Rx127BytePkts"),
	MIB_DESC(1, 0x14, "Rx255BytePkts"),
	MIB_DESC(1, 0x15, "Rx511BytePkts"),
	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
	MIB_DESC(1, 0x18, "RxDroppedPkts"),
	MIB_DESC(1, 0x19, "RxFilteredPkts"),
	MIB_DESC(2, 0x24, "RxGoodBytes"),
	MIB_DESC(2, 0x26, "RxBadBytes"),
	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
	MIB_DESC(1, 0x0C, "TxGoodPkts"),
	MIB_DESC(1, 0x06, "TxUnicastPkts"),
	MIB_DESC(1, 0x07, "TxMulticastPkts"),
	MIB_DESC(1, 0x00, "Tx64BytePkts"),
	MIB_DESC(1, 0x01, "Tx127BytePkts"),
	MIB_DESC(1, 0x02, "Tx255BytePkts"),
	MIB_DESC(1, 0x03, "Tx511BytePkts"),
	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
	MIB_DESC(1, 0x08, "TxSingleCollCount"),
	MIB_DESC(1, 0x09, "TxMultCollCount"),
	MIB_DESC(1, 0x0A, "TxLateCollCount"),
	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
	MIB_DESC(1, 0x0D, "TxPauseCount"),
	MIB_DESC(1, 0x10, "TxDroppedPkts"),
	MIB_DESC(2, 0x0E, "TxGoodBytes"),
};
108 
/* Poll the switch register at @offset every 20 us until all bits in
 * @cleared read back as zero, giving up after 50 ms.
 *
 * Return type changed from u32 to int: regmap_read_poll_timeout()
 * returns a negative errno, which must not be laundered through an
 * unsigned type. All callers already assign the result to an int.
 *
 * Returns 0 on success or a negative error code on timeout/read failure.
 */
static int gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
				  u32 cleared)
{
	u32 val;

	return regmap_read_poll_timeout(priv->gswip, offset, val,
					!(val & cleared), 20, 50000);
}
117 
/* Read-modify-write the MII_CFG register of @port: clear @mask, set @set. */
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 mask, u32 set,
			       int port)
{
	/* Only MII ports have a MII_CFG register; silently skip the rest */
	if (!(priv->hw_info->mii_ports & BIT(port)))
		return;

	regmap_write_bits(priv->mii,
			  GSWIP_MII_CFGp(port + priv->hw_info->mii_port_reg_offset),
			  mask, set);
}
132 
/* Wait (polling every 40 us, up to 4 ms) for the MDIO controller to clear
 * its BUSY flag so a new read/write can be issued.
 *
 * Returns 0 when idle or a negative error code on timeout.
 */
static int gswip_mdio_poll(struct gswip_priv *priv)
{
	u32 ctrl;

	return regmap_read_poll_timeout(priv->mdio, GSWIP_MDIO_CTRL, ctrl,
					!(ctrl & GSWIP_MDIO_CTRL_BUSY), 40, 4000);
}
140 
/* MDIO bus write op: write @val to register @reg of PHY @addr. */
static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	u32 ctrl;
	int ret;

	ret = gswip_mdio_poll(priv);
	if (ret) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return ret;
	}

	/* compose the control word, then trigger the transaction */
	ctrl = GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR;
	ctrl |= (addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT;
	ctrl |= reg & GSWIP_MDIO_CTRL_REGAD_MASK;

	regmap_write(priv->mdio, GSWIP_MDIO_WRITE, val);
	regmap_write(priv->mdio, GSWIP_MDIO_CTRL, ctrl);

	return 0;
}
160 
gswip_mdio_rd(struct mii_bus * bus,int addr,int reg)161 static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
162 {
163 	struct gswip_priv *priv = bus->priv;
164 	u32 val;
165 	int err;
166 
167 	err = gswip_mdio_poll(priv);
168 	if (err) {
169 		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
170 		return err;
171 	}
172 
173 	regmap_write(priv->mdio, GSWIP_MDIO_CTRL,
174 		     GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
175 		     ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
176 		     (reg & GSWIP_MDIO_CTRL_REGAD_MASK));
177 
178 	err = gswip_mdio_poll(priv);
179 	if (err) {
180 		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
181 		return err;
182 	}
183 
184 	err = regmap_read(priv->mdio, GSWIP_MDIO_READ, &val);
185 	if (err)
186 		return err;
187 
188 	return val;
189 }
190 
/* Register the internal MDIO bus if the device tree describes one.
 *
 * Looks for a "lantiq,xrx200-mdio" compatible child node first, falling
 * back to a child named "mdio". When no usable (available) node exists,
 * this quietly returns 0 without registering a bus.
 *
 * The bus and its registration are devm-managed; returns 0 or a
 * negative error code.
 */
static int gswip_mdio(struct gswip_priv *priv)
{
	struct device_node *mdio_np, *switch_np = priv->dev->of_node;
	struct device *dev = priv->dev;
	struct mii_bus *bus;
	int err = 0;

	mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
	if (!mdio_np)
		mdio_np = of_get_child_by_name(switch_np, "mdio");

	/* of_device_is_available(NULL) is false, so a missing node also
	 * takes this exit and returns 0 (no MDIO bus to register)
	 */
	if (!of_device_is_available(mdio_np))
		goto out_put_node;

	bus = devm_mdiobus_alloc(dev);
	if (!bus) {
		err = -ENOMEM;
		goto out_put_node;
	}

	bus->priv = priv;
	bus->read = gswip_mdio_rd;
	bus->write = gswip_mdio_wr;
	bus->name = "lantiq,xrx200-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->parent = priv->dev;

	err = devm_of_mdiobus_register(dev, bus, mdio_np);

out_put_node:
	/* of_node_put(NULL) is a no-op */
	of_node_put(mdio_np);

	return err;
}
225 
/* Read one entry from a hardware PCE table into @tbl.
 *
 * Depending on tbl->key_mode the lookup is key-based (KSRD) or
 * address/index-based (ADRD). Sequence: wait for any previous table
 * access to finish (BAS clear), program index + table select/opmode and
 * start the access (BAS set), wait for completion, then read back the
 * key, value, mask and control fields.
 *
 * Serialized against concurrent table accesses by priv->pce_table_lock.
 * Returns 0 on success or a negative error code.
 */
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
				      struct gswip_pce_table_entry *tbl)
{
	int i;
	int err;
	u32 crtl;	/* [sic] holds the PCE_TBL_CTRL register value */
	u32 tmp;
	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
					GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;

	mutex_lock(&priv->pce_table_lock);

	/* wait until any previously started table access has finished */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		goto out_unlock;

	regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
	/* select table + read opmode and kick off the access (BAS) */
	regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
			  GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
			  GSWIP_PCE_TBL_CTRL_BAS,
			  tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS);

	/* hardware clears BAS when the read has completed */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
				     GSWIP_PCE_TBL_CTRL_BAS);
	if (err)
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(tbl->key); i++) {
		err = regmap_read(priv->gswip, GSWIP_PCE_TBL_KEY(i), &tmp);
		if (err)
			goto out_unlock;
		tbl->key[i] = tmp;
	}
	for (i = 0; i < ARRAY_SIZE(tbl->val); i++) {
		err = regmap_read(priv->gswip, GSWIP_PCE_TBL_VAL(i), &tmp);
		if (err)
			goto out_unlock;
		tbl->val[i] = tmp;
	}

	err = regmap_read(priv->gswip, GSWIP_PCE_TBL_MASK, &tmp);
	if (err)
		goto out_unlock;

	tbl->mask = tmp;
	err = regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
	if (err)
		goto out_unlock;

	tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
	tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
	tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;

out_unlock:
	mutex_unlock(&priv->pce_table_lock);

	return err;
}
286 
gswip_pce_table_entry_write(struct gswip_priv * priv,struct gswip_pce_table_entry * tbl)287 static int gswip_pce_table_entry_write(struct gswip_priv *priv,
288 				       struct gswip_pce_table_entry *tbl)
289 {
290 	int i;
291 	int err;
292 	u32 crtl;
293 	u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
294 					GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
295 
296 	mutex_lock(&priv->pce_table_lock);
297 
298 	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
299 				     GSWIP_PCE_TBL_CTRL_BAS);
300 	if (err) {
301 		mutex_unlock(&priv->pce_table_lock);
302 		return err;
303 	}
304 
305 	regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
306 	regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
307 			  GSWIP_PCE_TBL_CTRL_ADDR_MASK |
308 			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
309 			  tbl->table | addr_mode);
310 
311 	for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
312 		regmap_write(priv->gswip, GSWIP_PCE_TBL_KEY(i), tbl->key[i]);
313 
314 	for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
315 		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(i), tbl->val[i]);
316 
317 	regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
318 			  GSWIP_PCE_TBL_CTRL_ADDR_MASK |
319 			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
320 			  tbl->table | addr_mode);
321 
322 	regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, tbl->mask);
323 
324 	regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
325 	crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
326 		  GSWIP_PCE_TBL_CTRL_GMAP_MASK);
327 	if (tbl->type)
328 		crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
329 	if (tbl->valid)
330 		crtl |= GSWIP_PCE_TBL_CTRL_VLD;
331 	crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
332 	crtl |= GSWIP_PCE_TBL_CTRL_BAS;
333 	regmap_write(priv->gswip, GSWIP_PCE_TBL_CTRL, crtl);
334 
335 	err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
336 				     GSWIP_PCE_TBL_CTRL_BAS);
337 
338 	mutex_unlock(&priv->pce_table_lock);
339 
340 	return err;
341 }
342 
343 /* Add the LAN port into a bridge with the CPU port by
344  * default. This prevents automatic forwarding of
345  * packages between the LAN ports when no explicit
346  * bridge is configured.
347  */
gswip_add_single_port_br(struct gswip_priv * priv,int port,bool add)348 static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
349 {
350 	struct gswip_pce_table_entry vlan_active = {0,};
351 	struct gswip_pce_table_entry vlan_mapping = {0,};
352 	int err;
353 
354 	vlan_active.index = port + 1;
355 	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
356 	vlan_active.key[0] = GSWIP_VLAN_UNAWARE_PVID;
357 	vlan_active.val[0] = port + 1 /* fid */;
358 	vlan_active.valid = add;
359 	err = gswip_pce_table_entry_write(priv, &vlan_active);
360 	if (err) {
361 		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
362 		return err;
363 	}
364 
365 	if (!add)
366 		return 0;
367 
368 	vlan_mapping.index = port + 1;
369 	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
370 	vlan_mapping.val[0] = GSWIP_VLAN_UNAWARE_PVID;
371 	vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds);
372 	vlan_mapping.val[2] = 0;
373 	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
374 	if (err) {
375 		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
376 		return err;
377 	}
378 
379 	return 0;
380 }
381 
/* Enable or disable hardware address learning on @port.
 *
 * Returns -EOPNOTSUPP on hardware older than GSWIP 2.2, which has no
 * learning-disable bit.
 */
static int gswip_port_set_learning(struct gswip_priv *priv, int port,
				   bool enable)
{
	u32 lndis = enable ? 0 : GSWIP_PCE_PCTRL_3_LNDIS;

	if (!GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
		return -EOPNOTSUPP;

	/* learning disable bit */
	return regmap_update_bits(priv->gswip, GSWIP_PCE_PCTRL_3p(port),
				  GSWIP_PCE_PCTRL_3_LNDIS, lndis);
}
393 
gswip_port_pre_bridge_flags(struct dsa_switch * ds,int port,struct switchdev_brport_flags flags,struct netlink_ext_ack * extack)394 static int gswip_port_pre_bridge_flags(struct dsa_switch *ds, int port,
395 				       struct switchdev_brport_flags flags,
396 				       struct netlink_ext_ack *extack)
397 {
398 	struct gswip_priv *priv = ds->priv;
399 	unsigned long supported = 0;
400 
401 	if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
402 		supported |= BR_LEARNING;
403 
404 	if (flags.mask & ~supported)
405 		return -EINVAL;
406 
407 	return 0;
408 }
409 
gswip_port_bridge_flags(struct dsa_switch * ds,int port,struct switchdev_brport_flags flags,struct netlink_ext_ack * extack)410 static int gswip_port_bridge_flags(struct dsa_switch *ds, int port,
411 				   struct switchdev_brport_flags flags,
412 				   struct netlink_ext_ack *extack)
413 {
414 	struct gswip_priv *priv = ds->priv;
415 
416 	if (flags.mask & BR_LEARNING)
417 		return gswip_port_set_learning(priv, port,
418 					       !!(flags.val & BR_LEARNING));
419 
420 	return 0;
421 }
422 
gswip_port_setup(struct dsa_switch * ds,int port)423 static int gswip_port_setup(struct dsa_switch *ds, int port)
424 {
425 	struct gswip_priv *priv = ds->priv;
426 	int err;
427 
428 	if (!dsa_is_cpu_port(ds, port)) {
429 		err = gswip_add_single_port_br(priv, port, true);
430 		if (err)
431 			return err;
432 	}
433 
434 	return 0;
435 }
436 
gswip_port_enable(struct dsa_switch * ds,int port,struct phy_device * phydev)437 static int gswip_port_enable(struct dsa_switch *ds, int port,
438 			     struct phy_device *phydev)
439 {
440 	struct gswip_priv *priv = ds->priv;
441 
442 	if (!dsa_is_cpu_port(ds, port)) {
443 		u32 mdio_phy = 0;
444 
445 		if (phydev)
446 			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
447 
448 		regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
449 				  GSWIP_MDIO_PHY_ADDR_MASK,
450 				  mdio_phy);
451 	}
452 
453 	/* RMON Counter Enable for port */
454 	regmap_write(priv->gswip, GSWIP_BM_PCFGp(port), GSWIP_BM_PCFG_CNTEN);
455 
456 	/* enable port fetch/store dma & VLAN Modification */
457 	regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port),
458 			GSWIP_FDMA_PCTRL_EN | GSWIP_FDMA_PCTRL_VLANMOD_BOTH);
459 	regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
460 			GSWIP_SDMA_PCTRL_EN);
461 
462 	return 0;
463 }
464 
/* Stop traffic on a port by clearing the fetch (FDMA) and store (SDMA)
 * DMA enable bits; counterpart of gswip_port_enable().
 */
static void gswip_port_disable(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;

	regmap_clear_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port),
			  GSWIP_FDMA_PCTRL_EN);
	regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
			  GSWIP_SDMA_PCTRL_EN);
}
474 
/* Load the PCE parser microcode into the hardware microcode table.
 *
 * Each row is written through the PCE table access registers in
 * address-based write mode (mask cleared), one row at a time, waiting
 * for the BAS busy bit to clear after starting each access. Once all
 * rows are written, MC_VALID marks the microcode as usable.
 *
 * Returns 0 on success or a negative error code on timeout.
 */
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	/* select address-based write mode for the table accesses below */
	regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
			  GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR);
	regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, 0);

	for (i = 0; i < priv->hw_info->pce_microcode_size; i++) {
		regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, i);
		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(0),
			     (*priv->hw_info->pce_microcode)[i].val_0);
		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(1),
			     (*priv->hw_info->pce_microcode)[i].val_1);
		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(2),
			     (*priv->hw_info->pce_microcode)[i].val_2);
		regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(3),
			     (*priv->hw_info->pce_microcode)[i].val_3);

		/* start the table access: */
		regmap_set_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
				GSWIP_PCE_TBL_CTRL_BAS);
		/* hardware clears BAS when the row has been written */
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
			GSWIP_PCE_GCTRL_0_MC_VALID);

	return 0;
}
513 
/* Program the default PVID register (PCE_DEFPVID) of a user port.
 *
 * The value written is an index into the Active VLAN table, not a raw
 * VLAN ID: for bridged ports it is the slot matching the bridge's PVID
 * (or 0 when a VLAN-aware bridge has no PVID), for standalone ports it
 * is the per-port entry created by gswip_add_single_port_br().
 */
static void gswip_port_commit_pvid(struct gswip_priv *priv, int port)
{
	struct dsa_port *dp = dsa_to_port(priv->ds, port);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	u32 vinr;
	int idx;

	if (!dsa_port_is_user(dp))
		return;

	if (br) {
		u16 pvid = GSWIP_VLAN_UNAWARE_PVID;

		if (br_vlan_enabled(br))
			br_vlan_get_pvid(br, &pvid);

		/* VLAN-aware bridge ports with no PVID will use Active VLAN
		 * index 0. The expectation is that this drops all untagged and
		 * VID-0 tagged ingress traffic.
		 */
		idx = 0;
		for (int i = priv->hw_info->max_ports;
		     i < ARRAY_SIZE(priv->vlans); i++) {
			if (priv->vlans[i].bridge == br &&
			    priv->vlans[i].vid == pvid) {
				idx = i;
				break;
			}
		}
	} else {
		/* The Active VLAN table index as configured by
		 * gswip_add_single_port_br()
		 */
		idx = port + 1;
	}

	/* without a usable PVID entry only accept tagged ingress traffic */
	vinr = idx ? GSWIP_PCE_VCTRL_VINR_ALL : GSWIP_PCE_VCTRL_VINR_TAGGED;
	regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
			  GSWIP_PCE_VCTRL_VINR,
			  FIELD_PREP(GSWIP_PCE_VCTRL_VINR, vinr));

	/* Note that in GSWIP 2.2 VLAN mode the VID needs to be programmed
	 * directly instead of referencing the index in the Active VLAN table.
	 * However, without the VLANMD bit (9) in PCE_GCTRL_1 (0x457) even
	 * GSWIP 2.2 and newer hardware maintain the GSWIP 2.1 behavior.
	 */
	regmap_write(priv->gswip, GSWIP_PCE_DEFPVID(port), idx);
}
562 
/* Switch a port between tag-based (VLAN-aware) and port-based
 * (VLAN-unaware) classification.
 *
 * Tag-based mode enables the UVR/VIMR/VEMR/VID0 checks and clears the
 * transparent VLAN mode (TVM) bit; port-based mode enables VSR, clears
 * the other checks and sets TVM. The port's default PVID is recomputed
 * afterwards since its meaning depends on the filtering mode.
 *
 * Always returns 0.
 */
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering,
				     struct netlink_ext_ack *extack)
{
	struct gswip_priv *priv = ds->priv;

	if (vlan_filtering) {
		/* Use tag based VLAN */
		regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
				  GSWIP_PCE_VCTRL_VSR |
				  GSWIP_PCE_VCTRL_UVR |
				  GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR |
				  GSWIP_PCE_VCTRL_VID0,
				  GSWIP_PCE_VCTRL_UVR |
				  GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR |
				  GSWIP_PCE_VCTRL_VID0);
		regmap_clear_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
				  GSWIP_PCE_PCTRL_0_TVM);
	} else {
		/* Use port based VLAN */
		regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
				  GSWIP_PCE_VCTRL_UVR |
				  GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR |
				  GSWIP_PCE_VCTRL_VID0 |
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL_VSR);
		regmap_set_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
				GSWIP_PCE_PCTRL_0_TVM);
	}

	gswip_port_commit_pvid(priv, port);

	return 0;
}
600 
/* Program the RGMII TX/RX clock delays (MII_PCDU register) of an MII
 * port. Defaults come from the PHY interface mode (the *_ID variants
 * zero the respective internal delay); device tree
 * "rx-internal-delay-ps"/"tx-internal-delay-ps" properties override
 * them when present.
 */
static void gswip_mii_delay_setup(struct gswip_priv *priv, struct dsa_port *dp,
				  phy_interface_t interface)
{
	u32 delay_tx = GSWIP_MII_PCDU_TXDLY_DEFAULT;
	u32 delay_rx = GSWIP_MII_PCDU_RXDLY_DEFAULT;
	u16 pcdu;

	/* As MII_PCDU registers only exist for MII ports, silently return
	 * unless the port is an MII port
	 */
	if (!(priv->hw_info->mii_ports & BIT(dp->index)))
		return;

	switch (dp->index + priv->hw_info->mii_port_reg_offset) {
	case 0:
		pcdu = GSWIP_MII_PCDU0;
		break;
	case 1:
		pcdu = GSWIP_MII_PCDU1;
		break;
	case 5:
		pcdu = GSWIP_MII_PCDU5;
		break;
	default:
		return;
	}

	/* legacy defaults derived from the RGMII delay interface variants */
	if (interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    interface == PHY_INTERFACE_MODE_RGMII_TXID)
		delay_tx = 0;
	if (interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    interface == PHY_INTERFACE_MODE_RGMII_RXID)
		delay_rx = 0;

	/* device tree properties take precedence over the legacy defaults */
	of_property_read_u32(dp->dn, "rx-internal-delay-ps", &delay_rx);
	of_property_read_u32(dp->dn, "tx-internal-delay-ps", &delay_tx);

	regmap_write_bits(priv->mii, pcdu,
			  GSWIP_MII_PCDU_TXDLY_MASK |
			  GSWIP_MII_PCDU_RXDLY_MASK,
			  GSWIP_MII_PCDU_TXDLY(delay_tx) |
			  GSWIP_MII_PCDU_RXDLY(delay_rx));
}
655 
/* One-time switch initialization, called by the DSA core.
 *
 * Resets the switch, disables all ports, loads the PCE parser
 * microcode, configures flooding maps and the MDIO clock, registers the
 * internal MDIO bus, enables special tagging on the CPU port(s) and
 * flushes the MAC table.
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_setup(struct dsa_switch *ds)
{
	unsigned int cpu_ports = dsa_cpu_ports(ds);
	struct gswip_priv *priv = ds->priv;
	struct dsa_port *cpu_dp;
	int err, i;

	/* global software reset */
	regmap_write(priv->gswip, GSWIP_SWRES, GSWIP_SWRES_R0);
	usleep_range(5000, 10000);
	regmap_write(priv->gswip, GSWIP_SWRES, 0);

	/* disable port fetch/store dma on all ports */
	for (i = 0; i < priv->hw_info->max_ports; i++) {
		gswip_port_disable(ds, i);
		gswip_port_vlan_filtering(ds, i, false, NULL);
	}

	/* enable Switch */
	regmap_set_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);

	err = gswip_pce_load_microcode(priv);
	if (err) {
		dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
		return err;
	}

	/* Default unknown Broadcast/Multicast/Unicast port maps */
	regmap_write(priv->gswip, GSWIP_PCE_PMAP1, cpu_ports);
	regmap_write(priv->gswip, GSWIP_PCE_PMAP2, cpu_ports);
	regmap_write(priv->gswip, GSWIP_PCE_PMAP3, cpu_ports);

	/* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have
	 * an interoperability problem with this auto polling mechanism because
	 * their status registers think that the link is in a different state
	 * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
	 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
	 * auto polling state machine consider the link being negotiated with
	 * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
	 * to the switch port being completely dead (RX and TX are both not
	 * working).
	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
	 * it would work fine for a few minutes to hours and then stop; on
	 * other devices no traffic could be sent or received at all.
	 * Testing shows that when PHY auto polling is disabled these problems
	 * go away.
	 */
	regmap_write(priv->mdio, GSWIP_MDIO_MDC_CFG0, 0x0);

	/* Configure the MDIO Clock 2.5 MHz */
	regmap_write_bits(priv->mdio, GSWIP_MDIO_MDC_CFG1, 0xff, 0x09);

	/* bring up the mdio bus */
	err = gswip_mdio(priv);
	if (err) {
		dev_err(priv->dev, "mdio bus setup failed\n");
		return err;
	}

	/* Disable the xMII interface and clear its isolation bit */
	for (i = 0; i < priv->hw_info->max_ports; i++)
		gswip_mii_mask_cfg(priv,
				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
				   0, i);

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		/* enable special tag insertion on cpu port */
		regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(cpu_dp->index),
				GSWIP_FDMA_PCTRL_STEN);

		/* accept special tag in ingress direction */
		regmap_set_bits(priv->gswip,
				GSWIP_PCE_PCTRL_0p(cpu_dp->index),
				GSWIP_PCE_PCTRL_0_INGRESS);
	}

	regmap_set_bits(priv->gswip, GSWIP_BM_QUEUE_GCTRL,
			GSWIP_BM_QUEUE_GCTRL_GL_MOD);

	/* VLAN aware Switching */
	regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
			GSWIP_PCE_GCTRL_0_VLAN);

	/* Flush MAC Table */
	regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
			GSWIP_PCE_GCTRL_0_MTFL);

	/* hardware clears MTFL when the flush has completed */
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
				     GSWIP_PCE_GCTRL_0_MTFL);
	if (err) {
		dev_err(priv->dev, "MAC flushing didn't finish\n");
		return err;
	}

	ds->mtu_enforcement_ingress = true;

	return 0;
}
754 
/* Disable the switch (clear the global enable bit set in gswip_setup())
 * when the DSA switch is torn down.
 */
static void gswip_teardown(struct dsa_switch *ds)
{
	struct gswip_priv *priv = ds->priv;

	regmap_clear_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
}
761 
gswip_get_tag_protocol(struct dsa_switch * ds,int port,enum dsa_tag_protocol mp)762 static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
763 						    int port,
764 						    enum dsa_tag_protocol mp)
765 {
766 	struct gswip_priv *priv = ds->priv;
767 
768 	return priv->hw_info->tag_protocol;
769 }
770 
/* Create an Active VLAN table entry for (@bridge, @vid) in the first
 * free slot above the per-port range and mirror it in priv->vlans.
 * A @fid of -1 means "use the table index as flow id".
 *
 * Returns the slot index (>= 0), -ENOSPC when the table is full, or a
 * negative error code from the hardware write.
 */
static int gswip_vlan_active_create(struct gswip_priv *priv,
				    struct net_device *bridge,
				    int fid, u16 vid)
{
	struct gswip_pce_table_entry vlan_active = {
		.table = GSWIP_TABLE_ACTIVE_VLAN,
		.valid = true,
	};
	int idx, err;

	/* claim the first unused slot above the per-port entries */
	for (idx = priv->hw_info->max_ports; ; idx++) {
		if (idx >= ARRAY_SIZE(priv->vlans))
			return -ENOSPC;
		if (!priv->vlans[idx].bridge)
			break;
	}

	if (fid == -1)
		fid = idx;

	vlan_active.index = idx;
	vlan_active.key[0] = vid;
	vlan_active.val[0] = fid;

	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	priv->vlans[idx].bridge = bridge;
	priv->vlans[idx].vid = vid;
	priv->vlans[idx].fid = fid;

	return idx;
}
813 
gswip_vlan_active_remove(struct gswip_priv * priv,int idx)814 static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
815 {
816 	struct gswip_pce_table_entry vlan_active = {0,};
817 	int err;
818 
819 	vlan_active.index = idx;
820 	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
821 	vlan_active.valid = false;
822 	err = gswip_pce_table_entry_write(priv, &vlan_active);
823 	if (err)
824 		dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
825 	priv->vlans[idx].bridge = NULL;
826 
827 	return err;
828 }
829 
/* Add @port to VLAN @vid of @bridge.
 *
 * Reuses the bridge's existing Active VLAN / VLAN mapping entry when
 * one exists for @vid, otherwise creates a new Active VLAN entry. The
 * CPU port(s) are always added to the member map, and the port's
 * default PVID is recomputed on success.
 *
 * NOTE(review): the @pvid parameter is currently unused in this body —
 * confirm whether callers rely on it.
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_vlan_add(struct gswip_priv *priv, struct net_device *bridge,
			  int port, u16 vid, bool untagged, bool pvid,
			  bool vlan_aware)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_ports = dsa_cpu_ports(priv->ds);
	bool active_vlan_created = false;
	int fid = -1, idx = -1;
	int i, err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			if (vlan_aware) {
				/* all VLANs of one bridge must share one fid
				 * so they share one MAC learning domain
				 */
				if (fid != -1 && fid != priv->vlans[i].fid)
					dev_err(priv->dev, "one bridge with multiple flow ids\n");
				fid = priv->vlans[i].fid;
			}
			if (priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}
	}

	/* If this bridge is not programmed yet, add a Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* VLAN ID byte, maps to the VLAN ID of vlan active table */
	vlan_mapping.val[0] = vid;
	/* Update the VLAN mapping entry and write it to the switch.
	 * val[1] is the port member map; val[2] looks like the tagged-egress
	 * map (bit set = transmit tagged) — NOTE(review): confirm against the
	 * GSWIP datasheet.
	 */
	vlan_mapping.val[1] |= cpu_ports;
	vlan_mapping.val[1] |= BIT(port);
	if (vlan_aware)
		vlan_mapping.val[2] |= cpu_ports;
	if (untagged)
		vlan_mapping.val[2] &= ~BIT(port);
	else
		vlan_mapping.val[2] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	gswip_port_commit_pvid(priv, port);

	return 0;
}
903 
/* Remove @port from VLAN @vid of @bridge.
 *
 * Clears the port from the member and tag maps of the matching VLAN
 * mapping entry; when no user port is left (only CPU ports remain in
 * the member map) the Active VLAN entry is removed as well. The port's
 * default PVID is recomputed afterwards.
 *
 * Returns 0 on success, -ENOENT when the bridge/VID pair is unknown, or
 * another negative error code on hardware access failure.
 */
static int gswip_vlan_remove(struct gswip_priv *priv,
			     struct net_device *bridge, int port,
			     u16 vid)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge &&
		    priv->vlans[i].vid == vid) {
			idx = i;
			break;
		}
	}

	if (idx == -1) {
		dev_err(priv->dev, "Port %d cannot find VID %u of bridge %s\n",
			port, vid, bridge ? bridge->name : "(null)");
		return -ENOENT;
	}

	vlan_mapping.index = idx;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
		return err;
	}

	vlan_mapping.val[1] &= ~BIT(port);
	vlan_mapping.val[2] &= ~BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	/* In case all ports are removed from the bridge, remove the VLAN */
	if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) {
		err = gswip_vlan_active_remove(priv, idx);
		if (err) {
			dev_err(priv->dev, "failed to write active VLAN: %d\n",
				err);
			return err;
		}
	}

	gswip_port_commit_pvid(priv, port);

	return 0;
}
959 
/* DSA .port_bridge_join: move the port from standalone operation into
 * the VLAN-unaware forwarding domain of the given bridge.
 */
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
				  struct dsa_bridge bridge,
				  bool *tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	/* Join the bridge's VLAN-unaware PVID first ... */
	err = gswip_vlan_add(priv, bridge.dev, port, GSWIP_VLAN_UNAWARE_PVID,
			     true, true, false);
	if (err)
		return err;

	/* ... then remove the port from the "single-port bridge" through
	 * which it was operating as standalone.
	 */
	return gswip_add_single_port_br(priv, port, false);
}
980 
gswip_port_bridge_leave(struct dsa_switch * ds,int port,struct dsa_bridge bridge)981 static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
982 				    struct dsa_bridge bridge)
983 {
984 	struct net_device *br = bridge.dev;
985 	struct gswip_priv *priv = ds->priv;
986 
987 	/* Add the port back to the "single-port bridge", and remove it from
988 	 * the VLAN-unaware PVID created for this bridge.
989 	 */
990 	gswip_add_single_port_br(priv, port, true);
991 	gswip_vlan_remove(priv, br, port, GSWIP_VLAN_UNAWARE_PVID);
992 }
993 
gswip_port_vlan_prepare(struct dsa_switch * ds,int port,const struct switchdev_obj_port_vlan * vlan,struct netlink_ext_ack * extack)994 static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
995 				   const struct switchdev_obj_port_vlan *vlan,
996 				   struct netlink_ext_ack *extack)
997 {
998 	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
999 	struct gswip_priv *priv = ds->priv;
1000 	unsigned int max_ports = priv->hw_info->max_ports;
1001 	int pos = max_ports;
1002 	int i, idx = -1;
1003 
1004 	/* We only support VLAN filtering on bridges */
1005 	if (!dsa_is_cpu_port(ds, port) && !bridge)
1006 		return -EOPNOTSUPP;
1007 
1008 	/* Check if there is already a page for this VLAN */
1009 	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
1010 		if (priv->vlans[i].bridge == bridge &&
1011 		    priv->vlans[i].vid == vlan->vid) {
1012 			idx = i;
1013 			break;
1014 		}
1015 	}
1016 
1017 	/* If this VLAN is not programmed yet, we have to reserve
1018 	 * one entry in the VLAN table. Make sure we start at the
1019 	 * next position round.
1020 	 */
1021 	if (idx == -1) {
1022 		/* Look for a free slot */
1023 		for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
1024 			if (!priv->vlans[pos].bridge) {
1025 				idx = pos;
1026 				pos++;
1027 				break;
1028 			}
1029 		}
1030 
1031 		if (idx == -1) {
1032 			NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
1033 			return -ENOSPC;
1034 		}
1035 	}
1036 
1037 	return 0;
1038 }
1039 
gswip_port_vlan_add(struct dsa_switch * ds,int port,const struct switchdev_obj_port_vlan * vlan,struct netlink_ext_ack * extack)1040 static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
1041 			       const struct switchdev_obj_port_vlan *vlan,
1042 			       struct netlink_ext_ack *extack)
1043 {
1044 	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1045 	struct gswip_priv *priv = ds->priv;
1046 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1047 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1048 	int err;
1049 
1050 	if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID)
1051 		return 0;
1052 
1053 	err = gswip_port_vlan_prepare(ds, port, vlan, extack);
1054 	if (err)
1055 		return err;
1056 
1057 	/* We have to receive all packets on the CPU port and should not
1058 	 * do any VLAN filtering here. This is also called with bridge
1059 	 * NULL and then we do not know for which bridge to configure
1060 	 * this.
1061 	 */
1062 	if (dsa_is_cpu_port(ds, port))
1063 		return 0;
1064 
1065 	return gswip_vlan_add(priv, bridge, port, vlan->vid, untagged, pvid,
1066 			      true);
1067 }
1068 
gswip_port_vlan_del(struct dsa_switch * ds,int port,const struct switchdev_obj_port_vlan * vlan)1069 static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
1070 			       const struct switchdev_obj_port_vlan *vlan)
1071 {
1072 	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
1073 	struct gswip_priv *priv = ds->priv;
1074 
1075 	if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID)
1076 		return 0;
1077 
1078 	/* We have to receive all packets on the CPU port and should not
1079 	 * do any VLAN filtering here. This is also called with bridge
1080 	 * NULL and then we do not know for which bridge to configure
1081 	 * this.
1082 	 */
1083 	if (dsa_is_cpu_port(ds, port))
1084 		return 0;
1085 
1086 	return gswip_vlan_remove(priv, bridge, port, vlan->vid);
1087 }
1088 
/* DSA .port_fast_age: flush all dynamically learned MAC entries of a
 * port by walking the full 2048-entry MAC bridge table and invalidating
 * every non-static entry that points at this port.
 */
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return;
		}

		if (!mac_bridge.valid)
			continue;

		/* Static entries are user-installed FDB entries; keep them */
		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
			continue;

		/* Learned entries carry the source port in val[0] */
		if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
				      mac_bridge.val[0]))
			continue;

		mac_bridge.valid = false;
		err = gswip_pce_table_entry_write(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to write mac bridge: %d\n",
				err);
			return;
		}
	}
}
1126 
/* DSA .port_stp_state_set: map the bridge STP state onto the PCE port
 * state field. BR_STATE_DISABLED additionally stops frame reception by
 * clearing the SDMA enable bit for the port.
 */
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct gswip_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* Disabled ports get their SDMA switched off entirely */
		regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
				  GSWIP_SDMA_PCTRL_EN);
		return;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
		break;
	case BR_STATE_LEARNING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
		break;
	default:
		dev_err(priv->dev, "invalid STP state: %d\n", state);
		return;
	}

	/* Re-enable SDMA (may have been cleared by BR_STATE_DISABLED) and
	 * program the new PCE port state.
	 */
	regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
			GSWIP_SDMA_PCTRL_EN);
	regmap_write_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
			  GSWIP_PCE_PCTRL_0_PSTATE_MASK,
			  stp_state);
}
1158 
/* Install (add=true) or remove (add=false) a static MAC bridge table
 * entry for @addr on @port, in the filtering database of @bridge.
 *
 * NOTE(review): @vid is not used — entries are keyed on the bridge's
 * FID only, not per VLAN; confirm this matches the intended FDB model.
 */
static int gswip_port_fdb(struct dsa_switch *ds, int port,
			  struct net_device *bridge, const unsigned char *addr,
			  u16 vid, bool add)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int fid = -1;
	int i;
	int err;

	/* Any VLAN entry of the bridge carries its FID; the first match
	 * is sufficient.
	 */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			fid = priv->vlans[i].fid;
			break;
		}
	}

	if (fid == -1) {
		dev_err(priv->dev, "no FID found for bridge %s\n",
			bridge->name);
		return -EINVAL;
	}

	/* The 48-bit MAC is packed into three 16-bit key words with the
	 * least significant bytes in key[0]; key[3] selects the FID.
	 */
	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
	mac_bridge.key_mode = true;
	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
	mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
	/* GSWIP 2.2 and later require an explicit VALID flag in val[1] */
	if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2_ETC))
		mac_bridge.val[1] = add ? (GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC |
					   GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID) : 0;
	else
		mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;

	mac_bridge.valid = add;

	err = gswip_pce_table_entry_write(priv, &mac_bridge);
	if (err)
		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);

	return err;
}
1204 
/* DSA .port_fdb_add: only bridge databases are offloaded. */
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	return db.type == DSA_DB_BRIDGE ?
		gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, true) :
		-EOPNOTSUPP;
}
1214 
/* DSA .port_fdb_del: only bridge databases are offloaded. */
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
			      const unsigned char *addr, u16 vid,
			      struct dsa_db db)
{
	return db.type == DSA_DB_BRIDGE ?
		gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, false) :
		-EOPNOTSUPP;
}
1224 
/* DSA .port_fdb_dump: walk the full 2048-entry MAC bridge table and
 * report every entry belonging to @port through @cb. Static entries
 * carry a port bitmap in val[0], learned entries a single port number.
 */
static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
			       dsa_fdb_dump_cb_t *cb, void *data)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned char addr[ETH_ALEN];
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev,
				"failed to read mac bridge entry %d: %d\n",
				i, err);
			return err;
		}

		if (!mac_bridge.valid)
			continue;

		/* Unpack the MAC address; key[0] holds the two least
		 * significant bytes (mirrors gswip_port_fdb()).
		 */
		addr[5] = mac_bridge.key[0] & 0xff;
		addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
		addr[3] = mac_bridge.key[1] & 0xff;
		addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
		addr[1] = mac_bridge.key[2] & 0xff;
		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
			/* Static entry: val[0] is a port bitmap */
			if (mac_bridge.val[0] & BIT(port)) {
				err = cb(addr, 0, true, data);
				if (err)
					return err;
			}
		} else {
			/* Learned entry: val[0] holds one port number */
			if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
					      mac_bridge.val[0])) {
				err = cb(addr, 0, false, data);
				if (err)
					return err;
			}
		}
	}
	return 0;
}
1272 
/* DSA .port_max_mtu: the largest MTU a port can carry — the hardware
 * frame limit minus VLAN Ethernet header and FCS. Includes 8 bytes for
 * the special header (accounted on the CPU port in
 * gswip_port_change_mtu()).
 */
static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
{
	/* Includes 8 bytes for special header. */
	return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
1278 
/* DSA .port_change_mtu: only the CPU port programs the global frame
 * length register (GSWIP_MAC_FLEN); every port toggles its per-port
 * MLEN (non-standard frame length) handling. Always returns 0.
 */
static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct gswip_priv *priv = ds->priv;

	/* CPU port always has maximum mtu of user ports, so use it to set
	 * switch frame size, including 8 byte special header.
	 */
	if (dsa_is_cpu_port(ds, port)) {
		new_mtu += 8;
		regmap_write(priv->gswip, GSWIP_MAC_FLEN,
			     VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN);
	}

	/* Enable MLEN for ports with non-standard MTUs, including the special
	 * header on the CPU port added above.
	 */
	if (new_mtu != ETH_DATA_LEN)
		regmap_set_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port),
				GSWIP_MAC_CTRL_2_MLEN);
	else
		regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port),
				  GSWIP_MAC_CTRL_2_MLEN);

	return 0;
}
1304 
gswip_phylink_get_caps(struct dsa_switch * ds,int port,struct phylink_config * config)1305 static void gswip_phylink_get_caps(struct dsa_switch *ds, int port,
1306 				   struct phylink_config *config)
1307 {
1308 	struct gswip_priv *priv = ds->priv;
1309 
1310 	priv->hw_info->phylink_get_caps(ds, port, config);
1311 }
1312 
/* Force the link state bit in the port's MDIO PHY register. */
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
{
	u32 link_bits = link ? GSWIP_MDIO_PHY_LINK_UP
			     : GSWIP_MDIO_PHY_LINK_DOWN;

	regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
			  GSWIP_MDIO_PHY_LINK_MASK, link_bits);
}
1325 
/* Program the MDIO PHY speed bits, the xMII clock rate and the MAC
 * interface mode for the given link speed.
 *
 * NOTE(review): speeds other than 10/100/1000 fall through the switch
 * and write 0 into all three masked register fields — confirm callers
 * never pass other speeds.
 */
static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
				 phy_interface_t interface)
{
	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;

	switch (speed) {
	case SPEED_10:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;

		/* RMII always runs its reference clock at 50 MHz */
		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;

	case SPEED_100:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;

		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M25;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;

	case SPEED_1000:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;

		mii_cfg = GSWIP_MII_CFG_RATE_M125;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
		break;
	}

	regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
			  GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy);
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
	regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
			  GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0);
}
1369 
gswip_port_set_duplex(struct gswip_priv * priv,int port,int duplex)1370 static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
1371 {
1372 	u32 mac_ctrl_0, mdio_phy;
1373 
1374 	if (duplex == DUPLEX_FULL) {
1375 		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
1376 		mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
1377 	} else {
1378 		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
1379 		mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
1380 	}
1381 
1382 	regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
1383 			  GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0);
1384 	regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
1385 			  GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy);
1386 }
1387 
/* Program the flow control (pause) configuration into the MAC and the
 * MDIO PHY registers for the given tx/rx pause combination.
 */
static void gswip_port_set_pause(struct gswip_priv *priv, int port,
				 bool tx_pause, bool rx_pause)
{
	u32 mac_ctrl_0, mdio_phy;

	/* The MDIO PHY register has independent tx and rx enable bits */
	mdio_phy  = tx_pause ? GSWIP_MDIO_PHY_FCONTX_EN
			     : GSWIP_MDIO_PHY_FCONTX_DIS;
	mdio_phy |= rx_pause ? GSWIP_MDIO_PHY_FCONRX_EN
			     : GSWIP_MDIO_PHY_FCONRX_DIS;

	/* The MAC register encodes the combination as a single field */
	if (tx_pause && rx_pause)
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
	else if (tx_pause)
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
	else if (rx_pause)
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
	else
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;

	regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
			  GSWIP_MAC_CTRL_0_FCON_MASK, mac_ctrl_0);
	regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
			  GSWIP_MDIO_PHY_FCONTX_MASK | GSWIP_MDIO_PHY_FCONRX_MASK,
			  mdio_phy);
}
1417 
/* phylink .mac_config: program the xMII mode for the port according to
 * the PHY interface type, then apply any RGMII delay setup.
 */
static void gswip_phylink_mac_config(struct phylink_config *config,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;
	int port = dp->index;
	u32 miicfg = 0;

	/* NOTE(review): LDCLKDIS presumably disables the xMII clock on
	 * link down — confirm against the datasheet.
	 */
	miicfg |= GSWIP_MII_CFG_LDCLKDIS;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		/* Serdes modes take no xMII configuration here */
		return;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_INTERNAL:
		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
		break;
	case PHY_INTERFACE_MODE_RMII:
		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
		/* Optionally drive the 50 MHz RMII reference clock out */
		if (of_property_read_bool(dp->dn, "maxlinear,rmii-refclk-out"))
			miicfg |= GSWIP_MII_CFG_RMII_CLK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_GMII:
		miicfg |= GSWIP_MII_CFG_MODE_GMII;
		break;
	default:
		dev_err(dp->ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}

	gswip_mii_mask_cfg(priv,
			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
			   miicfg, port);

	gswip_mii_delay_setup(priv, dp, state->interface);
}
1468 
/* phylink .mac_link_down: stop the xMII interface and, on user ports,
 * force the MDIO link state down.
 */
static void gswip_phylink_mac_link_down(struct phylink_config *config,
					unsigned int mode,
					phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;

	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, dp->index);

	if (dsa_port_is_cpu(dp))
		return;

	gswip_port_set_link(priv, dp->index, false);
}
1482 
/* phylink .mac_link_up: force link parameters into the MAC/MDIO
 * registers (except for the internal CPU port) and enable the xMII
 * interface.
 */
static void gswip_phylink_mac_link_up(struct phylink_config *config,
				      struct phy_device *phydev,
				      unsigned int mode,
				      phy_interface_t interface,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;
	bool internal_cpu_port = dsa_port_is_cpu(dp) &&
				 interface == PHY_INTERFACE_MODE_INTERNAL;
	int port = dp->index;

	if (!internal_cpu_port) {
		gswip_port_set_link(priv, port, true);
		gswip_port_set_speed(priv, port, speed, interface);
		gswip_port_set_duplex(priv, port, duplex);
		gswip_port_set_pause(priv, port, tx_pause, rx_pause);
	}

	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, GSWIP_MII_CFG_EN, port);
}
1503 
/* DSA .get_strings: emit the RMON counter names for ethtool -S. */
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			      uint8_t *data)
{
	size_t idx;

	if (stringset != ETH_SS_STATS)
		return;

	for (idx = 0; idx < ARRAY_SIZE(gswip_rmon_cnt); idx++)
		ethtool_puts(&data, gswip_rmon_cnt[idx].name);
}
1515 
/* Read one 32-bit counter value from the BM RAM at @table/@index. The
 * value is split across two 16-bit value registers. Returns 0 (after
 * logging) if the hardware does not complete the read in time.
 */
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
				    u32 index)
{
	u32 result, val;
	int err;

	/* Select the entry and kick off the read by setting BAS */
	regmap_write(priv->gswip, GSWIP_BM_RAM_ADDR, index);
	regmap_write_bits(priv->gswip, GSWIP_BM_RAM_CTRL,
			  GSWIP_BM_RAM_CTRL_ADDR_MASK | GSWIP_BM_RAM_CTRL_OPMOD |
			  GSWIP_BM_RAM_CTRL_BAS,
			  table | GSWIP_BM_RAM_CTRL_BAS);

	/* Wait for the hardware to clear BAS, signalling completion */
	err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
				     GSWIP_BM_RAM_CTRL_BAS);
	if (err) {
		dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
			table, index);
		return 0;
	}

	/* Combine the low (VAL0) and high (VAL1) 16-bit halves */
	regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(0), &result);
	regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(1), &val);
	result |= val << 16;

	return result;
}
1542 
/* DSA .get_ethtool_stats: fill @data with the port's RMON counters.
 * Counters spanning two RAM words have their high word at offset + 1.
 */
static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
				    uint64_t *data)
{
	struct gswip_priv *priv = ds->priv;
	int i;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
		const struct gswip_rmon_cnt_desc *desc = &gswip_rmon_cnt[i];
		u64 val;

		val = gswip_bcm_ram_entry_read(priv, port, desc->offset);
		if (desc->size == 2)
			val |= (u64)gswip_bcm_ram_entry_read(priv, port,
							     desc->offset + 1) << 32;

		data[i] = val;
	}
}
1563 
gswip_get_sset_count(struct dsa_switch * ds,int port,int sset)1564 static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
1565 {
1566 	if (sset != ETH_SS_STATS)
1567 		return 0;
1568 
1569 	return ARRAY_SIZE(gswip_rmon_cnt);
1570 }
1571 
gswip_set_mac_eee(struct dsa_switch * ds,int port,struct ethtool_keee * e)1572 static int gswip_set_mac_eee(struct dsa_switch *ds, int port,
1573 			     struct ethtool_keee *e)
1574 {
1575 	if (e->tx_lpi_timer > 0x7f)
1576 		return -EINVAL;
1577 
1578 	return 0;
1579 }
1580 
gswip_phylink_mac_disable_tx_lpi(struct phylink_config * config)1581 static void gswip_phylink_mac_disable_tx_lpi(struct phylink_config *config)
1582 {
1583 	struct dsa_port *dp = dsa_phylink_to_port(config);
1584 	struct gswip_priv *priv = dp->ds->priv;
1585 
1586 	regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index),
1587 			  GSWIP_MAC_CTRL_4_LPIEN);
1588 }
1589 
/* phylink .mac_enable_tx_lpi: enable low power idle on the port's MAC
 * and program both the GWAIT and WAIT fields from the same requested
 * @timer value. @tx_clock_stop is not acted upon here.
 */
static int gswip_phylink_mac_enable_tx_lpi(struct phylink_config *config,
					   u32 timer, bool tx_clock_stop)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;

	return regmap_update_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index),
				  GSWIP_MAC_CTRL_4_LPIEN |
				  GSWIP_MAC_CTRL_4_GWAIT_MASK |
				  GSWIP_MAC_CTRL_4_WAIT_MASK,
				  GSWIP_MAC_CTRL_4_LPIEN |
				  GSWIP_MAC_CTRL_4_GWAIT(timer) |
				  GSWIP_MAC_CTRL_4_WAIT(timer));
}
1604 
gswip_support_eee(struct dsa_switch * ds,int port)1605 static bool gswip_support_eee(struct dsa_switch *ds, int port)
1606 {
1607 	struct gswip_priv *priv = ds->priv;
1608 
1609 	if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
1610 		return true;
1611 
1612 	return false;
1613 }
1614 
/* phylink .mac_select_pcs: PCS selection is model specific; return
 * NULL when the model provides no hook.
 */
static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config,
							phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct gswip_priv *priv = dp->ds->priv;

	if (!priv->hw_info->mac_select_pcs)
		return NULL;

	return priv->hw_info->mac_select_pcs(config, interface);
}
1626 
/* MAC layer callbacks handed to phylink for all GSWIP ports. */
static const struct phylink_mac_ops gswip_phylink_mac_ops = {
	.mac_config		= gswip_phylink_mac_config,
	.mac_link_down		= gswip_phylink_mac_link_down,
	.mac_link_up		= gswip_phylink_mac_link_up,
	.mac_disable_tx_lpi	= gswip_phylink_mac_disable_tx_lpi,
	.mac_enable_tx_lpi	= gswip_phylink_mac_enable_tx_lpi,
	.mac_select_pcs		= gswip_phylink_mac_select_pcs,
};
1635 
/* DSA switch operations shared by all GSWIP hardware variants. */
static const struct dsa_switch_ops gswip_switch_ops = {
	.get_tag_protocol	= gswip_get_tag_protocol,
	.setup			= gswip_setup,
	.teardown		= gswip_teardown,
	.port_setup		= gswip_port_setup,
	.port_enable		= gswip_port_enable,
	.port_disable		= gswip_port_disable,
	.port_pre_bridge_flags	= gswip_port_pre_bridge_flags,
	.port_bridge_flags	= gswip_port_bridge_flags,
	.port_bridge_join	= gswip_port_bridge_join,
	.port_bridge_leave	= gswip_port_bridge_leave,
	.port_fast_age		= gswip_port_fast_age,
	.port_vlan_filtering	= gswip_port_vlan_filtering,
	.port_vlan_add		= gswip_port_vlan_add,
	.port_vlan_del		= gswip_port_vlan_del,
	.port_stp_state_set	= gswip_port_stp_state_set,
	.port_fdb_add		= gswip_port_fdb_add,
	.port_fdb_del		= gswip_port_fdb_del,
	.port_fdb_dump		= gswip_port_fdb_dump,
	.port_change_mtu	= gswip_port_change_mtu,
	.port_max_mtu		= gswip_port_max_mtu,
	.phylink_get_caps	= gswip_phylink_get_caps,
	.get_strings		= gswip_get_strings,
	.get_ethtool_stats	= gswip_get_ethtool_stats,
	.get_sset_count		= gswip_get_sset_count,
	.set_mac_eee		= gswip_set_mac_eee,
	.support_eee		= gswip_support_eee,
	.port_hsr_join		= dsa_port_simple_hsr_join,
	.port_hsr_leave		= dsa_port_simple_hsr_leave,
};
1666 
gswip_validate_cpu_port(struct dsa_switch * ds)1667 static int gswip_validate_cpu_port(struct dsa_switch *ds)
1668 {
1669 	struct gswip_priv *priv = ds->priv;
1670 	struct dsa_port *cpu_dp;
1671 	int cpu_port = -1;
1672 
1673 	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
1674 		if (cpu_port != -1)
1675 			return dev_err_probe(ds->dev, -EINVAL,
1676 					     "only a single CPU port is supported\n");
1677 
1678 		cpu_port = cpu_dp->index;
1679 	}
1680 
1681 	if (cpu_port == -1)
1682 		return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n");
1683 
1684 	if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports)
1685 		return dev_err_probe(ds->dev, -EINVAL,
1686 				     "unsupported CPU port defined\n");
1687 
1688 	return 0;
1689 }
1690 
/* Common probe tail shared by the GSWIP platform drivers: allocates and
 * registers the DSA switch, derives the numeric hardware version and
 * validates the CPU port. Returns 0 or a negative errno (errors are
 * reported through dev_err_probe()).
 */
int gswip_probe_common(struct gswip_priv *priv, u32 version)
{
	int err;

	mutex_init(&priv->pce_table_lock);

	priv->ds = devm_kzalloc(priv->dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->dev = priv->dev;
	priv->ds->num_ports = priv->hw_info->max_ports;
	priv->ds->ops = &gswip_switch_ops;
	priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
	priv->ds->priv = priv;

	/* The hardware has the 'major/minor' version bytes in the wrong order
	 * preventing numerical comparisons. Construct a 16-bit unsigned integer
	 * having the REV field as most significant byte and the MOD field as
	 * least significant byte. This is effectively swapping the two bytes of
	 * the version variable, but other than using swab16 it doesn't affect
	 * the source variable.
	 */
	priv->version = GSWIP_VERSION_REV(version) << 8 |
			GSWIP_VERSION_MOD(version);

	err = dsa_register_switch(priv->ds);
	if (err)
		return dev_err_probe(priv->dev, err, "dsa switch registration failed\n");

	/* CPU port validation runs after registration; unregister again
	 * if the declared CPU port is not usable.
	 */
	err = gswip_validate_cpu_port(priv->ds);
	if (err)
		goto unregister_switch;

	dev_info(priv->dev, "probed GSWIP version %lx mod %lx\n",
		 GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version));

	return 0;

unregister_switch:
	dsa_unregister_switch(priv->ds);

	return err;
}
1735 EXPORT_SYMBOL_GPL(gswip_probe_common);
1736 
1737 MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
1738 MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
1739 MODULE_DESCRIPTION("Lantiq / Intel / MaxLinear GSWIP common functions");
1740 MODULE_LICENSE("GPL");
1741