xref: /linux/drivers/net/dsa/qca/qca8k-common.c (revision 90e0d94d369d342e735a75174439482119b6c393)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
4  * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5  * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6  * Copyright (c) 2016 John Crispin <john@phrozen.org>
7  */
8 
9 #include <linux/netdevice.h>
10 #include <net/dsa.h>
11 #include <linux/if_bridge.h>
12 
13 #include "qca8k.h"
14 
15 #define MIB_DESC(_s, _o, _n)	\
16 	{			\
17 		.size = (_s),	\
18 		.offset = (_o),	\
19 		.name = (_n),	\
20 	}
21 
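/* Layout of the per-port MIB counter table below: each offset is relative to
 * the port's MIB counter base (see QCA8K_PORT_MIB_COUNTER() in
 * qca8k_get_ethtool_stats()) and size is the counter width in 32-bit words.
 * Counters with size == 2 span two consecutive registers, low word first and
 * high word at offset + 4.
 */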
22 const struct qca8k_mib_desc ar8327_mib[] = {
23 	MIB_DESC(1, 0x00, "RxBroad"),
24 	MIB_DESC(1, 0x04, "RxPause"),
25 	MIB_DESC(1, 0x08, "RxMulti"),
26 	MIB_DESC(1, 0x0c, "RxFcsErr"),
27 	MIB_DESC(1, 0x10, "RxAlignErr"),
28 	MIB_DESC(1, 0x14, "RxRunt"),
29 	MIB_DESC(1, 0x18, "RxFragment"),
30 	MIB_DESC(1, 0x1c, "Rx64Byte"),
31 	MIB_DESC(1, 0x20, "Rx128Byte"),
32 	MIB_DESC(1, 0x24, "Rx256Byte"),
33 	MIB_DESC(1, 0x28, "Rx512Byte"),
34 	MIB_DESC(1, 0x2c, "Rx1024Byte"),
35 	MIB_DESC(1, 0x30, "Rx1518Byte"),
36 	MIB_DESC(1, 0x34, "RxMaxByte"),
37 	MIB_DESC(1, 0x38, "RxTooLong"),
38 	MIB_DESC(2, 0x3c, "RxGoodByte"),
39 	MIB_DESC(2, 0x44, "RxBadByte"),
40 	MIB_DESC(1, 0x4c, "RxOverFlow"),
41 	MIB_DESC(1, 0x50, "Filtered"),
42 	MIB_DESC(1, 0x54, "TxBroad"),
43 	MIB_DESC(1, 0x58, "TxPause"),
44 	MIB_DESC(1, 0x5c, "TxMulti"),
45 	MIB_DESC(1, 0x60, "TxUnderRun"),
46 	MIB_DESC(1, 0x64, "Tx64Byte"),
47 	MIB_DESC(1, 0x68, "Tx128Byte"),
48 	MIB_DESC(1, 0x6c, "Tx256Byte"),
49 	MIB_DESC(1, 0x70, "Tx512Byte"),
50 	MIB_DESC(1, 0x74, "Tx1024Byte"),
51 	MIB_DESC(1, 0x78, "Tx1518Byte"),
52 	MIB_DESC(1, 0x7c, "TxMaxByte"),
53 	MIB_DESC(1, 0x80, "TxOverSize"),
54 	MIB_DESC(2, 0x84, "TxByte"),
55 	MIB_DESC(1, 0x8c, "TxCollision"),
56 	MIB_DESC(1, 0x90, "TxAbortCol"),
57 	MIB_DESC(1, 0x94, "TxMultiCol"),
58 	MIB_DESC(1, 0x98, "TxSingleCol"),
59 	MIB_DESC(1, 0x9c, "TxExcDefer"),
60 	MIB_DESC(1, 0xa0, "TxDefer"),
61 	MIB_DESC(1, 0xa4, "TxLateCol"),
62 	MIB_DESC(1, 0xa8, "RXUnicast"),
63 	MIB_DESC(1, 0xac, "TXUnicast"),
64 };
65 
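/* Thin wrappers around the switch regmap. The regmap itself is set up
 * elsewhere (by the bus-specific qca8k driver), so the common helpers below
 * work regardless of how the switch registers are actually reached.
 */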
66 int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
67 {
68 	return regmap_read(priv->regmap, reg, val);
69 }
70 
71 int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
72 {
73 	return regmap_write(priv->regmap, reg, val);
74 }
75 
76 int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
77 {
78 	return regmap_update_bits(priv->regmap, reg, mask, write_val);
79 }
80 
81 static const struct regmap_range qca8k_readable_ranges[] = {
82 	regmap_reg_range(0x0000, 0x00e4), /* Global control */
83 	regmap_reg_range(0x0100, 0x0168), /* EEE control */
84 	regmap_reg_range(0x0200, 0x0270), /* Parser control */
85 	regmap_reg_range(0x0400, 0x0454), /* ACL */
86 	regmap_reg_range(0x0600, 0x0718), /* Lookup */
87 	regmap_reg_range(0x0800, 0x0b70), /* QM */
88 	regmap_reg_range(0x0c00, 0x0c80), /* PKT */
89 	regmap_reg_range(0x0e00, 0x0e98), /* L3 */
90 	regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
91 	regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
92 	regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
93 	regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
94 	regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
95 	regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
96 	regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
97 };
98 
99 const struct regmap_access_table qca8k_readable_table = {
100 	.yes_ranges = qca8k_readable_ranges,
101 	.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
102 };
103 
104 /* TODO: remove these extra ops when we can support regmap bulk read/write */
105 static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
106 {
107 	int i, count = len / sizeof(u32), ret;
108 
109 	if (priv->mgmt_master && priv->info->ops->read_eth &&
110 	    !priv->info->ops->read_eth(priv, reg, val, len))
111 		return 0;
112 
113 	for (i = 0; i < count; i++) {
114 		ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
115 		if (ret < 0)
116 			return ret;
117 	}
118 
119 	return 0;
120 }
121 
122 /* TODO: remove these extra ops when we can support regmap bulk read/write */
123 static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
124 {
125 	int i, count = len / sizeof(u32), ret;
126 	u32 tmp;
127 
128 	if (priv->mgmt_master && priv->info->ops->write_eth &&
129 	    !priv->info->ops->write_eth(priv, reg, val, len))
130 		return 0;
131 
132 	for (i = 0; i < count; i++) {
133 		tmp = val[i];
134 
135 		ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
136 		if (ret < 0)
137 			return ret;
138 	}
139 
140 	return 0;
141 }
142 
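/* Poll @reg until the bits in @mask have cleared, giving up after
 * QCA8K_BUSY_WAIT_TIMEOUT ms. Used for the ATU, VTU and MIB busy flags.
 */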
143 static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
144 {
145 	u32 val;
146 
147 	return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
148 				       QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
149 }
150 
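/* An ARL/ATU entry is spread over three 32-bit ATU_DATA registers:
 * reg[0] holds MAC bytes 2-5, reg[1] holds MAC bytes 0-1 plus the port mask,
 * and reg[2] holds the aging/status field and the VID. qca8k_fdb_read() and
 * qca8k_fdb_write() convert between that layout and struct qca8k_fdb.
 */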
151 static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
152 {
153 	u32 reg[3];
154 	int ret;
155 
156 	/* load the ARL table into an array */
157 	ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
158 	if (ret)
159 		return ret;
160 
161 	/* vid - 83:72 */
162 	fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
163 	/* aging - 67:64 */
164 	fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
165 	/* portmask - 54:48 */
166 	fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
167 	/* mac - 47:0 */
168 	fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
169 	fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
170 	fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
171 	fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
172 	fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
173 	fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
174 
175 	return 0;
176 }
177 
178 static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
179 			    const u8 *mac, u8 aging)
180 {
181 	u32 reg[3] = { 0 };
182 
183 	/* vid - 83:72 */
184 	reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
185 	/* aging - 67:64 */
186 	reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
187 	/* portmask - 54:48 */
188 	reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
189 	/* mac - 47:0 */
190 	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
191 	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
192 	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
193 	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
194 	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
195 	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
196 
197 	/* load the array into the ARL table */
198 	qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
199 }
200 
201 static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
202 			    int port)
203 {
204 	u32 reg;
205 	int ret;
206 
	/* Set the command and, if requested, the port to operate on */
208 	reg = QCA8K_ATU_FUNC_BUSY;
209 	reg |= cmd;
210 	if (port >= 0) {
211 		reg |= QCA8K_ATU_FUNC_PORT_EN;
212 		reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
213 	}
214 
215 	/* Write the function register triggering the table access */
216 	ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
217 	if (ret)
218 		return ret;
219 
220 	/* wait for completion */
221 	ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
222 	if (ret)
223 		return ret;
224 
225 	/* Check for table full violation when adding an entry */
226 	if (cmd == QCA8K_FDB_LOAD) {
227 		ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
228 		if (ret < 0)
229 			return ret;
230 		if (reg & QCA8K_ATU_FUNC_FULL)
231 			return -1;
232 	}
233 
234 	return 0;
235 }
236 
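/* Walk the ARL table: the entry currently held in *fdb acts as the cursor,
 * the NEXT command fetches the following entry (optionally restricted to
 * @port) and the result is read back into *fdb. qca8k_port_fdb_dump() uses
 * this to iterate over the whole table.
 */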
237 static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
238 			  int port)
239 {
240 	int ret;
241 
242 	qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
243 	ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
244 	if (ret < 0)
245 		return ret;
246 
247 	return qca8k_fdb_read(priv, fdb);
248 }
249 
250 static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
251 			 u16 port_mask, u16 vid, u8 aging)
252 {
253 	int ret;
254 
255 	mutex_lock(&priv->reg_mutex);
256 	qca8k_fdb_write(priv, vid, port_mask, mac, aging);
257 	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
258 	mutex_unlock(&priv->reg_mutex);
259 
260 	return ret;
261 }
262 
263 static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
264 			 u16 port_mask, u16 vid)
265 {
266 	int ret;
267 
268 	mutex_lock(&priv->reg_mutex);
269 	qca8k_fdb_write(priv, vid, port_mask, mac, 0);
270 	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
271 	mutex_unlock(&priv->reg_mutex);
272 
273 	return ret;
274 }
275 
276 void qca8k_fdb_flush(struct qca8k_priv *priv)
277 {
278 	mutex_lock(&priv->reg_mutex);
279 	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
280 	mutex_unlock(&priv->reg_mutex);
281 }
282 
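/* Helpers for the MDB entries: a multicast address is stored as a single ARL
 * entry whose port mask is the union of all member ports, so adding or
 * removing a port means searching for the existing entry, adjusting its port
 * mask and loading it back.
 */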
283 static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
284 				       const u8 *mac, u16 vid)
285 {
286 	struct qca8k_fdb fdb = { 0 };
287 	int ret;
288 
289 	mutex_lock(&priv->reg_mutex);
290 
291 	qca8k_fdb_write(priv, vid, 0, mac, 0);
292 	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
293 	if (ret < 0)
294 		goto exit;
295 
296 	ret = qca8k_fdb_read(priv, &fdb);
297 	if (ret < 0)
298 		goto exit;
299 
	/* Rule exists. Delete it first */
301 	if (!fdb.aging) {
302 		ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
303 		if (ret)
304 			goto exit;
305 	}
306 
307 	/* Add port to fdb portmask */
308 	fdb.port_mask |= port_mask;
309 
310 	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
311 	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
312 
313 exit:
314 	mutex_unlock(&priv->reg_mutex);
315 	return ret;
316 }
317 
318 static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
319 				    const u8 *mac, u16 vid)
320 {
321 	struct qca8k_fdb fdb = { 0 };
322 	int ret;
323 
324 	mutex_lock(&priv->reg_mutex);
325 
326 	qca8k_fdb_write(priv, vid, 0, mac, 0);
327 	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
328 	if (ret < 0)
329 		goto exit;
330 
331 	/* Rule doesn't exist. Why delete? */
332 	if (!fdb.aging) {
333 		ret = -EINVAL;
334 		goto exit;
335 	}
336 
337 	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
338 	if (ret)
339 		goto exit;
340 
	/* This port is the only one in the rule. Don't re-insert it */
342 	if (fdb.port_mask == port_mask)
343 		goto exit;
344 
345 	/* Remove port from port mask */
346 	fdb.port_mask &= ~port_mask;
347 
348 	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
349 	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
350 
351 exit:
352 	mutex_unlock(&priv->reg_mutex);
353 	return ret;
354 }
355 
356 static int qca8k_vlan_access(struct qca8k_priv *priv,
357 			     enum qca8k_vlan_cmd cmd, u16 vid)
358 {
359 	u32 reg;
360 	int ret;
361 
362 	/* Set the command and VLAN index */
363 	reg = QCA8K_VTU_FUNC1_BUSY;
364 	reg |= cmd;
365 	reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
366 
367 	/* Write the function register triggering the table access */
368 	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
369 	if (ret)
370 		return ret;
371 
372 	/* wait for completion */
373 	ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
374 	if (ret)
375 		return ret;
376 
377 	/* Check for table full violation when adding an entry */
378 	if (cmd == QCA8K_VLAN_LOAD) {
379 		ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
380 		if (ret < 0)
381 			return ret;
382 		if (reg & QCA8K_VTU_FUNC1_FULL)
383 			return -ENOMEM;
384 	}
385 
386 	return 0;
387 }
388 
389 static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
390 			  bool untagged)
391 {
392 	u32 reg;
393 	int ret;
394 
395 	/* We do the right thing with VLAN 0 and treat it as untagged while
396 	 * preserving the tag on egress.
397 	 */
398 	if (vid == 0)
399 		return 0;
400 
401 	mutex_lock(&priv->reg_mutex);
402 	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
403 	if (ret < 0)
404 		goto out;
405 
406 	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
407 	if (ret < 0)
408 		goto out;
409 	reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
410 	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
411 	if (untagged)
412 		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
413 	else
414 		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
415 
416 	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
417 	if (ret)
418 		goto out;
419 	ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
420 
421 out:
422 	mutex_unlock(&priv->reg_mutex);
423 
424 	return ret;
425 }
426 
427 static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
428 {
429 	u32 reg, mask;
430 	int ret, i;
431 	bool del;
432 
433 	mutex_lock(&priv->reg_mutex);
434 	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
435 	if (ret < 0)
436 		goto out;
437 
438 	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
439 	if (ret < 0)
440 		goto out;
441 	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
442 	reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
443 
444 	/* Check if we're the last member to be removed */
445 	del = true;
446 	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
447 		mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
448 
449 		if ((reg & mask) != mask) {
450 			del = false;
451 			break;
452 		}
453 	}
454 
455 	if (del) {
456 		ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
457 	} else {
458 		ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
459 		if (ret)
460 			goto out;
461 		ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
462 	}
463 
464 out:
465 	mutex_unlock(&priv->reg_mutex);
466 
467 	return ret;
468 }
469 
470 int qca8k_mib_init(struct qca8k_priv *priv)
471 {
472 	int ret;
473 
474 	mutex_lock(&priv->reg_mutex);
475 	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
476 				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
477 				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
478 				 QCA8K_MIB_BUSY);
479 	if (ret)
480 		goto exit;
481 
482 	ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
483 	if (ret)
484 		goto exit;
485 
486 	ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
487 	if (ret)
488 		goto exit;
489 
490 	ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
491 
492 exit:
493 	mutex_unlock(&priv->reg_mutex);
494 	return ret;
495 }
496 
497 void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
498 {
499 	u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
500 
	/* Ports 0 and 6 have no internal PHY */
502 	if (port > 0 && port < 6)
503 		mask |= QCA8K_PORT_STATUS_LINK_AUTO;
504 
505 	if (enable)
506 		regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
507 	else
508 		regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
509 }
510 
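/* The ethtool helpers below are meant to be plugged into the bus-specific
 * driver's struct dsa_switch_ops, along the lines of (sketch only, not the
 * actual ops table):
 *
 *	.get_strings		= qca8k_get_strings,
 *	.get_ethtool_stats	= qca8k_get_ethtool_stats,
 *	.get_sset_count		= qca8k_get_sset_count,
 */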
511 void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
512 		       uint8_t *data)
513 {
514 	struct qca8k_priv *priv = ds->priv;
515 	int i;
516 
517 	if (stringset != ETH_SS_STATS)
518 		return;
519 
520 	for (i = 0; i < priv->info->mib_count; i++)
521 		strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
522 			ETH_GSTRING_LEN);
523 }
524 
525 void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
526 			     uint64_t *data)
527 {
528 	struct qca8k_priv *priv = ds->priv;
529 	const struct qca8k_mib_desc *mib;
530 	u32 reg, i, val;
531 	u32 hi = 0;
532 	int ret;
533 
534 	if (priv->mgmt_master && priv->info->ops->autocast_mib &&
535 	    priv->info->ops->autocast_mib(ds, port, data) > 0)
536 		return;
537 
538 	for (i = 0; i < priv->info->mib_count; i++) {
539 		mib = &ar8327_mib[i];
540 		reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
541 
542 		ret = qca8k_read(priv, reg, &val);
543 		if (ret < 0)
544 			continue;
545 
546 		if (mib->size == 2) {
547 			ret = qca8k_read(priv, reg + 4, &hi);
548 			if (ret < 0)
549 				continue;
550 		}
551 
552 		data[i] = val;
553 		if (mib->size == 2)
554 			data[i] |= (u64)hi << 32;
555 	}
556 }
557 
558 int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
559 {
560 	struct qca8k_priv *priv = ds->priv;
561 
562 	if (sset != ETH_SS_STATS)
563 		return 0;
564 
565 	return priv->info->mib_count;
566 }
567 
568 int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
569 		      struct ethtool_eee *eee)
570 {
571 	u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
572 	struct qca8k_priv *priv = ds->priv;
573 	u32 reg;
574 	int ret;
575 
576 	mutex_lock(&priv->reg_mutex);
577 	ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
578 	if (ret < 0)
579 		goto exit;
580 
581 	if (eee->eee_enabled)
582 		reg |= lpi_en;
583 	else
584 		reg &= ~lpi_en;
585 	ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
586 
587 exit:
588 	mutex_unlock(&priv->reg_mutex);
589 	return ret;
590 }
591 
592 int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
593 		      struct ethtool_eee *e)
594 {
595 	/* Nothing to do on the port's MAC */
596 	return 0;
597 }
598 
599 void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
600 {
601 	struct qca8k_priv *priv = ds->priv;
602 	u32 stp_state;
603 
604 	switch (state) {
605 	case BR_STATE_DISABLED:
606 		stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
607 		break;
608 	case BR_STATE_BLOCKING:
609 		stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
610 		break;
611 	case BR_STATE_LISTENING:
612 		stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
613 		break;
614 	case BR_STATE_LEARNING:
615 		stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
616 		break;
617 	case BR_STATE_FORWARDING:
618 	default:
619 		stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
620 		break;
621 	}
622 
623 	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
624 		  QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
625 }
626 
627 int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
628 			   struct dsa_bridge bridge,
629 			   bool *tx_fwd_offload,
630 			   struct netlink_ext_ack *extack)
631 {
632 	struct qca8k_priv *priv = ds->priv;
633 	int port_mask, cpu_port;
634 	int i, ret;
635 
636 	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
637 	port_mask = BIT(cpu_port);
638 
639 	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
640 		if (dsa_is_cpu_port(ds, i))
641 			continue;
642 		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
643 			continue;
644 		/* Add this port to the portvlan mask of the other ports
645 		 * in the bridge
646 		 */
647 		ret = regmap_set_bits(priv->regmap,
648 				      QCA8K_PORT_LOOKUP_CTRL(i),
649 				      BIT(port));
650 		if (ret)
651 			return ret;
652 		if (i != port)
653 			port_mask |= BIT(i);
654 	}
655 
	/* Add all other ports to this port's portvlan mask */
657 	ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
658 			QCA8K_PORT_LOOKUP_MEMBER, port_mask);
659 
660 	return ret;
661 }
662 
663 void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
664 			     struct dsa_bridge bridge)
665 {
666 	struct qca8k_priv *priv = ds->priv;
667 	int cpu_port, i;
668 
669 	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
670 
671 	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
672 		if (dsa_is_cpu_port(ds, i))
673 			continue;
674 		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
675 			continue;
		/* Remove this port from the portvlan mask of the other ports
677 		 * in the bridge
678 		 */
679 		regmap_clear_bits(priv->regmap,
680 				  QCA8K_PORT_LOOKUP_CTRL(i),
681 				  BIT(port));
682 	}
683 
684 	/* Set the cpu port to be the only one in the portvlan mask of
685 	 * this port
686 	 */
687 	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
688 		  QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
689 }
690 
691 void qca8k_port_fast_age(struct dsa_switch *ds, int port)
692 {
693 	struct qca8k_priv *priv = ds->priv;
694 
695 	mutex_lock(&priv->reg_mutex);
696 	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
697 	mutex_unlock(&priv->reg_mutex);
698 }
699 
700 int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
701 {
702 	struct qca8k_priv *priv = ds->priv;
703 	unsigned int secs = msecs / 1000;
704 	u32 val;
705 
706 	/* AGE_TIME reg is set in 7s step */
707 	val = secs / 7;
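	/* e.g. the bridge default of 300 s is stored as 300 / 7 = 42, which
	 * the hardware then applies as 42 * 7 = 294 s
	 */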
708 
	/* Handle the case where val rounds down to 0 so that learning is
	 * NOT disabled
	 */
712 	if (!val)
713 		val = 1;
714 
715 	return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
716 				  QCA8K_ATU_AGE_TIME_MASK,
717 				  QCA8K_ATU_AGE_TIME(val));
718 }
719 
720 int qca8k_port_enable(struct dsa_switch *ds, int port,
721 		      struct phy_device *phy)
722 {
723 	struct qca8k_priv *priv = ds->priv;
724 
725 	qca8k_port_set_status(priv, port, 1);
726 	priv->port_enabled_map |= BIT(port);
727 
728 	if (dsa_is_user_port(ds, port))
729 		phy_support_asym_pause(phy);
730 
731 	return 0;
732 }
733 
734 void qca8k_port_disable(struct dsa_switch *ds, int port)
735 {
736 	struct qca8k_priv *priv = ds->priv;
737 
738 	qca8k_port_set_status(priv, port, 0);
739 	priv->port_enabled_map &= ~BIT(port);
740 }
741 
742 int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
743 {
744 	struct qca8k_priv *priv = ds->priv;
745 	int ret;
746 
	/* We only have a global MTU setting.
	 * DSA always sets the CPU port's MTU to the largest MTU of the slave
	 * ports, so setting the MTU just for the CPU port is sufficient to
	 * correctly set a value for every port.
	 */
753 	if (!dsa_is_cpu_port(ds, port))
754 		return 0;
755 
756 	/* To change the MAX_FRAME_SIZE the cpu ports must be off or
757 	 * the switch panics.
758 	 * Turn off both cpu ports before applying the new value to prevent
759 	 * this.
760 	 */
761 	if (priv->port_enabled_map & BIT(0))
762 		qca8k_port_set_status(priv, 0, 0);
763 
764 	if (priv->port_enabled_map & BIT(6))
765 		qca8k_port_set_status(priv, 6, 0);
766 
767 	/* Include L2 header / FCS length */
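	/* e.g. a standard 1500 byte MTU programs 1500 + 14 + 4 = 1518 bytes */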
768 	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
769 			  ETH_HLEN + ETH_FCS_LEN);
770 
771 	if (priv->port_enabled_map & BIT(0))
772 		qca8k_port_set_status(priv, 0, 1);
773 
774 	if (priv->port_enabled_map & BIT(6))
775 		qca8k_port_set_status(priv, 6, 1);
776 
777 	return ret;
778 }
779 
780 int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
781 {
782 	return QCA8K_MAX_MTU;
783 }
784 
785 int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
786 			  u16 port_mask, u16 vid)
787 {
788 	/* Set the vid to the port vlan id if no vid is set */
789 	if (!vid)
790 		vid = QCA8K_PORT_VID_DEF;
791 
792 	return qca8k_fdb_add(priv, addr, port_mask, vid,
793 			     QCA8K_ATU_STATUS_STATIC);
794 }
795 
796 int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
797 		       const unsigned char *addr, u16 vid,
798 		       struct dsa_db db)
799 {
	struct qca8k_priv *priv = ds->priv;
801 	u16 port_mask = BIT(port);
802 
803 	return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
804 }
805 
806 int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
807 		       const unsigned char *addr, u16 vid,
808 		       struct dsa_db db)
809 {
	struct qca8k_priv *priv = ds->priv;
811 	u16 port_mask = BIT(port);
812 
813 	if (!vid)
814 		vid = QCA8K_PORT_VID_DEF;
815 
816 	return qca8k_fdb_del(priv, addr, port_mask, vid);
817 }
818 
819 int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
820 			dsa_fdb_dump_cb_t *cb, void *data)
821 {
	struct qca8k_priv *priv = ds->priv;
823 	struct qca8k_fdb _fdb = { 0 };
824 	int cnt = QCA8K_NUM_FDB_RECORDS;
825 	bool is_static;
826 	int ret = 0;
827 
828 	mutex_lock(&priv->reg_mutex);
829 	while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
830 		if (!_fdb.aging)
831 			break;
832 		is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
833 		ret = cb(_fdb.mac, _fdb.vid, is_static, data);
834 		if (ret)
835 			break;
836 	}
837 	mutex_unlock(&priv->reg_mutex);
838 
839 	return 0;
840 }
841 
842 int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
843 		       const struct switchdev_obj_port_mdb *mdb,
844 		       struct dsa_db db)
845 {
846 	struct qca8k_priv *priv = ds->priv;
847 	const u8 *addr = mdb->addr;
848 	u16 vid = mdb->vid;
849 
850 	return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
851 }
852 
853 int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
854 		       const struct switchdev_obj_port_mdb *mdb,
855 		       struct dsa_db db)
856 {
857 	struct qca8k_priv *priv = ds->priv;
858 	const u8 *addr = mdb->addr;
859 	u16 vid = mdb->vid;
860 
861 	return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
862 }
863 
864 int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
865 			  struct dsa_mall_mirror_tc_entry *mirror,
866 			  bool ingress, struct netlink_ext_ack *extack)
867 {
868 	struct qca8k_priv *priv = ds->priv;
869 	int monitor_port, ret;
870 	u32 reg, val;
871 
	/* Check for an existing entry */
873 	if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
874 		return -EEXIST;
875 
876 	ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
877 	if (ret)
878 		return ret;
879 
880 	/* QCA83xx can have only one port set to mirror mode.
	 * Check that the correct port is requested and return an error otherwise.
	 * When no mirror port is set, the value is 0xF.
883 	 */
884 	monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
885 	if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
886 		return -EEXIST;
887 
888 	/* Set the monitor port */
889 	val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
890 			 mirror->to_local_port);
891 	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
892 				 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
893 	if (ret)
894 		return ret;
895 
896 	if (ingress) {
897 		reg = QCA8K_PORT_LOOKUP_CTRL(port);
898 		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
899 	} else {
900 		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
901 		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
902 	}
903 
904 	ret = regmap_update_bits(priv->regmap, reg, val, val);
905 	if (ret)
906 		return ret;
907 
908 	/* Track mirror port for tx and rx to decide when the
909 	 * mirror port has to be disabled.
910 	 */
911 	if (ingress)
912 		priv->mirror_rx |= BIT(port);
913 	else
914 		priv->mirror_tx |= BIT(port);
915 
916 	return 0;
917 }
918 
919 void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
920 			   struct dsa_mall_mirror_tc_entry *mirror)
921 {
922 	struct qca8k_priv *priv = ds->priv;
923 	u32 reg, val;
924 	int ret;
925 
926 	if (mirror->ingress) {
927 		reg = QCA8K_PORT_LOOKUP_CTRL(port);
928 		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
929 	} else {
930 		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
931 		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
932 	}
933 
934 	ret = regmap_clear_bits(priv->regmap, reg, val);
935 	if (ret)
936 		goto err;
937 
938 	if (mirror->ingress)
939 		priv->mirror_rx &= ~BIT(port);
940 	else
941 		priv->mirror_tx &= ~BIT(port);
942 
	/* No port left sending packets to the mirror port. Disable the mirror port */
944 	if (!priv->mirror_rx && !priv->mirror_tx) {
945 		val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
946 		ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
947 					 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
948 		if (ret)
949 			goto err;
	}

	return;

err:
	dev_err(priv->dev, "Failed to delete mirror port from %d", port);
953 }
954 
955 int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
956 			      bool vlan_filtering,
957 			      struct netlink_ext_ack *extack)
958 {
959 	struct qca8k_priv *priv = ds->priv;
960 	int ret;
961 
962 	if (vlan_filtering) {
963 		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
964 				QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
965 				QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
966 	} else {
967 		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
968 				QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
969 				QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
970 	}
971 
972 	return ret;
973 }
974 
975 int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
976 			const struct switchdev_obj_port_vlan *vlan,
977 			struct netlink_ext_ack *extack)
978 {
979 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
980 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
981 	struct qca8k_priv *priv = ds->priv;
982 	int ret;
983 
984 	ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
985 	if (ret) {
986 		dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
987 		return ret;
988 	}
989 
990 	if (pvid) {
991 		ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
992 				QCA8K_EGREES_VLAN_PORT_MASK(port),
993 				QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
994 		if (ret)
995 			return ret;
996 
997 		ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
998 				  QCA8K_PORT_VLAN_CVID(vlan->vid) |
999 				  QCA8K_PORT_VLAN_SVID(vlan->vid));
1000 	}
1001 
1002 	return ret;
1003 }
1004 
1005 int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
1006 			const struct switchdev_obj_port_vlan *vlan)
1007 {
1008 	struct qca8k_priv *priv = ds->priv;
1009 	int ret;
1010 
1011 	ret = qca8k_vlan_del(priv, port, vlan->vid);
1012 	if (ret)
1013 		dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
1014 
1015 	return ret;
1016 }
1017 
1018 static bool qca8k_lag_can_offload(struct dsa_switch *ds,
1019 				  struct dsa_lag lag,
1020 				  struct netdev_lag_upper_info *info,
1021 				  struct netlink_ext_ack *extack)
1022 {
1023 	struct dsa_port *dp;
1024 	int members = 0;
1025 
1026 	if (!lag.id)
1027 		return false;
1028 
1029 	dsa_lag_foreach_port(dp, ds->dst, &lag)
1030 		/* Includes the port joining the LAG */
1031 		members++;
1032 
1033 	if (members > QCA8K_NUM_PORTS_FOR_LAG) {
1034 		NL_SET_ERR_MSG_MOD(extack,
1035 				   "Cannot offload more than 4 LAG ports");
1036 		return false;
1037 	}
1038 
1039 	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
1040 		NL_SET_ERR_MSG_MOD(extack,
1041 				   "Can only offload LAG using hash TX type");
1042 		return false;
1043 	}
1044 
1045 	if (info->hash_type != NETDEV_LAG_HASH_L2 &&
1046 	    info->hash_type != NETDEV_LAG_HASH_L23) {
1047 		NL_SET_ERR_MSG_MOD(extack,
1048 				   "Can only offload L2 or L2+L3 TX hash");
1049 		return false;
1050 	}
1051 
1052 	return true;
1053 }
1054 
1055 static int qca8k_lag_setup_hash(struct dsa_switch *ds,
1056 				struct dsa_lag lag,
1057 				struct netdev_lag_upper_info *info)
1058 {
1059 	struct net_device *lag_dev = lag.dev;
1060 	struct qca8k_priv *priv = ds->priv;
1061 	bool unique_lag = true;
1062 	unsigned int i;
1063 	u32 hash = 0;
1064 
1065 	switch (info->hash_type) {
1066 	case NETDEV_LAG_HASH_L23:
1067 		hash |= QCA8K_TRUNK_HASH_SIP_EN;
1068 		hash |= QCA8K_TRUNK_HASH_DIP_EN;
1069 		fallthrough;
1070 	case NETDEV_LAG_HASH_L2:
1071 		hash |= QCA8K_TRUNK_HASH_SA_EN;
1072 		hash |= QCA8K_TRUNK_HASH_DA_EN;
1073 		break;
1074 	default: /* We should NEVER reach this */
1075 		return -EOPNOTSUPP;
1076 	}
1077 
	/* Check if we are the only configured LAG */
1079 	dsa_lags_foreach_id(i, ds->dst)
1080 		if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
1081 			unique_lag = false;
1082 			break;
1083 		}
1084 
	/* The hash mode is global. Make sure the same hash mode is used by
	 * all four possible LAGs.
	 * If we are the only LAG we can use whatever hash mode we want.
	 * To change the hash mode, all LAGs must be removed and re-created
	 * with the new mode.
	 */
1092 	if (unique_lag) {
1093 		priv->lag_hash_mode = hash;
1094 	} else if (priv->lag_hash_mode != hash) {
		netdev_err(lag_dev, "Error: Mismatched hash mode across different LAGs is not supported\n");
1096 		return -EOPNOTSUPP;
1097 	}
1098 
1099 	return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
1100 				  QCA8K_TRUNK_HASH_MASK, hash);
1101 }
1102 
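/* Each of the four trunks has a member port mask plus an enable bit in
 * GOL_TRUNK_CTRL0, and a set of member slots (an enable bit plus a port
 * number each) in GOL_TRUNK_CTRL(id). Joining or leaving a LAG keeps both
 * views in sync.
 */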
1103 static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
1104 				     struct dsa_lag lag, bool delete)
1105 {
1106 	struct qca8k_priv *priv = ds->priv;
1107 	int ret, id, i;
1108 	u32 val;
1109 
1110 	/* DSA LAG IDs are one-based, hardware is zero-based */
1111 	id = lag.id - 1;
1112 
1113 	/* Read current port member */
1114 	ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
1115 	if (ret)
1116 		return ret;
1117 
1118 	/* Shift val to the correct trunk */
1119 	val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
1120 	val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
1121 	if (delete)
1122 		val &= ~BIT(port);
1123 	else
1124 		val |= BIT(port);
1125 
	/* Update the port members. With an empty portmap, disable the trunk */
1127 	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
1128 				 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
1129 				 QCA8K_REG_GOL_TRUNK_EN(id),
1130 				 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
1131 				 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
1132 
	/* Search for an empty member slot when adding, or for this port's
	 * slot when deleting
	 */
1134 	for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
1135 		ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
1136 		if (ret)
1137 			return ret;
1138 
1139 		val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
1140 		val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
1141 
1142 		if (delete) {
1143 			/* If port flagged to be disabled assume this member is
1144 			 * empty
1145 			 */
1146 			if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
1147 				continue;
1148 
1149 			val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
1150 			if (val != port)
1151 				continue;
1152 		} else {
1153 			/* If port flagged to be enabled assume this member is
1154 			 * already set
1155 			 */
1156 			if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
1157 				continue;
1158 		}
1159 
1160 		/* We have found the member to add/remove */
1161 		break;
1162 	}
1163 
1164 	/* Set port in the correct port mask or disable port if in delete mode */
1165 	return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
1166 				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
1167 				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
1168 				  !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
1169 				  port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
1170 }
1171 
1172 int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
1173 			struct netdev_lag_upper_info *info,
1174 			struct netlink_ext_ack *extack)
1175 {
1176 	int ret;
1177 
1178 	if (!qca8k_lag_can_offload(ds, lag, info, extack))
1179 		return -EOPNOTSUPP;
1180 
1181 	ret = qca8k_lag_setup_hash(ds, lag, info);
1182 	if (ret)
1183 		return ret;
1184 
1185 	return qca8k_lag_refresh_portmap(ds, port, lag, false);
1186 }
1187 
1188 int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
1189 			 struct dsa_lag lag)
1190 {
1191 	return qca8k_lag_refresh_portmap(ds, port, lag, true);
1192 }
1193 
1194 int qca8k_read_switch_id(struct qca8k_priv *priv)
1195 {
1196 	u32 val;
1197 	u8 id;
1198 	int ret;
1199 
1200 	if (!priv->info)
1201 		return -ENODEV;
1202 
1203 	ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
1204 	if (ret < 0)
1205 		return -ENODEV;
1206 
1207 	id = QCA8K_MASK_CTRL_DEVICE_ID(val);
1208 	if (id != priv->info->id) {
1209 		dev_err(priv->dev,
1210 			"Switch id detected %x but expected %x",
1211 			id, priv->info->id);
1212 		return -ENODEV;
1213 	}
1214 
1215 	priv->switch_id = id;
1216 
1217 	/* Save revision to communicate to the internal PHY driver */
1218 	priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
1219 
1220 	return 0;
1221 }
1222