xref: /linux/drivers/net/dsa/qca/qca8k-8xxx.c (revision 3a38ef2b3cb6b63c105247b5ea4a9cf600e673f0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
4  * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5  * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6  * Copyright (c) 2016 John Crispin <john@phrozen.org>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/phy.h>
11 #include <linux/netdevice.h>
12 #include <linux/bitfield.h>
13 #include <linux/regmap.h>
14 #include <net/dsa.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_platform.h>
18 #include <linux/mdio.h>
19 #include <linux/phylink.h>
20 #include <linux/gpio/consumer.h>
21 #include <linux/etherdevice.h>
22 #include <linux/dsa/tag_qca.h>
23 
24 #include "qca8k.h"
25 
26 static void
27 qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
28 {
29 	regaddr >>= 1;
30 	*r1 = regaddr & 0x1e;
31 
32 	regaddr >>= 5;
33 	*r2 = regaddr & 0x7;
34 
35 	regaddr >>= 3;
36 	*page = regaddr & 0x3ff;
37 }
38 
39 static int
40 qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
41 {
42 	u16 *cached_lo = &priv->mdio_cache.lo;
43 	struct mii_bus *bus = priv->bus;
44 	int ret;
45 
46 	if (lo == *cached_lo)
47 		return 0;
48 
49 	ret = bus->write(bus, phy_id, regnum, lo);
50 	if (ret < 0)
51 		dev_err_ratelimited(&bus->dev,
52 				    "failed to write qca8k 32bit lo register\n");
53 
54 	*cached_lo = lo;
55 	return 0;
56 }
57 
58 static int
59 qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
60 {
61 	u16 *cached_hi = &priv->mdio_cache.hi;
62 	struct mii_bus *bus = priv->bus;
63 	int ret;
64 
65 	if (hi == *cached_hi)
66 		return 0;
67 
68 	ret = bus->write(bus, phy_id, regnum, hi);
69 	if (ret < 0)
70 		dev_err_ratelimited(&bus->dev,
71 				    "failed to write qca8k 32bit hi register\n");
72 
73 	*cached_hi = hi;
74 	return 0;
75 }
76 
77 static int
78 qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
79 {
80 	int ret;
81 
82 	ret = bus->read(bus, phy_id, regnum);
83 	if (ret >= 0) {
84 		*val = ret;
85 		ret = bus->read(bus, phy_id, regnum + 1);
86 		*val |= ret << 16;
87 	}
88 
89 	if (ret < 0) {
90 		dev_err_ratelimited(&bus->dev,
91 				    "failed to read qca8k 32bit register\n");
92 		*val = 0;
93 		return ret;
94 	}
95 
96 	return 0;
97 }
98 
99 static void
100 qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
101 {
102 	u16 lo, hi;
103 	int ret;
104 
105 	lo = val & 0xffff;
106 	hi = (u16)(val >> 16);
107 
108 	ret = qca8k_set_lo(priv, phy_id, regnum, lo);
109 	if (ret >= 0)
110 		ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
111 }
112 
113 static int
114 qca8k_set_page(struct qca8k_priv *priv, u16 page)
115 {
116 	u16 *cached_page = &priv->mdio_cache.page;
117 	struct mii_bus *bus = priv->bus;
118 	int ret;
119 
120 	if (page == *cached_page)
121 		return 0;
122 
123 	ret = bus->write(bus, 0x18, 0, page);
124 	if (ret < 0) {
125 		dev_err_ratelimited(&bus->dev,
126 				    "failed to set qca8k page\n");
127 		return ret;
128 	}
129 
130 	*cached_page = page;
131 	usleep_range(1000, 2000);
132 	return 0;
133 }
134 
/* Handler for management reply frames, presumably invoked from the
 * tag_qca receive path (registration not visible here — see tag_qca).
 *
 * Decodes the qca mgmt Ethernet header, marks the pending request as
 * acked when the sequence number matches the one stored at transmit
 * time, copies any returned read data into mgmt_eth_data->data, and
 * wakes the waiter blocked in qca8k_read_eth()/qca8k_write_eth().
 */
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	struct qca8k_priv *priv = ds->priv;
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u32 command;
	u8 len, cmd;
	int i;

	/* The mgmt header sits at the mac header of the reply frame */
	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
	mgmt_eth_data = &priv->mgmt_eth_data;

	command = get_unaligned_le32(&mgmt_ethhdr->command);
	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);

	/* Make sure the seq match the requested packet */
	if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
		mgmt_eth_data->ack = true;

	if (cmd == MDIO_READ) {
		u32 *val = mgmt_eth_data->data;

		/* First 4 bytes of read data travel in the header itself */
		*val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);

		/* Get the rest of the 12 byte of data.
		 * The read/write function will extract the requested data.
		 */
		if (len > QCA_HDR_MGMT_DATA1_LEN) {
			__le32 *data2 = (__le32 *)skb->data;
			int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
					     len - QCA_HDR_MGMT_DATA1_LEN);

			val++;

			for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
				*val = get_unaligned_le32(data2);
				val++;
				data2++;
			}
		}
	}

	/* Wake whoever is waiting in the read/write path */
	complete(&mgmt_eth_data->rw_done);
}
180 
/* Build a management request frame for a switch register access.
 *
 * @cmd:      MDIO_READ or MDIO_WRITE
 * @reg:      switch register address
 * @val:      data to write (only dereferenced for MDIO_WRITE; may point
 *            to up to 16 bytes for bulk transfers)
 * @priority: value for the QCA_HDR_XMIT_PRIORITY field
 * @len:      number of bytes to transfer
 *
 * The sequence number is NOT set here; callers stamp it with
 * qca8k_mdio_header_fill_seq_num() under the mgmt mutex right before
 * transmission. Returns the prepared skb, or NULL on allocation failure.
 */
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
					       int priority, unsigned int len)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	unsigned int real_len;
	struct sk_buff *skb;
	__le32 *data2;
	u32 command;
	u16 hdr;
	int i;

	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	if (!skb)
		return NULL;

	/* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
	 * Actually for some reason the steps are:
	 * 0: nothing
	 * 1-4: first 4 byte
	 * 5-6: first 12 byte
	 * 7-15: all 16 byte
	 */
	if (len == 16)
		real_len = 15;
	else
		real_len = len;

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);

	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);

	/* Build the qca tag header identifying this as a mgmt r/w frame */
	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
	hdr |= QCA_HDR_XMIT_FROM_CPU;
	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);

	command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
	command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
	command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
	command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
					   QCA_HDR_MGMT_CHECK_CODE_VAL);

	put_unaligned_le32(command, &mgmt_ethhdr->command);

	/* First 4 bytes of write data travel inside the header */
	if (cmd == MDIO_WRITE)
		put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);

	mgmt_ethhdr->hdr = htons(hdr);

	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
	/* Remaining write data (bytes 5..16) follow in the payload area */
	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
		int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
				     len - QCA_HDR_MGMT_DATA1_LEN);

		val++;

		for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
			put_unaligned_le32(*val, data2);
			data2++;
			val++;
		}
	}

	return skb;
}
248 
249 static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
250 {
251 	struct qca_mgmt_ethhdr *mgmt_ethhdr;
252 	u32 seq;
253 
254 	seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
255 	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
256 	put_unaligned_le32(seq, &mgmt_ethhdr->seq);
257 }
258 
/* Read @len bytes starting at switch register @reg over the Ethernet
 * management channel.
 *
 * Serializes against other mgmt transactions with mgmt_eth_data->mutex,
 * stamps a fresh sequence number, transmits the request to the mgmt
 * master and sleeps until qca8k_rw_reg_ack_handler() completes rw_done.
 *
 * Returns 0 on success (data in *val), -ENOMEM on allocation failure,
 * -EINVAL if no mgmt master is available or the reply was not acked,
 * -ETIMEDOUT if no reply arrived in time.
 */
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check mgmt_master if is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* Ownership of skb passes to the stack here */
	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	/* Copy out the data while still holding the mutex; validity is
	 * decided below via the timeout/ack checks.
	 */
	*val = mgmt_eth_data->data[0];
	if (len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
310 
/* Write @len bytes starting at switch register @reg over the Ethernet
 * management channel.
 *
 * Mirrors qca8k_read_eth(): serializes with mgmt_eth_data->mutex,
 * stamps a fresh sequence number, transmits the request and waits for
 * the ack from qca8k_rw_reg_ack_handler().
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if no
 * mgmt master is available or the reply was not acked, -ETIMEDOUT if
 * no reply arrived in time.
 */
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check mgmt_master if is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* Ownership of skb passes to the stack here */
	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
358 
359 static int
360 qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
361 {
362 	u32 val = 0;
363 	int ret;
364 
365 	ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
366 	if (ret)
367 		return ret;
368 
369 	val &= ~mask;
370 	val |= write_val;
371 
372 	return qca8k_write_eth(priv, reg, &val, sizeof(val));
373 }
374 
/* regmap .reg_read callback.
 *
 * Tries the Ethernet mgmt channel first and only falls back to the
 * legacy paged MDIO access if that fails. The MDIO path takes the bus
 * mdio_lock itself (regmap locking is disabled in qca8k_regmap_config).
 */
static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	int ret;

	/* Fast path: Ethernet mgmt frame read */
	if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);
	return ret;
}
400 
/* regmap .reg_write callback.
 *
 * Tries the Ethernet mgmt channel first and only falls back to the
 * legacy paged MDIO access if that fails. Note the MDIO write itself
 * (qca8k_mii_write32) is void, so only page-select errors propagate.
 */
static int
qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	int ret;

	/* Fast path: Ethernet mgmt frame write */
	if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);
	return ret;
}
426 
/* regmap .reg_update_bits callback.
 *
 * Tries the Ethernet mgmt channel first; on failure performs a
 * read-modify-write over the legacy paged MDIO access while holding
 * the bus mdio_lock for the whole sequence.
 */
static int
qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	/* Fast path: Ethernet mgmt frame read-modify-write */
	if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
	if (ret < 0)
		goto exit;

	val &= ~mask;
	val |= write_val;
	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);

	return ret;
}
460 
/* regmap over the custom accessors above; all serialization happens in
 * the accessors themselves (Ethernet mgmt mutex / MDIO bus lock).
 */
static struct regmap_config qca8k_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac, /* end MIB - Port6 range */
	.reg_read = qca8k_regmap_read,
	.reg_write = qca8k_regmap_write,
	.reg_update_bits = qca8k_regmap_update_bits,
	.rd_table = &qca8k_readable_table,
	.disable_locking = true, /* Locking is handled by qca8k read/write */
	.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
};
473 
474 static int
475 qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
476 			struct sk_buff *read_skb, u32 *val)
477 {
478 	struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
479 	bool ack;
480 	int ret;
481 
482 	reinit_completion(&mgmt_eth_data->rw_done);
483 
484 	/* Increment seq_num and set it in the copy pkt */
485 	mgmt_eth_data->seq++;
486 	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
487 	mgmt_eth_data->ack = false;
488 
489 	dev_queue_xmit(skb);
490 
491 	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
492 					  QCA8K_ETHERNET_TIMEOUT);
493 
494 	ack = mgmt_eth_data->ack;
495 
496 	if (ret <= 0)
497 		return -ETIMEDOUT;
498 
499 	if (!ack)
500 		return -EINVAL;
501 
502 	*val = mgmt_eth_data->data[0];
503 
504 	return 0;
505 }
506 
/* Perform a PHY read or write through the switch MDIO master, driving
 * the MDIO master registers over the Ethernet management channel.
 *
 * @read:   true for a PHY read, false for a write
 * @phy:    PHY address on the internal MDIO bus
 * @regnum: PHY register number (< QCA8K_MDIO_MASTER_MAX_REG)
 * @data:   value to write (ignored for reads)
 *
 * Returns the (16-bit) read value for reads, 0 for successful writes,
 * or a negative error code.
 */
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
		      int regnum, u16 data)
{
	struct sk_buff *write_skb, *clear_skb, *read_skb;
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	u32 write_val, clear_val = 0, val;
	struct net_device *mgmt_master;
	int ret, ret1;
	bool ack;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	mgmt_eth_data = &priv->mgmt_eth_data;

	/* Compose the MDIO master command: BUSY is set by us and cleared
	 * by the switch when the PHY transaction finishes.
	 */
	write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
		    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
		    QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	if (read) {
		write_val |= QCA8K_MDIO_MASTER_READ;
	} else {
		write_val |= QCA8K_MDIO_MASTER_WRITE;
		write_val |= QCA8K_MDIO_MASTER_DATA(data);
	}

	/* Prealloc all the needed skb before the lock */
	write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
	if (!write_skb)
		return -ENOMEM;

	clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!clear_skb) {
		ret = -ENOMEM;
		goto err_clear_skb;
	}

	read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					   QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!read_skb) {
		ret = -ENOMEM;
		goto err_read_skb;
	}

	/* Actually start the request:
	 * 1. Send mdio master packet
	 * 2. Busy Wait for mdio master command
	 * 3. Get the data if we are reading
	 * 4. Reset the mdio master (even with error)
	 */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if mgmt_master is operational */
	mgmt_master = priv->mgmt_master;
	if (!mgmt_master) {
		mutex_unlock(&mgmt_eth_data->mutex);
		ret = -EINVAL;
		goto err_mgmt_master;
	}

	read_skb->dev = mgmt_master;
	clear_skb->dev = mgmt_master;
	write_skb->dev = mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the write pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	/* write_skb ownership passes to the stack here */
	dev_queue_xmit(write_skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	/* On failure read_skb is still ours and must be freed; clear_skb
	 * is always consumed by the reset step at "exit".
	 */
	if (ret <= 0) {
		ret = -ETIMEDOUT;
		kfree_skb(read_skb);
		goto exit;
	}

	if (!ack) {
		ret = -EINVAL;
		kfree_skb(read_skb);
		goto exit;
	}

	/* Poll the MDIO master via copies of read_skb until BUSY clears */
	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				mgmt_eth_data, read_skb, &val);

	if (ret < 0 && ret1 < 0) {
		ret = ret1;
		goto exit;
	}

	if (read) {
		reinit_completion(&mgmt_eth_data->rw_done);

		/* Increment seq_num and set it in the read pkt */
		mgmt_eth_data->seq++;
		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
		mgmt_eth_data->ack = false;

		/* read_skb ownership passes to the stack here */
		dev_queue_xmit(read_skb);

		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
						  QCA8K_ETHERNET_TIMEOUT);

		ack = mgmt_eth_data->ack;

		if (ret <= 0) {
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!ack) {
			ret = -EINVAL;
			goto exit;
		}

		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
	} else {
		kfree_skb(read_skb);
	}
exit:
	/* Step 4: always clear the MDIO master control reg, even on error */
	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the clear pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(clear_skb);

	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
				    QCA8K_ETHERNET_TIMEOUT);

	mutex_unlock(&mgmt_eth_data->mutex);

	return ret;

	/* Error handling before lock */
err_mgmt_master:
	kfree_skb(read_skb);
err_read_skb:
	kfree_skb(clear_skb);
err_clear_skb:
	kfree_skb(write_skb);

	return ret;
}
666 
667 static u32
668 qca8k_port_to_phy(int port)
669 {
670 	/* From Andrew Lunn:
671 	 * Port 0 has no internal phy.
672 	 * Port 1 has an internal PHY at MDIO address 0.
673 	 * Port 2 has an internal PHY at MDIO address 1.
674 	 * ...
675 	 * Port 5 has an internal PHY at MDIO address 4.
676 	 * Port 6 has no internal PHY.
677 	 */
678 
679 	return port - 1;
680 }
681 
/* Poll switch register @reg over the legacy MDIO path until all bits in
 * @mask clear, up to QCA8K_BUSY_WAIT_TIMEOUT ms.
 *
 * Caller must hold the bus mdio_lock and have the correct page
 * selected. Returns 0 on success, the underlying read error if one
 * occurred, or -ETIMEDOUT.
 */
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
	u16 r1, r2, page;
	u32 val;
	int ret, ret1;

	qca8k_split_addr(reg, &r1, &r2, &page);

	/* ret1 receives qca8k_mii_read32()'s return; val its output */
	ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				bus, 0x10 | r2, r1, &val);

	/* Check if qca8k_read has failed for a different reason
	 * before returnting -ETIMEDOUT
	 */
	if (ret < 0 && ret1 < 0)
		return ret1;

	return ret;
}
703 
/* Write @data to PHY @phy register @regnum via the switch MDIO master,
 * using the legacy paged MDIO access path.
 *
 * Returns 0 on success or a negative error (including -ETIMEDOUT if
 * the MDIO master never reports completion). MASTER_EN is cleared on
 * all paths so the external MDC passthrough is restored.
 */
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	/* BUSY is set by us and cleared by the switch when done */
	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	return ret;
}
741 
/* Read PHY @phy register @regnum via the switch MDIO master, using the
 * legacy paged MDIO access path.
 *
 * Returns the 16-bit read value on success or a negative error.
 * MASTER_EN is cleared on all paths so the external MDC passthrough is
 * restored.
 */
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	/* BUSY is set by us and cleared by the switch when done */
	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);
	if (ret)
		goto exit;

	/* Read back the control reg; the data field holds the result */
	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);

exit:
	/* even if the busy_wait timeouts try to clear the MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	if (ret >= 0)
		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;

	return ret;
}
785 
786 static int
787 qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
788 {
789 	struct qca8k_priv *priv = slave_bus->priv;
790 	int ret;
791 
792 	/* Use mdio Ethernet when available, fallback to legacy one on error */
793 	ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
794 	if (!ret)
795 		return 0;
796 
797 	return qca8k_mdio_write(priv, phy, regnum, data);
798 }
799 
800 static int
801 qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
802 {
803 	struct qca8k_priv *priv = slave_bus->priv;
804 	int ret;
805 
806 	/* Use mdio Ethernet when available, fallback to legacy one on error */
807 	ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
808 	if (ret >= 0)
809 		return ret;
810 
811 	ret = qca8k_mdio_read(priv, phy, regnum);
812 
813 	if (ret < 0)
814 		return 0xffff;
815 
816 	return ret;
817 }
818 
819 static int
820 qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
821 {
822 	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
823 
824 	return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
825 }
826 
827 static int
828 qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
829 {
830 	port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
831 
832 	return qca8k_internal_mdio_read(slave_bus, port, regnum);
833 }
834 
835 static int
836 qca8k_mdio_register(struct qca8k_priv *priv)
837 {
838 	struct dsa_switch *ds = priv->ds;
839 	struct device_node *mdio;
840 	struct mii_bus *bus;
841 
842 	bus = devm_mdiobus_alloc(ds->dev);
843 	if (!bus)
844 		return -ENOMEM;
845 
846 	bus->priv = (void *)priv;
847 	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
848 		 ds->dst->index, ds->index);
849 	bus->parent = ds->dev;
850 	bus->phy_mask = ~ds->phys_mii_mask;
851 	ds->slave_mii_bus = bus;
852 
853 	/* Check if the devicetree declare the port:phy mapping */
854 	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
855 	if (of_device_is_available(mdio)) {
856 		bus->name = "qca8k slave mii";
857 		bus->read = qca8k_internal_mdio_read;
858 		bus->write = qca8k_internal_mdio_write;
859 		return devm_of_mdiobus_register(priv->dev, bus, mdio);
860 	}
861 
862 	/* If a mapping can't be found the legacy mapping is used,
863 	 * using the qca8k_port_to_phy function
864 	 */
865 	bus->name = "qca8k-legacy slave mii";
866 	bus->read = qca8k_legacy_mdio_read;
867 	bus->write = qca8k_legacy_mdio_write;
868 	return devm_mdiobus_register(priv->dev, bus);
869 }
870 
/* Inspect the devicetree port nodes to decide between the internal MDIO
 * master and an external MDIO bus, then apply that configuration.
 *
 * A port with a phy-handle and a non-internal phy-mode counts as
 * external; everything else counts as internal. Mixing both is
 * rejected because MASTER_EN disconnects the external MDC passthrough.
 *
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;
	phy_interface_t mode;
	int err;

	/* Accept both "ports" and "ethernet-ports" container names */
	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");

	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Drop the references held by the iterator */
			of_node_put(port);
			of_node_put(ports);
			return err;
		}

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		of_get_phy_mode(port, &mode);

		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}

	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
	 * the MDIO_MASTER register also _disconnects_ the external MDC
	 * passthrough to the internal PHYs. It's not possible to use both
	 * configurations at the same time!
	 *
	 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}

	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in cases
		 * a dt-overlay and driver reload changed the configuration
		 */

		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}

	return qca8k_mdio_register(priv);
}
939 
940 static int
941 qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
942 {
943 	u32 mask = 0;
944 	int ret = 0;
945 
946 	/* SoC specific settings for ipq8064.
947 	 * If more device require this consider adding
948 	 * a dedicated binding.
949 	 */
950 	if (of_machine_is_compatible("qcom,ipq8064"))
951 		mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
952 
953 	/* SoC specific settings for ipq8065 */
954 	if (of_machine_is_compatible("qcom,ipq8065"))
955 		mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
956 
957 	if (mask) {
958 		ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
959 				QCA8K_MAC_PWR_RGMII0_1_8V |
960 				QCA8K_MAC_PWR_RGMII1_1_8V,
961 				mask);
962 	}
963 
964 	return ret;
965 }
966 
967 static int qca8k_find_cpu_port(struct dsa_switch *ds)
968 {
969 	struct qca8k_priv *priv = ds->priv;
970 
971 	/* Find the connected cpu port. Valid port are 0 or 6 */
972 	if (dsa_is_cpu_port(ds, 0))
973 		return 0;
974 
975 	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
976 
977 	if (dsa_is_cpu_port(ds, 6))
978 		return 6;
979 
980 	return -EINVAL;
981 }
982 
/* Configure the power-on-strapping (PWS) register from devicetree
 * properties: package selection for QCA8327, "qca,ignore-power-on-sel"
 * and "qca,led-open-drain". Returns 0 or a negative errno.
 */
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
	const struct qca8k_match_data *data = priv->info;
	struct device_node *node = priv->dev->of_node;
	u32 val = 0;
	int ret;

	/* QCA8327 require to set to the correct mode.
	 * His bigger brother QCA8328 have the 172 pin layout.
	 * Should be applied by default but we set this just to make sure.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		/* Set the correct package of 148 pin for QCA8327 */
		if (data->reduced_package)
			val |= QCA8327_PWS_PACKAGE148_EN;

		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
				val);
		if (ret)
			return ret;
	}

	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
		val |= QCA8K_PWS_POWER_ON_SEL;

	/* Open-drain LEDs only work when the power-on strapping is
	 * overridden, so enforce the dependency here.
	 */
	if (of_property_read_bool(node, "qca,led-open-drain")) {
		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
			return -EINVAL;
		}

		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
	}

	return qca8k_rmw(priv, QCA8K_REG_PWS,
			QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
			val);
}
1022 
/* Parse the devicetree configuration of the two possible CPU ports
 * (0 and 6): RGMII tx/rx internal delays and SGMII clock/PLL options.
 * Results are stored in priv->ports_config, indexed by CPU port slot
 * (QCA8K_CPU_PORT0 / QCA8K_CPU_PORT1). Always returns 0; invalid or
 * missing nodes are simply skipped.
 */
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;

	/* We have 2 CPU port. Check them */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every other port */
		if (port != 0 && port != 6)
			continue;

		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		/* Index advances for both CPU ports, even unused ones, so
		 * the slot assignment stays stable.
		 */
		cpu_port_index++;

		if (!of_device_is_available(port_dn))
			continue;

		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;

		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			delay = 0;

			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;

			delay = 0;

			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;

			/* Skip sgmii parsing for rgmii* mode */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;

			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;

				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}

				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}

			break;
		default:
			continue;
		}
	}

	return 0;
}
1120 
/* Program the RGMII tx/rx internal delays for a CPU port pad-control
 * register @reg, using the values parsed by qca8k_parse_port_config()
 * for slot @cpu_port_index. A delay of 0 means "not enabled".
 */
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
				      u32 reg)
{
	u32 delay, val = 0;
	int ret;

	/* Delay can be declared in 3 different way.
	 * Mode to rgmii and internal-delay standard binding defined
	 * rgmii-id or rgmii-tx/rx phy mode set.
	 * The parse logic set a delay different than 0 only when one
	 * of the 3 different way is used. In all other case delay is
	 * not enabled. With ID or TX/RXID delay is enabled and set
	 * to the default and recommended value.
	 */
	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
	}

	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
	}

	/* Set RGMII delay based on the selected values */
	ret = qca8k_rmw(priv, reg,
			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
			val);
	if (ret)
		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
1161 
1162 static struct phylink_pcs *
1163 qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1164 			     phy_interface_t interface)
1165 {
1166 	struct qca8k_priv *priv = ds->priv;
1167 	struct phylink_pcs *pcs = NULL;
1168 
1169 	switch (interface) {
1170 	case PHY_INTERFACE_MODE_SGMII:
1171 	case PHY_INTERFACE_MODE_1000BASEX:
1172 		switch (port) {
1173 		case 0:
1174 			pcs = &priv->pcs_port_0.pcs;
1175 			break;
1176 
1177 		case 6:
1178 			pcs = &priv->pcs_port_6.pcs;
1179 			break;
1180 		}
1181 		break;
1182 
1183 	default:
1184 		break;
1185 	}
1186 
1187 	return pcs;
1188 }
1189 
/* phylink .phylink_mac_config callback.
 *
 * Validates the requested interface mode for the port, selects the
 * matching pad-control register and CPU port index, then programs the
 * pad for RGMII (including internal delays) or SGMII/1000base-X.
 * Invalid port/interface combinations are rejected with an error log
 * or a silent return, matching phylink's void contract.
 */
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
			 const struct phylink_link_state *state)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port_index;
	u32 reg;

	switch (port) {
	case 0: /* 1st CPU port */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII)
			return;

		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY, nothing to do */
		return;
	case 6: /* 2nd CPU port / external PHY */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII &&
		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
			return;

		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
		return;
	}

	/* In-band negotiation is only handled on port 6 (via its PCS) */
	if (port != 6 && phylink_autoneg_inband(mode)) {
		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
			__func__);
		return;
	}

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);

		/* Configure rgmii delay */
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

		/* QCA8337 requires to set rgmii rx delay for all ports.
		 * This is enabled through PORT5_PAD_CTRL for all ports,
		 * rather than individual port registers.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337)
			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		/* Enable SGMII on the port */
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
		break;
	default:
		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
			phy_modes(state->interface), port);
		return;
	}
}
1269 
1270 static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1271 				   struct phylink_config *config)
1272 {
1273 	switch (port) {
1274 	case 0: /* 1st CPU port */
1275 		phy_interface_set_rgmii(config->supported_interfaces);
1276 		__set_bit(PHY_INTERFACE_MODE_SGMII,
1277 			  config->supported_interfaces);
1278 		break;
1279 
1280 	case 1:
1281 	case 2:
1282 	case 3:
1283 	case 4:
1284 	case 5:
1285 		/* Internal PHY */
1286 		__set_bit(PHY_INTERFACE_MODE_GMII,
1287 			  config->supported_interfaces);
1288 		__set_bit(PHY_INTERFACE_MODE_INTERNAL,
1289 			  config->supported_interfaces);
1290 		break;
1291 
1292 	case 6: /* 2nd CPU port / external PHY */
1293 		phy_interface_set_rgmii(config->supported_interfaces);
1294 		__set_bit(PHY_INTERFACE_MODE_SGMII,
1295 			  config->supported_interfaces);
1296 		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
1297 			  config->supported_interfaces);
1298 		break;
1299 	}
1300 
1301 	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1302 		MAC_10 | MAC_100 | MAC_1000FD;
1303 
1304 	config->legacy_pre_march2020 = false;
1305 }
1306 
1307 static void
1308 qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
1309 			    phy_interface_t interface)
1310 {
1311 	struct qca8k_priv *priv = ds->priv;
1312 
1313 	qca8k_port_set_status(priv, port, 0);
1314 }
1315 
1316 static void
1317 qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1318 			  phy_interface_t interface, struct phy_device *phydev,
1319 			  int speed, int duplex, bool tx_pause, bool rx_pause)
1320 {
1321 	struct qca8k_priv *priv = ds->priv;
1322 	u32 reg;
1323 
1324 	if (phylink_autoneg_inband(mode)) {
1325 		reg = QCA8K_PORT_STATUS_LINK_AUTO;
1326 	} else {
1327 		switch (speed) {
1328 		case SPEED_10:
1329 			reg = QCA8K_PORT_STATUS_SPEED_10;
1330 			break;
1331 		case SPEED_100:
1332 			reg = QCA8K_PORT_STATUS_SPEED_100;
1333 			break;
1334 		case SPEED_1000:
1335 			reg = QCA8K_PORT_STATUS_SPEED_1000;
1336 			break;
1337 		default:
1338 			reg = QCA8K_PORT_STATUS_LINK_AUTO;
1339 			break;
1340 		}
1341 
1342 		if (duplex == DUPLEX_FULL)
1343 			reg |= QCA8K_PORT_STATUS_DUPLEX;
1344 
1345 		if (rx_pause || dsa_is_cpu_port(ds, port))
1346 			reg |= QCA8K_PORT_STATUS_RXFLOW;
1347 
1348 		if (tx_pause || dsa_is_cpu_port(ds, port))
1349 			reg |= QCA8K_PORT_STATUS_TXFLOW;
1350 	}
1351 
1352 	reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1353 
1354 	qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1355 }
1356 
/* Map a generic phylink_pcs back to its enclosing qca8k_pcs */
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct qca8k_pcs, pcs);
}
1361 
1362 static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1363 				struct phylink_link_state *state)
1364 {
1365 	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1366 	int port = pcs_to_qca8k_pcs(pcs)->port;
1367 	u32 reg;
1368 	int ret;
1369 
1370 	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1371 	if (ret < 0) {
1372 		state->link = false;
1373 		return;
1374 	}
1375 
1376 	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1377 	state->an_complete = state->link;
1378 	state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1379 	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1380 							   DUPLEX_HALF;
1381 
1382 	switch (reg & QCA8K_PORT_STATUS_SPEED) {
1383 	case QCA8K_PORT_STATUS_SPEED_10:
1384 		state->speed = SPEED_10;
1385 		break;
1386 	case QCA8K_PORT_STATUS_SPEED_100:
1387 		state->speed = SPEED_100;
1388 		break;
1389 	case QCA8K_PORT_STATUS_SPEED_1000:
1390 		state->speed = SPEED_1000;
1391 		break;
1392 	default:
1393 		state->speed = SPEED_UNKNOWN;
1394 		break;
1395 	}
1396 
1397 	if (reg & QCA8K_PORT_STATUS_RXFLOW)
1398 		state->pause |= MLO_PAUSE_RX;
1399 	if (reg & QCA8K_PORT_STATUS_TXFLOW)
1400 		state->pause |= MLO_PAUSE_TX;
1401 }
1402 
1403 static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1404 			    phy_interface_t interface,
1405 			    const unsigned long *advertising,
1406 			    bool permit_pause_to_mac)
1407 {
1408 	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1409 	int cpu_port_index, ret, port;
1410 	u32 reg, val;
1411 
1412 	port = pcs_to_qca8k_pcs(pcs)->port;
1413 	switch (port) {
1414 	case 0:
1415 		reg = QCA8K_REG_PORT0_PAD_CTRL;
1416 		cpu_port_index = QCA8K_CPU_PORT0;
1417 		break;
1418 
1419 	case 6:
1420 		reg = QCA8K_REG_PORT6_PAD_CTRL;
1421 		cpu_port_index = QCA8K_CPU_PORT6;
1422 		break;
1423 
1424 	default:
1425 		WARN_ON(1);
1426 		return -EINVAL;
1427 	}
1428 
1429 	/* Enable/disable SerDes auto-negotiation as necessary */
1430 	ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1431 	if (ret)
1432 		return ret;
1433 	if (phylink_autoneg_inband(mode))
1434 		val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1435 	else
1436 		val |= QCA8K_PWS_SERDES_AEN_DIS;
1437 	qca8k_write(priv, QCA8K_REG_PWS, val);
1438 
1439 	/* Configure the SGMII parameters */
1440 	ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1441 	if (ret)
1442 		return ret;
1443 
1444 	val |= QCA8K_SGMII_EN_SD;
1445 
1446 	if (priv->ports_config.sgmii_enable_pll)
1447 		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1448 		       QCA8K_SGMII_EN_TX;
1449 
1450 	if (dsa_is_cpu_port(priv->ds, port)) {
1451 		/* CPU port, we're talking to the CPU MAC, be a PHY */
1452 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1453 		val |= QCA8K_SGMII_MODE_CTRL_PHY;
1454 	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1455 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1456 		val |= QCA8K_SGMII_MODE_CTRL_MAC;
1457 	} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
1458 		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1459 		val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1460 	}
1461 
1462 	qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1463 
1464 	/* From original code is reported port instability as SGMII also
1465 	 * require delay set. Apply advised values here or take them from DT.
1466 	 */
1467 	if (interface == PHY_INTERFACE_MODE_SGMII)
1468 		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1469 	/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
1470 	 * falling edge is set writing in the PORT0 PAD reg
1471 	 */
1472 	if (priv->switch_id == QCA8K_ID_QCA8327 ||
1473 	    priv->switch_id == QCA8K_ID_QCA8337)
1474 		reg = QCA8K_REG_PORT0_PAD_CTRL;
1475 
1476 	val = 0;
1477 
1478 	/* SGMII Clock phase configuration */
1479 	if (priv->ports_config.sgmii_rx_clk_falling_edge)
1480 		val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1481 
1482 	if (priv->ports_config.sgmii_tx_clk_falling_edge)
1483 		val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1484 
1485 	if (val)
1486 		ret = qca8k_rmw(priv, reg,
1487 				QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
1488 				QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
1489 				val);
1490 
1491 	return 0;
1492 }
1493 
/* phylink PCS .pcs_an_restart callback: intentionally empty.
 * NOTE(review): no register write is done here, so AN restart is
 * presumably handled by the hardware or not supported — confirm
 * against the switch datasheet before relying on it.
 */
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
1497 
/* phylink PCS operations shared by the port 0 and port 6 serdes */
static const struct phylink_pcs_ops qca8k_pcs_ops = {
	.pcs_get_state = qca8k_pcs_get_state,
	.pcs_config = qca8k_pcs_config,
	.pcs_an_restart = qca8k_pcs_an_restart,
};
1503 
1504 static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
1505 			    int port)
1506 {
1507 	qpcs->pcs.ops = &qca8k_pcs_ops;
1508 
1509 	/* We don't have interrupts for link changes, so we need to poll */
1510 	qpcs->pcs.poll = true;
1511 	qpcs->priv = priv;
1512 	qpcs->port = port;
1513 }
1514 
/* Tagger callback: parse one MIB autocast packet from the switch.
 *
 * The switch broadcasts one packet per port; packets for ports other
 * than the one currently requested are ignored (but still counted so
 * the completion fires once every port's packet has been seen). The
 * first three counters travel in the tag header, the rest follow in
 * the skb payload as little-endian 32- or 64-bit values.
 */
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	struct mib_ethhdr *mib_ethhdr;
	__le32 *data2;
	u8 port;
	int i;

	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
	mib_eth_data = &priv->mib_eth_data;

	/* The switch autocast every port. Ignore other packet and
	 * parse only the requested one.
	 */
	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
	if (port != mib_eth_data->req_port)
		goto exit;

	/* Remaining counters start right at the skb payload */
	data2 = (__le32 *)skb->data;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];

		/* First 3 mib are present in the skb head */
		if (i < 3) {
			mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
			continue;
		}

		/* Some mib are 64 bit wide */
		if (mib->size == 2)
			mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
		else
			mib_eth_data->data[i] = get_unaligned_le32(data2);

		/* Advance by the counter width (in 32-bit words) */
		data2 += mib->size;
	}

exit:
	/* Complete on receiving all the mib packet */
	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
		complete(&mib_eth_data->rw_done);
}
1560 
/* Fetch ethtool statistics for one port via the Ethernet MIB autocast
 * mechanism instead of slow MDIO register reads.
 *
 * mib_eth_data->mutex serializes concurrent requesters; reg_mutex is
 * held only while triggering the autocast request. The handler above
 * fills mib_eth_data->data and completes rw_done once packets from all
 * QCA8K_NUM_PORTS ports were seen.
 *
 * Returns the wait_for_completion_timeout() result on the happy path:
 * positive (jiffies remaining) on success, 0 on timeout; a negative
 * errno if triggering the request failed.
 */
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	mib_eth_data = &priv->mib_eth_data;

	mutex_lock(&mib_eth_data->mutex);

	reinit_completion(&mib_eth_data->rw_done);

	mib_eth_data->req_port = dp->index;
	mib_eth_data->data = data;
	/* One refcount per expected autocast packet (one per port) */
	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);

	mutex_lock(&priv->reg_mutex);

	/* Send mib autocast request */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
				 QCA8K_MIB_BUSY);

	mutex_unlock(&priv->reg_mutex);

	if (ret)
		goto exit;

	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

exit:
	mutex_unlock(&mib_eth_data->mutex);

	return ret;
}
1599 
1600 static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
1601 {
1602 	struct qca8k_priv *priv = ds->priv;
1603 
1604 	/* Communicate to the phy internal driver the switch revision.
1605 	 * Based on the switch revision different values needs to be
1606 	 * set to the dbg and mmd reg on the phy.
1607 	 * The first 2 bit are used to communicate the switch revision
1608 	 * to the phy driver.
1609 	 */
1610 	if (port > 0 && port < 6)
1611 		return priv->switch_revision;
1612 
1613 	return 0;
1614 }
1615 
/* DSA .get_tag_protocol callback: all ports use the QCA tag format */
static enum dsa_tag_protocol
qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
		       enum dsa_tag_protocol mp)
{
	return DSA_TAG_PROTO_QCA;
}
1622 
/* DSA .master_state_change callback.
 *
 * Tracks whether the CPU-port master netdev is operational so the
 * Ethernet-based register/MIB paths know if they can be used. Both
 * mgmt and mib mutexes are taken (in that order) so no in-flight
 * Ethernet transaction observes mgmt_master changing under it.
 */
static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
		    bool operational)
{
	struct dsa_port *dp = master->dsa_ptr;
	struct qca8k_priv *priv = ds->priv;

	/* Ethernet MIB/MDIO is only supported for CPU port 0 */
	if (dp->index != 0)
		return;

	mutex_lock(&priv->mgmt_eth_data.mutex);
	mutex_lock(&priv->mib_eth_data.mutex);

	priv->mgmt_master = operational ? (struct net_device *)master : NULL;

	mutex_unlock(&priv->mib_eth_data.mutex);
	mutex_unlock(&priv->mgmt_eth_data.mutex);
}
1642 
1643 static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
1644 				      enum dsa_tag_protocol proto)
1645 {
1646 	struct qca_tagger_data *tagger_data;
1647 
1648 	switch (proto) {
1649 	case DSA_TAG_PROTO_QCA:
1650 		tagger_data = ds->tagger_data;
1651 
1652 		tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
1653 		tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
1654 
1655 		break;
1656 	default:
1657 		return -EOPNOTSUPP;
1658 	}
1659 
1660 	return 0;
1661 }
1662 
/* DSA .setup callback: bring the switch to its initial operating state.
 *
 * Parses the DT port configuration, sets up the MDIO bus, PWS and MAC
 * power-select registers and the two serdes PCS instances, then
 * programs per-port forwarding/lookup membership, QCA header mode,
 * default VLANs and the QCA8337-specific buffer/priority tuning.
 * Returns 0 on success or a negative error code.
 */
static int
qca8k_setup(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int cpu_port, ret, i;
	u32 mask;

	cpu_port = qca8k_find_cpu_port(ds);
	if (cpu_port < 0) {
		dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
		return cpu_port;
	}

	/* Parse CPU port config to be later used in phy_link mac_config */
	ret = qca8k_parse_port_config(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mdio_bus(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_of_pws_reg(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mac_pwr_sel(priv);
	if (ret)
		return ret;

	/* Bind the serdes PCS instances to the two CPU-capable ports */
	qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
	qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);

	/* Make sure MAC06 is disabled */
	ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
				QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
	if (ret) {
		dev_err(priv->dev, "failed disabling MAC06 exchange");
		return ret;
	}

	/* Enable CPU Port */
	ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
			      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	if (ret) {
		dev_err(priv->dev, "failed enabling CPU port");
		return ret;
	}

	/* Enable MIB counters; failure is non-fatal, stats just degrade */
	ret = qca8k_mib_init(priv);
	if (ret)
		dev_warn(priv->dev, "mib init failed");

	/* Initial setup of all ports */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* Disable forwarding by default on all ports */
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
				QCA8K_PORT_LOOKUP_MEMBER, 0);
		if (ret)
			return ret;

		/* Enable QCA header mode on all cpu ports */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
			if (ret) {
				dev_err(priv->dev, "failed enabling QCA header mode");
				return ret;
			}
		}

		/* Disable MAC by default on all user ports */
		if (dsa_is_user_port(ds, i))
			qca8k_port_set_status(priv, i, 0);
	}

	/* Forward all unknown frames to CPU port for Linux processing
	 * Notice that in multi-cpu config only one port should be set
	 * for igmp, unknown, multicast and broadcast packet
	 */
	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
	if (ret)
		return ret;

	/* Setup connection between CPU port & user ports
	 * Configure specific switch configuration for ports
	 */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* CPU port gets connected to all user ports of the switch */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
			if (ret)
				return ret;
		}

		/* Individual user ports get connected to CPU port only */
		if (dsa_is_user_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER,
					BIT(cpu_port));
			if (ret)
				return ret;

			/* Enable ARP Auto-learning by default */
			ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
					      QCA8K_PORT_LOOKUP_LEARN);
			if (ret)
				return ret;

			/* For port based vlans to work we need to set the
			 * default egress vid
			 */
			ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
					QCA8K_EGREES_VLAN_PORT_MASK(i),
					QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;

			ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
					  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
					  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;
		}

		/* The port 5 of the qca8337 have some problem in flood condition. The
		 * original legacy driver had some specific buffer and priority settings
		 * for the different port suggested by the QCA switch team. Add this
		 * missing settings to improve switch stability under load condition.
		 * This problem is limited to qca8337 and other qca8k switch are not affected.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337) {
			switch (i) {
			/* The 2 CPU port and port 5 requires some different
			 * priority than any other ports.
			 */
			case 0:
			case 5:
			case 6:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
				break;
			default:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
			}
			qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);

			mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
			QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_WRED_EN;
			qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
				  QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
				  QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_WRED_EN,
				  mask);
		}
	}

	/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
			  mask);
	}

	/* Setup our port MTUs to match power on defaults */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		dev_warn(priv->dev, "failed setting MTU settings");

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

	/* Set the minimum and maximum ageing values supported */
	ds->ageing_time_min = 7000;
	ds->ageing_time_max = 458745000;

	/* Set max number of LAGs supported */
	ds->num_lag_ids = QCA8K_NUM_LAGS;

	return 0;
}
1866 
/* DSA switch operations; shared helpers live in qca8k-common.c */
static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol	= qca8k_get_tag_protocol,
	.setup			= qca8k_setup,
	.get_strings		= qca8k_get_strings,
	.get_ethtool_stats	= qca8k_get_ethtool_stats,
	.get_sset_count		= qca8k_get_sset_count,
	.set_ageing_time	= qca8k_set_ageing_time,
	.get_mac_eee		= qca8k_get_mac_eee,
	.set_mac_eee		= qca8k_set_mac_eee,
	.port_enable		= qca8k_port_enable,
	.port_disable		= qca8k_port_disable,
	.port_change_mtu	= qca8k_port_change_mtu,
	.port_max_mtu		= qca8k_port_max_mtu,
	.port_stp_state_set	= qca8k_port_stp_state_set,
	.port_bridge_join	= qca8k_port_bridge_join,
	.port_bridge_leave	= qca8k_port_bridge_leave,
	.port_fast_age		= qca8k_port_fast_age,
	.port_fdb_add		= qca8k_port_fdb_add,
	.port_fdb_del		= qca8k_port_fdb_del,
	.port_fdb_dump		= qca8k_port_fdb_dump,
	.port_mdb_add		= qca8k_port_mdb_add,
	.port_mdb_del		= qca8k_port_mdb_del,
	.port_mirror_add	= qca8k_port_mirror_add,
	.port_mirror_del	= qca8k_port_mirror_del,
	.port_vlan_filtering	= qca8k_port_vlan_filtering,
	.port_vlan_add		= qca8k_port_vlan_add,
	.port_vlan_del		= qca8k_port_vlan_del,
	.phylink_get_caps	= qca8k_phylink_get_caps,
	.phylink_mac_select_pcs	= qca8k_phylink_mac_select_pcs,
	.phylink_mac_config	= qca8k_phylink_mac_config,
	.phylink_mac_link_down	= qca8k_phylink_mac_link_down,
	.phylink_mac_link_up	= qca8k_phylink_mac_link_up,
	.get_phy_flags		= qca8k_get_phy_flags,
	.port_lag_join		= qca8k_port_lag_join,
	.port_lag_leave		= qca8k_port_lag_leave,
	.master_state_change	= qca8k_master_change,
	.connect_tag_protocol	= qca8k_connect_tag_protocol,
};
1905 
/* MDIO probe: allocate driver state, reset the chip (if a reset GPIO is
 * wired), set up the regmap and MDIO caches, verify the switch ID and
 * register the DSA switch. Returns 0 or a negative error code.
 */
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv;
	int ret;

	/* allocate the private data struct so that we can probe the switches
	 * ID register
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;
	priv->info = of_device_get_match_data(priv->dev);

	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
						   GPIOD_ASIS);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	if (priv->reset_gpio) {
		gpiod_set_value_cansleep(priv->reset_gpio, 1);
		/* The active low duration must be greater than 10 ms
		 * and checkpatch.pl wants 20 ms.
		 */
		msleep(20);
		gpiod_set_value_cansleep(priv->reset_gpio, 0);
	}

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(priv->dev, "regmap initialization failed");
		return PTR_ERR(priv->regmap);
	}

	/* Invalidate the page/lo/hi MDIO caches so the first access
	 * always writes the real registers.
	 */
	priv->mdio_cache.page = 0xffff;
	priv->mdio_cache.lo = 0xffff;
	priv->mdio_cache.hi = 0xffff;

	/* Check the detected switch id */
	ret = qca8k_read_switch_id(priv);
	if (ret)
		return ret;

	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	mutex_init(&priv->mgmt_eth_data.mutex);
	init_completion(&priv->mgmt_eth_data.rw_done);

	mutex_init(&priv->mib_eth_data.mutex);
	init_completion(&priv->mib_eth_data.rw_done);

	priv->ds->dev = &mdiodev->dev;
	priv->ds->num_ports = QCA8K_NUM_PORTS;
	priv->ds->priv = priv;
	priv->ds->ops = &qca8k_switch_ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(&mdiodev->dev, priv);

	return dsa_register_switch(priv->ds);
}
1973 
1974 static void
1975 qca8k_sw_remove(struct mdio_device *mdiodev)
1976 {
1977 	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
1978 	int i;
1979 
1980 	if (!priv)
1981 		return;
1982 
1983 	for (i = 0; i < QCA8K_NUM_PORTS; i++)
1984 		qca8k_port_set_status(priv, i, 0);
1985 
1986 	dsa_unregister_switch(priv->ds);
1987 }
1988 
/* MDIO shutdown: tear the DSA switch down and clear drvdata so a
 * subsequent remove callback becomes a no-op.
 */
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);

	if (!priv)
		return;

	dsa_switch_shutdown(priv->ds);

	dev_set_drvdata(&mdiodev->dev, NULL);
}
2000 
2001 #ifdef CONFIG_PM_SLEEP
2002 static void
2003 qca8k_set_pm(struct qca8k_priv *priv, int enable)
2004 {
2005 	int port;
2006 
2007 	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
2008 		/* Do not enable on resume if the port was
2009 		 * disabled before.
2010 		 */
2011 		if (!(priv->port_enabled_map & BIT(port)))
2012 			continue;
2013 
2014 		qca8k_port_set_status(priv, port, enable);
2015 	}
2016 }
2017 
/* PM suspend: take all enabled ports down, then suspend the DSA core */
static int qca8k_suspend(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 0);

	return dsa_switch_suspend(priv->ds);
}
2026 
/* PM resume: bring previously enabled ports back up, resume DSA core */
static int qca8k_resume(struct device *dev)
{
	struct qca8k_priv *priv = dev_get_drvdata(dev);

	qca8k_set_pm(priv, 1);

	return dsa_switch_resume(priv->ds);
}
2035 #endif /* CONFIG_PM_SLEEP */
2036 
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);

/* Per-family ops: Ethernet-based MIB autocast and register access */
static const struct qca8k_info_ops qca8xxx_ops = {
	.autocast_mib = qca8k_get_ethtool_stats_eth,
	.read_eth = qca8k_read_eth,
	.write_eth = qca8k_write_eth,
};
2045 
/* qca8327 and qca8328 share the same switch ID; the reduced_package
 * flag distinguishes the smaller 8327 package variant.
 */
static const struct qca8k_match_data qca8327 = {
	.id = QCA8K_ID_QCA8327,
	.reduced_package = true,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

static const struct qca8k_match_data qca8328 = {
	.id = QCA8K_ID_QCA8327,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};

/* Covers both qca8334 and qca8337 */
static const struct qca8k_match_data qca833x = {
	.id = QCA8K_ID_QCA8337,
	.mib_count = QCA8K_QCA833X_MIB_COUNT,
	.ops = &qca8xxx_ops,
};
2064 
/* Device-tree match table mapping compatibles to per-chip match data */
static const struct of_device_id qca8k_of_match[] = {
	{ .compatible = "qca,qca8327", .data = &qca8327 },
	{ .compatible = "qca,qca8328", .data = &qca8328 },
	{ .compatible = "qca,qca8334", .data = &qca833x },
	{ .compatible = "qca,qca8337", .data = &qca833x },
	{ /* sentinel */ },
};
2072 
/* The switch is managed over MDIO, hence an mdio_driver registration */
static struct mdio_driver qca8kmdio_driver = {
	.probe  = qca8k_sw_probe,
	.remove = qca8k_sw_remove,
	.shutdown = qca8k_sw_shutdown,
	.mdiodrv.driver = {
		.name = "qca8k",
		.of_match_table = qca8k_of_match,
		.pm = &qca8k_pm_ops,
	},
};

mdio_module_driver(qca8kmdio_driver);

MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qca8k");
2090