xref: /linux/drivers/net/ethernet/broadcom/bcmsysport.c (revision 83a37b3292f4aca799b355179ad6fbdd78a08e10)
1 /*
2  * Broadcom BCM7xxx System Port Ethernet MAC driver
3  *
4  * Copyright (C) 2014 Broadcom Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
12 
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/netdevice.h>
18 #include <linux/etherdevice.h>
19 #include <linux/platform_device.h>
20 #include <linux/of.h>
21 #include <linux/of_net.h>
22 #include <linux/of_mdio.h>
23 #include <linux/phy.h>
24 #include <linux/phy_fixed.h>
25 #include <net/dsa.h>
26 #include <net/ip.h>
27 #include <net/ipv6.h>
28 
29 #include "bcmsysport.h"
30 
31 /* I/O accessors register helpers */
32 #define BCM_SYSPORT_IO_MACRO(name, offset) \
33 static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
34 {									\
35 	u32 reg = readl_relaxed(priv->base + offset + off);		\
36 	return reg;							\
37 }									\
38 static inline void name##_writel(struct bcm_sysport_priv *priv,		\
39 				  u32 val, u32 off)			\
40 {									\
41 	writel_relaxed(val, priv->base + offset + off);			\
42 }									\
43 
44 BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
45 BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
46 BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
47 BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
48 BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
49 BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
50 BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
51 BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
52 BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
53 BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
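
/* A minimal usage sketch (illustrative only): each BCM_SYSPORT_IO_MACRO()
 * instantiation above generates a <name>_readl()/<name>_writel() pair
 * relative to that block's offset, e.g. for the UniMAC block:
 *
 *	u32 reg = umac_readl(priv, UMAC_CMD);
 *	umac_writel(priv, reg | CMD_PROMISC, UMAC_CMD);
 *
 * which is the read-modify-write pattern used throughout this file.
 */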
54 
55 /* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
56  * same layout, except it has been moved up by 4 bytes, *sigh*
57  */
58 static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
59 {
60 	if (priv->is_lite && off >= RDMA_STATUS)
61 		off += 4;
62 	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
63 }
64 
65 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
66 {
67 	if (priv->is_lite && off >= RDMA_STATUS)
68 		off += 4;
69 	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
70 }
71 
72 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
73 {
74 	if (!priv->is_lite) {
75 		return BIT(bit);
76 	} else {
77 		if (bit >= ACB_ALGO)
78 			return BIT(bit + 1);
79 		else
80 			return BIT(bit);
81 	}
82 }
83 
84 /* L2-interrupt masking/unmasking helpers; they automatically save the applied
85  * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
86  */
87 #define BCM_SYSPORT_INTR_L2(which)	\
88 static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
89 						u32 mask)		\
90 {									\
91 	priv->irq##which##_mask &= ~(mask);				\
92 	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
93 }									\
94 static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
95 						u32 mask)		\
96 {									\
97 	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
98 	priv->irq##which##_mask |= (mask);				\
99 }									\
100 
101 BCM_SYSPORT_INTR_L2(0)
102 BCM_SYSPORT_INTR_L2(1)
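
/* Sketch (illustrative only): the generated helpers keep priv->irqN_mask in
 * step with the hardware, so re-enabling RX interrupts from NAPI context is
 * simply:
 *
 *	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
 *
 * as done in bcm_sysport_poll() and bcm_sysport_netif_start() below.
 */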
103 
104 /* Register accesses to GISB/RBUS registers are expensive (a few hundred
105  * nanoseconds), so keep the 64-bit check explicit here to save one
106  * register write per packet on 32-bit platforms.
107  */
108 static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
109 				     void __iomem *d,
110 				     dma_addr_t addr)
111 {
112 #ifdef CONFIG_PHYS_ADDR_T_64BIT
113 	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
114 		     d + DESC_ADDR_HI_STATUS_LEN);
115 #endif
116 	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
117 }
118 
119 static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
120 					     struct dma_desc *desc,
121 					     unsigned int port)
122 {
123 	/* Ports are latched, so write upper address first */
124 	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
125 	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
126 }
127 
128 /* Ethtool operations */
129 static int bcm_sysport_set_rx_csum(struct net_device *dev,
130 				   netdev_features_t wanted)
131 {
132 	struct bcm_sysport_priv *priv = netdev_priv(dev);
133 	u32 reg;
134 
135 	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
136 	reg = rxchk_readl(priv, RXCHK_CONTROL);
137 	if (priv->rx_chk_en)
138 		reg |= RXCHK_EN;
139 	else
140 		reg &= ~RXCHK_EN;
141 
142 	/* If UniMAC forwards CRC, we need to skip over it so that a
143 	 * valid CHK bit gets set in the per-packet status word
144 	 */
145 	if (priv->rx_chk_en && priv->crc_fwd)
146 		reg |= RXCHK_SKIP_FCS;
147 	else
148 		reg &= ~RXCHK_SKIP_FCS;
149 
150 	/* If Broadcom tags are enabled (e.g. when using a switch), make
151 	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
152 	 * tag after the Ethernet MAC Source Address.
153 	 */
154 	if (netdev_uses_dsa(dev))
155 		reg |= RXCHK_BRCM_TAG_EN;
156 	else
157 		reg &= ~RXCHK_BRCM_TAG_EN;
158 
159 	rxchk_writel(priv, reg, RXCHK_CONTROL);
160 
161 	return 0;
162 }
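
/* From userspace this path is typically reached through something like
 * "ethtool -K ethX rx on" (device name illustrative), which toggles
 * NETIF_F_RXCSUM and lands here via bcm_sysport_set_features().
 */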
163 
164 static int bcm_sysport_set_tx_csum(struct net_device *dev,
165 				   netdev_features_t wanted)
166 {
167 	struct bcm_sysport_priv *priv = netdev_priv(dev);
168 	u32 reg;
169 
170 	/* Hardware transmit checksumming requires us to enable the transmit
171 	 * status block (TSB) prepended to the packet contents
172 	 */
173 	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
174 	reg = tdma_readl(priv, TDMA_CONTROL);
175 	if (priv->tsb_en)
176 		reg |= tdma_control_bit(priv, TSB_EN);
177 	else
178 		reg &= ~tdma_control_bit(priv, TSB_EN);
179 	tdma_writel(priv, reg, TDMA_CONTROL);
180 
181 	return 0;
182 }
183 
184 static int bcm_sysport_set_features(struct net_device *dev,
185 				    netdev_features_t features)
186 {
187 	netdev_features_t changed = features ^ dev->features;
188 	netdev_features_t wanted = dev->wanted_features;
189 	int ret = 0;
190 
191 	if (changed & NETIF_F_RXCSUM)
192 		ret = bcm_sysport_set_rx_csum(dev, wanted);
193 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
194 		ret = bcm_sysport_set_tx_csum(dev, wanted);
195 
196 	return ret;
197 }
198 
199 /* Hardware counters must be kept in sync because the order/offset
200  * is important here (order in structure declaration = order in hardware)
201  */
202 static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
203 	/* general stats */
204 	STAT_NETDEV64(rx_packets),
205 	STAT_NETDEV64(tx_packets),
206 	STAT_NETDEV64(rx_bytes),
207 	STAT_NETDEV64(tx_bytes),
208 	STAT_NETDEV(rx_errors),
209 	STAT_NETDEV(tx_errors),
210 	STAT_NETDEV(rx_dropped),
211 	STAT_NETDEV(tx_dropped),
212 	STAT_NETDEV(multicast),
213 	/* UniMAC RSV counters */
214 	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
215 	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
216 	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
217 	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
218 	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
219 	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
220 	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
221 	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
222 	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
223 	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
224 	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
225 	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
226 	STAT_MIB_RX("rx_multicast", mib.rx.mca),
227 	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
228 	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
229 	STAT_MIB_RX("rx_control", mib.rx.cf),
230 	STAT_MIB_RX("rx_pause", mib.rx.pf),
231 	STAT_MIB_RX("rx_unknown", mib.rx.uo),
232 	STAT_MIB_RX("rx_align", mib.rx.aln),
233 	STAT_MIB_RX("rx_outrange", mib.rx.flr),
234 	STAT_MIB_RX("rx_code", mib.rx.cde),
235 	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
236 	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
237 	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
238 	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
239 	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
240 	STAT_MIB_RX("rx_unicast", mib.rx.uc),
241 	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
242 	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
243 	/* UniMAC TSV counters */
244 	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
245 	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
246 	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
247 	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
248 	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
249 	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
250 	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
251 	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
252 	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
253 	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
254 	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
255 	STAT_MIB_TX("tx_multicast", mib.tx.mca),
256 	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
257 	STAT_MIB_TX("tx_pause", mib.tx.pf),
258 	STAT_MIB_TX("tx_control", mib.tx.cf),
259 	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
260 	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
261 	STAT_MIB_TX("tx_defer", mib.tx.drf),
262 	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
263 	STAT_MIB_TX("tx_single_col", mib.tx.scl),
264 	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
265 	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
266 	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
267 	STAT_MIB_TX("tx_frags", mib.tx.frg),
268 	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
269 	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
270 	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
271 	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
272 	STAT_MIB_TX("tx_unicast", mib.tx.uc),
273 	/* UniMAC RUNT counters */
274 	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
275 	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
276 	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
277 	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
278 	/* RXCHK misc statistics */
279 	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
280 	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
281 		   RXCHK_OTHER_DISC_CNTR),
282 	/* RBUF misc statistics */
283 	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
284 	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
285 	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
286 	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
287 	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
288 	/* Per TX-queue statistics are dynamically appended */
289 };
290 
291 #define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
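
/* These entries show up verbatim in "ethtool -S ethX" output; for instance
 * the STAT_MIB_RX("rx_64_octets", ...) entry above is printed as a
 * "rx_64_octets: <count>" line (device name illustrative).
 */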
292 
293 static void bcm_sysport_get_drvinfo(struct net_device *dev,
294 				    struct ethtool_drvinfo *info)
295 {
296 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
297 	strlcpy(info->version, "0.1", sizeof(info->version));
298 	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
299 }
300 
301 static u32 bcm_sysport_get_msglvl(struct net_device *dev)
302 {
303 	struct bcm_sysport_priv *priv = netdev_priv(dev);
304 
305 	return priv->msg_enable;
306 }
307 
308 static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
309 {
310 	struct bcm_sysport_priv *priv = netdev_priv(dev);
311 
312 	priv->msg_enable = enable;
313 }
314 
315 static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
316 {
317 	switch (type) {
318 	case BCM_SYSPORT_STAT_NETDEV:
319 	case BCM_SYSPORT_STAT_NETDEV64:
320 	case BCM_SYSPORT_STAT_RXCHK:
321 	case BCM_SYSPORT_STAT_RBUF:
322 	case BCM_SYSPORT_STAT_SOFT:
323 		return true;
324 	default:
325 		return false;
326 	}
327 }
328 
329 static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
330 {
331 	struct bcm_sysport_priv *priv = netdev_priv(dev);
332 	const struct bcm_sysport_stats *s;
333 	unsigned int i, j;
334 
335 	switch (string_set) {
336 	case ETH_SS_STATS:
337 		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
338 			s = &bcm_sysport_gstrings_stats[i];
339 			if (priv->is_lite &&
340 			    !bcm_sysport_lite_stat_valid(s->type))
341 				continue;
342 			j++;
343 		}
344 		/* Include per-queue statistics */
345 		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
346 	default:
347 		return -EOPNOTSUPP;
348 	}
349 }
350 
351 static void bcm_sysport_get_strings(struct net_device *dev,
352 				    u32 stringset, u8 *data)
353 {
354 	struct bcm_sysport_priv *priv = netdev_priv(dev);
355 	const struct bcm_sysport_stats *s;
356 	char buf[128];
357 	int i, j;
358 
359 	switch (stringset) {
360 	case ETH_SS_STATS:
361 		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
362 			s = &bcm_sysport_gstrings_stats[i];
363 			if (priv->is_lite &&
364 			    !bcm_sysport_lite_stat_valid(s->type))
365 				continue;
366 
367 			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
368 			       ETH_GSTRING_LEN);
369 			j++;
370 		}
371 
372 		for (i = 0; i < dev->num_tx_queues; i++) {
373 			snprintf(buf, sizeof(buf), "txq%d_packets", i);
374 			memcpy(data + j * ETH_GSTRING_LEN, buf,
375 			       ETH_GSTRING_LEN);
376 			j++;
377 
378 			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
379 			memcpy(data + j * ETH_GSTRING_LEN, buf,
380 			       ETH_GSTRING_LEN);
381 			j++;
382 		}
383 		break;
384 	default:
385 		break;
386 	}
387 }
388 
389 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
390 {
391 	int i, j = 0;
392 
393 	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
394 		const struct bcm_sysport_stats *s;
395 		u8 offset = 0;
396 		u32 val = 0;
397 		char *p;
398 
399 		s = &bcm_sysport_gstrings_stats[i];
400 		switch (s->type) {
401 		case BCM_SYSPORT_STAT_NETDEV:
402 		case BCM_SYSPORT_STAT_NETDEV64:
403 		case BCM_SYSPORT_STAT_SOFT:
404 			continue;
405 		case BCM_SYSPORT_STAT_MIB_RX:
406 		case BCM_SYSPORT_STAT_MIB_TX:
407 		case BCM_SYSPORT_STAT_RUNT:
408 			if (priv->is_lite)
409 				continue;
410 
411 			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
412 				offset = UMAC_MIB_STAT_OFFSET;
413 			val = umac_readl(priv, UMAC_MIB_START + j + offset);
414 			break;
415 		case BCM_SYSPORT_STAT_RXCHK:
416 			val = rxchk_readl(priv, s->reg_offset);
417 			if (val == ~0)
418 				rxchk_writel(priv, 0, s->reg_offset);
419 			break;
420 		case BCM_SYSPORT_STAT_RBUF:
421 			val = rbuf_readl(priv, s->reg_offset);
422 			if (val == ~0)
423 				rbuf_writel(priv, 0, s->reg_offset);
424 			break;
425 		}
426 
427 		j += s->stat_sizeof;
428 		p = (char *)priv + s->stat_offset;
429 		*(u32 *)p = val;
430 	}
431 
432 	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
433 }
434 
435 static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
436 					u64 *tx_bytes, u64 *tx_packets)
437 {
438 	struct bcm_sysport_tx_ring *ring;
439 	u64 bytes = 0, packets = 0;
440 	unsigned int start;
441 	unsigned int q;
442 
443 	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
444 		ring = &priv->tx_rings[q];
445 		do {
446 			start = u64_stats_fetch_begin_irq(&priv->syncp);
447 			bytes = ring->bytes;
448 			packets = ring->packets;
449 		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
450 
451 		*tx_bytes += bytes;
452 		*tx_packets += packets;
453 	}
454 }
455 
456 static void bcm_sysport_get_stats(struct net_device *dev,
457 				  struct ethtool_stats *stats, u64 *data)
458 {
459 	struct bcm_sysport_priv *priv = netdev_priv(dev);
460 	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
461 	struct u64_stats_sync *syncp = &priv->syncp;
462 	struct bcm_sysport_tx_ring *ring;
463 	u64 tx_bytes = 0, tx_packets = 0;
464 	unsigned int start;
465 	int i, j;
466 
467 	if (netif_running(dev)) {
468 		bcm_sysport_update_mib_counters(priv);
469 		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
470 		stats64->tx_bytes = tx_bytes;
471 		stats64->tx_packets = tx_packets;
472 	}
473 
474 	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
475 		const struct bcm_sysport_stats *s;
476 		char *p;
477 
478 		s = &bcm_sysport_gstrings_stats[i];
479 		if (s->type == BCM_SYSPORT_STAT_NETDEV)
480 			p = (char *)&dev->stats;
481 		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
482 			p = (char *)stats64;
483 		else
484 			p = (char *)priv;
485 
486 		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
487 			continue;
488 		p += s->stat_offset;
489 
490 		if (s->stat_sizeof == sizeof(u64) &&
491 		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
492 			do {
493 				start = u64_stats_fetch_begin_irq(syncp);
494 				data[j] = *(u64 *)p;
495 			} while (u64_stats_fetch_retry_irq(syncp, start));
496 		} else
497 			data[j] = *(u32 *)p;
498 		j++;
499 	}
500 
501 	/* Since the loop above packed values with j, the SYSTEMPORT Lite holes
502 	 * leave no gaps in data[]. Recompute j from the string set count,
503 	 * which equals the number of values packed above, so that the per
504 	 * TX queue statistics are appended right after them.
505 	 */
506 	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
507 	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
508 
509 	for (i = 0; i < dev->num_tx_queues; i++) {
510 		ring = &priv->tx_rings[i];
511 		data[j] = ring->packets;
512 		j++;
513 		data[j] = ring->bytes;
514 		j++;
515 	}
516 }
517 
518 static void bcm_sysport_get_wol(struct net_device *dev,
519 				struct ethtool_wolinfo *wol)
520 {
521 	struct bcm_sysport_priv *priv = netdev_priv(dev);
522 	u32 reg;
523 
524 	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
525 	wol->wolopts = priv->wolopts;
526 
527 	if (!(priv->wolopts & WAKE_MAGICSECURE))
528 		return;
529 
530 	/* Return the programmed SecureOn password */
531 	reg = umac_readl(priv, UMAC_PSW_MS);
532 	put_unaligned_be16(reg, &wol->sopass[0]);
533 	reg = umac_readl(priv, UMAC_PSW_LS);
534 	put_unaligned_be32(reg, &wol->sopass[2]);
535 }
536 
537 static int bcm_sysport_set_wol(struct net_device *dev,
538 			       struct ethtool_wolinfo *wol)
539 {
540 	struct bcm_sysport_priv *priv = netdev_priv(dev);
541 	struct device *kdev = &priv->pdev->dev;
542 	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
543 
544 	if (!device_can_wakeup(kdev))
545 		return -ENOTSUPP;
546 
547 	if (wol->wolopts & ~supported)
548 		return -EINVAL;
549 
550 	/* Program the SecureOn password */
551 	if (wol->wolopts & WAKE_MAGICSECURE) {
552 		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
553 			    UMAC_PSW_MS);
554 		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
555 			    UMAC_PSW_LS);
556 	}
557 
558 	/* Flag the device and relevant IRQ as wakeup capable */
559 	if (wol->wolopts) {
560 		device_set_wakeup_enable(kdev, 1);
561 		if (priv->wol_irq_disabled)
562 			enable_irq_wake(priv->wol_irq);
563 		priv->wol_irq_disabled = 0;
564 	} else {
565 		device_set_wakeup_enable(kdev, 0);
566 		/* Avoid unbalanced disable_irq_wake calls */
567 		if (!priv->wol_irq_disabled)
568 			disable_irq_wake(priv->wol_irq);
569 		priv->wol_irq_disabled = 1;
570 	}
571 
572 	priv->wolopts = wol->wolopts;
573 
574 	return 0;
575 }
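
/* Usage sketch (illustrative): "ethtool -s ethX wol g" requests WAKE_MAGIC,
 * while "ethtool -s ethX wol gs sopass 00:11:22:33:44:55" additionally
 * programs the SecureOn password written to UMAC_PSW_MS/LS above.
 */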
576 
577 static int bcm_sysport_get_coalesce(struct net_device *dev,
578 				    struct ethtool_coalesce *ec)
579 {
580 	struct bcm_sysport_priv *priv = netdev_priv(dev);
581 	u32 reg;
582 
583 	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
584 
585 	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
586 	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
587 
588 	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
589 
590 	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
591 	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
592 
593 	return 0;
594 }
595 
596 static int bcm_sysport_set_coalesce(struct net_device *dev,
597 				    struct ethtool_coalesce *ec)
598 {
599 	struct bcm_sysport_priv *priv = netdev_priv(dev);
600 	unsigned int i;
601 	u32 reg;
602 
603 	/* The base system clock is 125 MHz and the DMA timeout tick is this
604 	 * reference clock divided by 1024, which yields roughly 8.192 us. Our
605 	 * maximum value has to fit in the RING_TIMEOUT_MASK (16 bits).
606 	 */
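	/* Worked example (illustrative): requesting rx-usecs=100 via
	 * "ethtool -C ethX rx-usecs 100" is converted below into
	 * DIV_ROUND_UP(100 * 1000, 8192) = 13 timeout ticks, i.e. an
	 * effective timeout of 13 * 8.192 ~= 106.5 us.
	 */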
607 	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
608 	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
609 	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
610 	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
611 		return -EINVAL;
612 
613 	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
614 	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
615 		return -EINVAL;
616 
617 	for (i = 0; i < dev->num_tx_queues; i++) {
618 		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
619 		reg &= ~(RING_INTR_THRESH_MASK |
620 			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
621 		reg |= ec->tx_max_coalesced_frames;
622 		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
623 			 RING_TIMEOUT_SHIFT;
624 		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
625 	}
626 
627 	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
628 	reg &= ~(RDMA_INTR_THRESH_MASK |
629 		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
630 	reg |= ec->rx_max_coalesced_frames;
631 	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
632 			    RDMA_TIMEOUT_SHIFT;
633 	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
634 
635 	return 0;
636 }
637 
638 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
639 {
640 	dev_consume_skb_any(cb->skb);
641 	cb->skb = NULL;
642 	dma_unmap_addr_set(cb, dma_addr, 0);
643 }
644 
645 static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
646 					     struct bcm_sysport_cb *cb)
647 {
648 	struct device *kdev = &priv->pdev->dev;
649 	struct net_device *ndev = priv->netdev;
650 	struct sk_buff *skb, *rx_skb;
651 	dma_addr_t mapping;
652 
653 	/* Allocate a new SKB for a new packet */
654 	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
655 	if (!skb) {
656 		priv->mib.alloc_rx_buff_failed++;
657 		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
658 		return NULL;
659 	}
660 
661 	mapping = dma_map_single(kdev, skb->data,
662 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
663 	if (dma_mapping_error(kdev, mapping)) {
664 		priv->mib.rx_dma_failed++;
665 		dev_kfree_skb_any(skb);
666 		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
667 		return NULL;
668 	}
669 
670 	/* Grab the current SKB on the ring */
671 	rx_skb = cb->skb;
672 	if (likely(rx_skb))
673 		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
674 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
675 
676 	/* Put the new SKB on the ring */
677 	cb->skb = skb;
678 	dma_unmap_addr_set(cb, dma_addr, mapping);
679 	dma_desc_set_addr(priv, cb->bd_addr, mapping);
680 
681 	netif_dbg(priv, rx_status, ndev, "RX refill\n");
682 
683 	/* Return the current SKB to the caller */
684 	return rx_skb;
685 }
686 
687 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
688 {
689 	struct bcm_sysport_cb *cb;
690 	struct sk_buff *skb;
691 	unsigned int i;
692 
693 	for (i = 0; i < priv->num_rx_bds; i++) {
694 		cb = &priv->rx_cbs[i];
695 		skb = bcm_sysport_rx_refill(priv, cb);
696 		if (skb)
697 			dev_kfree_skb(skb);
698 		if (!cb->skb)
699 			return -ENOMEM;
700 	}
701 
702 	return 0;
703 }
704 
705 /* Poll the hardware for up to budget packets to process */
706 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
707 					unsigned int budget)
708 {
709 	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
710 	struct net_device *ndev = priv->netdev;
711 	unsigned int processed = 0, to_process;
712 	struct bcm_sysport_cb *cb;
713 	struct sk_buff *skb;
714 	unsigned int p_index;
715 	u16 len, status;
716 	struct bcm_rsb *rsb;
717 
718 	/* Clear status before servicing to reduce spurious interrupts */
719 	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
720 
721 	/* Determine how much we should process since the last call; SYSTEMPORT
722 	 * Lite groups the producer and consumer indexes into the same 32-bit
723 	 * register, which we access using RDMA_CONS_INDEX
724 	 */
725 	if (!priv->is_lite)
726 		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
727 	else
728 		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
729 	p_index &= RDMA_PROD_INDEX_MASK;
730 
731 	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
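	/* Worked example (illustrative, assuming a 0xffff mask): if the
	 * consumer index has advanced to 65530 and the producer index has
	 * wrapped around to 5, then (5 - 65530) & RDMA_CONS_INDEX_MASK = 11
	 * packets are pending.
	 */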
732 
733 	netif_dbg(priv, rx_status, ndev,
734 		  "p_index=%d rx_c_index=%d to_process=%d\n",
735 		  p_index, priv->rx_c_index, to_process);
736 
737 	while ((processed < to_process) && (processed < budget)) {
738 		cb = &priv->rx_cbs[priv->rx_read_ptr];
739 		skb = bcm_sysport_rx_refill(priv, cb);
740 
742 		/* We do not have a backing SKB, so we do not have a
743 		 * corresponding DMA mapping for this incoming packet since
744 		 * bcm_sysport_rx_refill always either has both skb and mapping
745 		 * or none.
746 		 */
747 		if (unlikely(!skb)) {
748 			netif_err(priv, rx_err, ndev, "out of memory!\n");
749 			ndev->stats.rx_dropped++;
750 			ndev->stats.rx_errors++;
751 			goto next;
752 		}
753 
754 		/* Extract the Receive Status Block prepended */
755 		rsb = (struct bcm_rsb *)skb->data;
756 		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
757 		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
758 			  DESC_STATUS_MASK;
759 
760 		netif_dbg(priv, rx_status, ndev,
761 			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
762 			  p_index, priv->rx_c_index, priv->rx_read_ptr,
763 			  len, status);
764 
765 		if (unlikely(len > RX_BUF_LENGTH)) {
766 			netif_err(priv, rx_status, ndev, "oversized packet\n");
767 			ndev->stats.rx_length_errors++;
768 			ndev->stats.rx_errors++;
769 			dev_kfree_skb_any(skb);
770 			goto next;
771 		}
772 
773 		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
774 			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
775 			ndev->stats.rx_dropped++;
776 			ndev->stats.rx_errors++;
777 			dev_kfree_skb_any(skb);
778 			goto next;
779 		}
780 
781 		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
782 			netif_err(priv, rx_err, ndev, "error packet\n");
783 			if (status & RX_STATUS_OVFLOW)
784 				ndev->stats.rx_over_errors++;
785 			ndev->stats.rx_dropped++;
786 			ndev->stats.rx_errors++;
787 			dev_kfree_skb_any(skb);
788 			goto next;
789 		}
790 
791 		skb_put(skb, len);
792 
793 		/* Hardware validated our checksum */
794 		if (likely(status & DESC_L4_CSUM))
795 			skb->ip_summed = CHECKSUM_UNNECESSARY;
796 
797 		/* Hardware prepends packets with 2 bytes before the Ethernet
798 		 * header, plus we have the Receive Status Block; strip off all
799 		 * of this from the SKB.
800 		 */
801 		skb_pull(skb, sizeof(*rsb) + 2);
802 		len -= (sizeof(*rsb) + 2);
803 
804 		/* UniMAC may forward CRC */
805 		if (priv->crc_fwd) {
806 			skb_trim(skb, len - ETH_FCS_LEN);
807 			len -= ETH_FCS_LEN;
808 		}
809 
810 		skb->protocol = eth_type_trans(skb, ndev);
811 		ndev->stats.rx_packets++;
812 		ndev->stats.rx_bytes += len;
813 		u64_stats_update_begin(&priv->syncp);
814 		stats64->rx_packets++;
815 		stats64->rx_bytes += len;
816 		u64_stats_update_end(&priv->syncp);
817 
818 		napi_gro_receive(&priv->napi, skb);
819 next:
820 		processed++;
821 		priv->rx_read_ptr++;
822 
823 		if (priv->rx_read_ptr == priv->num_rx_bds)
824 			priv->rx_read_ptr = 0;
825 	}
826 
827 	return processed;
828 }
829 
830 static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
831 				       struct bcm_sysport_cb *cb,
832 				       unsigned int *bytes_compl,
833 				       unsigned int *pkts_compl)
834 {
835 	struct bcm_sysport_priv *priv = ring->priv;
836 	struct device *kdev = &priv->pdev->dev;
837 
838 	if (cb->skb) {
839 		*bytes_compl += cb->skb->len;
840 		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
841 				 dma_unmap_len(cb, dma_len),
842 				 DMA_TO_DEVICE);
843 		(*pkts_compl)++;
844 		bcm_sysport_free_cb(cb);
845 	/* SKB fragment */
846 	} else if (dma_unmap_addr(cb, dma_addr)) {
847 		*bytes_compl += dma_unmap_len(cb, dma_len);
848 		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
849 			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
850 		dma_unmap_addr_set(cb, dma_addr, 0);
851 	}
852 }
853 
854 /* Reclaim queued SKBs for transmission completion, lockless version */
855 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
856 					     struct bcm_sysport_tx_ring *ring)
857 {
858 	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
859 	unsigned int pkts_compl = 0, bytes_compl = 0;
860 	struct net_device *ndev = priv->netdev;
861 	struct bcm_sysport_cb *cb;
862 	u32 hw_ind;
863 
864 	/* Clear status before servicing to reduce spurious interrupts */
865 	if (!ring->priv->is_lite)
866 		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
867 	else
868 		intrl2_0_writel(ring->priv, BIT(ring->index +
869 				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
870 
871 	/* Compute how many descriptors have been processed since last call */
872 	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
873 	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
874 	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
875 
876 	last_c_index = ring->c_index;
877 	num_tx_cbs = ring->size;
878 
879 	c_index &= (num_tx_cbs - 1);
880 
881 	if (c_index >= last_c_index)
882 		last_tx_cn = c_index - last_c_index;
883 	else
884 		last_tx_cn = num_tx_cbs - last_c_index + c_index;
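	/* Worked example (illustrative): with num_tx_cbs = 256,
	 * last_c_index = 250 and a wrapped c_index = 4, this yields
	 * last_tx_cn = 256 - 250 + 4 = 10 completed descriptors.
	 */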
885 
886 	netif_dbg(priv, tx_done, ndev,
887 		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
888 		  ring->index, c_index, last_tx_cn, last_c_index);
889 
890 	while (last_tx_cn-- > 0) {
891 		cb = ring->cbs + last_c_index;
892 		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
893 
894 		ring->desc_count++;
895 		last_c_index++;
896 		last_c_index &= (num_tx_cbs - 1);
897 	}
898 
899 	u64_stats_update_begin(&priv->syncp);
900 	ring->packets += pkts_compl;
901 	ring->bytes += bytes_compl;
902 	u64_stats_update_end(&priv->syncp);
903 
904 	ring->c_index = c_index;
905 
906 	netif_dbg(priv, tx_done, ndev,
907 		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
908 		  ring->index, ring->c_index, pkts_compl, bytes_compl);
909 
910 	return pkts_compl;
911 }
912 
913 /* Locked version of the per-ring TX reclaim routine */
914 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
915 					   struct bcm_sysport_tx_ring *ring)
916 {
917 	struct netdev_queue *txq;
918 	unsigned int released;
919 	unsigned long flags;
920 
921 	txq = netdev_get_tx_queue(priv->netdev, ring->index);
922 
923 	spin_lock_irqsave(&ring->lock, flags);
924 	released = __bcm_sysport_tx_reclaim(priv, ring);
925 	if (released)
926 		netif_tx_wake_queue(txq);
927 
928 	spin_unlock_irqrestore(&ring->lock, flags);
929 
930 	return released;
931 }
932 
933 /* Locked version of the per-ring TX reclaim, but does not wake the queue */
934 static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
935 				 struct bcm_sysport_tx_ring *ring)
936 {
937 	unsigned long flags;
938 
939 	spin_lock_irqsave(&ring->lock, flags);
940 	__bcm_sysport_tx_reclaim(priv, ring);
941 	spin_unlock_irqrestore(&ring->lock, flags);
942 }
943 
944 static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
945 {
946 	struct bcm_sysport_tx_ring *ring =
947 		container_of(napi, struct bcm_sysport_tx_ring, napi);
948 	unsigned int work_done = 0;
949 
950 	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
951 
952 	if (work_done == 0) {
953 		napi_complete(napi);
954 		/* re-enable TX interrupt */
955 		if (!ring->priv->is_lite)
956 			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
957 		else
958 			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
959 					    INTRL2_0_TDMA_MBDONE_SHIFT));
960 
961 		return 0;
962 	}
963 
964 	return budget;
965 }
966 
967 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
968 {
969 	unsigned int q;
970 
971 	for (q = 0; q < priv->netdev->num_tx_queues; q++)
972 		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
973 }
974 
975 static int bcm_sysport_poll(struct napi_struct *napi, int budget)
976 {
977 	struct bcm_sysport_priv *priv =
978 		container_of(napi, struct bcm_sysport_priv, napi);
979 	unsigned int work_done = 0;
980 
981 	work_done = bcm_sysport_desc_rx(priv, budget);
982 
983 	priv->rx_c_index += work_done;
984 	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
985 
986 	/* SYSTEMPORT Lite groups the producer/consumer index; the producer is
987 	 * maintained by HW, but writes to it will be ignored while RDMA
988 	 * is active
989 	 */
990 	if (!priv->is_lite)
991 		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
992 	else
993 		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
994 
995 	if (work_done < budget) {
996 		napi_complete_done(napi, work_done);
997 		/* re-enable RX interrupts */
998 		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
999 	}
1000 
1001 	return work_done;
1002 }
1003 
1004 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1005 {
1006 	u32 reg;
1007 
1008 	/* Stop monitoring MPD interrupt */
1009 	intrl2_0_mask_set(priv, INTRL2_0_MPD);
1010 
1011 	/* Clear the MagicPacket detection logic */
1012 	reg = umac_readl(priv, UMAC_MPD_CTRL);
1013 	reg &= ~MPD_EN;
1014 	umac_writel(priv, reg, UMAC_MPD_CTRL);
1015 
1016 	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
1017 }
1018 
1019 /* RX and misc interrupt routine */
1020 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1021 {
1022 	struct net_device *dev = dev_id;
1023 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1024 	struct bcm_sysport_tx_ring *txr;
1025 	unsigned int ring, ring_bit;
1026 
1027 	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1028 			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1029 	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1030 
1031 	if (unlikely(priv->irq0_stat == 0)) {
1032 		netdev_warn(priv->netdev, "spurious RX interrupt\n");
1033 		return IRQ_NONE;
1034 	}
1035 
1036 	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
1037 		if (likely(napi_schedule_prep(&priv->napi))) {
1038 			/* disable RX interrupts */
1039 			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
1040 			__napi_schedule_irqoff(&priv->napi);
1041 		}
1042 	}
1043 
1044 	/* A TX ring is full; perform a full reclaim since we do not know
1045 	 * which ring triggered this interrupt
1046 	 */
1047 	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1048 		bcm_sysport_tx_reclaim_all(priv);
1049 
1050 	if (priv->irq0_stat & INTRL2_0_MPD) {
1051 		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
1052 		bcm_sysport_resume_from_wol(priv);
1053 	}
1054 
1055 	if (!priv->is_lite)
1056 		goto out;
1057 
1058 	for (ring = 0; ring < dev->num_tx_queues; ring++) {
1059 		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
1060 		if (!(priv->irq0_stat & ring_bit))
1061 			continue;
1062 
1063 		txr = &priv->tx_rings[ring];
1064 
1065 		if (likely(napi_schedule_prep(&txr->napi))) {
1066 			intrl2_0_mask_set(priv, ring_bit);
1067 			__napi_schedule(&txr->napi);
1068 		}
1069 	}
1070 out:
1071 	return IRQ_HANDLED;
1072 }
1073 
1074 /* TX interrupt service routine */
1075 static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
1076 {
1077 	struct net_device *dev = dev_id;
1078 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1079 	struct bcm_sysport_tx_ring *txr;
1080 	unsigned int ring;
1081 
1082 	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
1083 				~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1084 	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1085 
1086 	if (unlikely(priv->irq1_stat == 0)) {
1087 		netdev_warn(priv->netdev, "spurious TX interrupt\n");
1088 		return IRQ_NONE;
1089 	}
1090 
1091 	for (ring = 0; ring < dev->num_tx_queues; ring++) {
1092 		if (!(priv->irq1_stat & BIT(ring)))
1093 			continue;
1094 
1095 		txr = &priv->tx_rings[ring];
1096 
1097 		if (likely(napi_schedule_prep(&txr->napi))) {
1098 			intrl2_1_mask_set(priv, BIT(ring));
1099 			__napi_schedule_irqoff(&txr->napi);
1100 		}
1101 	}
1102 
1103 	return IRQ_HANDLED;
1104 }
1105 
1106 static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
1107 {
1108 	struct bcm_sysport_priv *priv = dev_id;
1109 
1110 	pm_wakeup_event(&priv->pdev->dev, 0);
1111 
1112 	return IRQ_HANDLED;
1113 }
1114 
1115 #ifdef CONFIG_NET_POLL_CONTROLLER
1116 static void bcm_sysport_poll_controller(struct net_device *dev)
1117 {
1118 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1119 
1120 	disable_irq(priv->irq0);
1121 	bcm_sysport_rx_isr(priv->irq0, dev);
1122 	enable_irq(priv->irq0);
1123 
1124 	if (!priv->is_lite) {
1125 		disable_irq(priv->irq1);
1126 		bcm_sysport_tx_isr(priv->irq1, dev);
1127 		enable_irq(priv->irq1);
1128 	}
1129 }
1130 #endif
1131 
1132 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
1133 					      struct net_device *dev)
1134 {
1135 	struct sk_buff *nskb;
1136 	struct bcm_tsb *tsb;
1137 	u32 csum_info;
1138 	u8 ip_proto;
1139 	u16 csum_start;
1140 	u16 ip_ver;
1141 
1142 	/* Re-allocate SKB if needed */
1143 	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
1144 		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
1145 		dev_kfree_skb(skb);
1146 		if (!nskb) {
1147 			dev->stats.tx_errors++;
1148 			dev->stats.tx_dropped++;
1149 			return NULL;
1150 		}
1151 		skb = nskb;
1152 	}
1153 
1154 	tsb = skb_push(skb, sizeof(*tsb));
1155 	/* Zero-out TSB by default */
1156 	memset(tsb, 0, sizeof(*tsb));
1157 
1158 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1159 		ip_ver = ntohs(skb->protocol);
1160 		switch (ip_ver) {
1161 		case ETH_P_IP:
1162 			ip_proto = ip_hdr(skb)->protocol;
1163 			break;
1164 		case ETH_P_IPV6:
1165 			ip_proto = ipv6_hdr(skb)->nexthdr;
1166 			break;
1167 		default:
1168 			return skb;
1169 		}
1170 
1171 		/* Get the checksum offset and the L4 (transport) offset */
1172 		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
1173 		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
1174 		csum_info |= (csum_start << L4_PTR_SHIFT);
1175 
1176 		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1177 			csum_info |= L4_LENGTH_VALID;
1178 			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
1179 				csum_info |= L4_UDP;
1180 		} else {
1181 			csum_info = 0;
1182 		}
1183 
1184 		tsb->l4_ptr_dest_map = csum_info;
1185 	}
1186 
1187 	return skb;
1188 }
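
/* Worked example (illustrative, untagged TCP over IPv4): with a 14-byte
 * Ethernet header and a 20-byte IP header, csum_start computes to 34 and
 * skb->csum_offset is offsetof(struct tcphdr, check) = 16, so csum_info
 * packs a checksum pointer of 50, an L4 pointer of 34 and L4_LENGTH_VALID.
 */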
1189 
1190 static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
1191 				    struct net_device *dev)
1192 {
1193 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1194 	struct device *kdev = &priv->pdev->dev;
1195 	struct bcm_sysport_tx_ring *ring;
1196 	struct bcm_sysport_cb *cb;
1197 	struct netdev_queue *txq;
1198 	struct dma_desc *desc;
1199 	unsigned int skb_len;
1200 	unsigned long flags;
1201 	dma_addr_t mapping;
1202 	u32 len_status;
1203 	u16 queue;
1204 	int ret;
1205 
1206 	queue = skb_get_queue_mapping(skb);
1207 	txq = netdev_get_tx_queue(dev, queue);
1208 	ring = &priv->tx_rings[queue];
1209 
1210 	/* lock against tx reclaim in BH context and TX ring full interrupt */
1211 	spin_lock_irqsave(&ring->lock, flags);
1212 	if (unlikely(ring->desc_count == 0)) {
1213 		netif_tx_stop_queue(txq);
1214 		netdev_err(dev, "queue %d awake and ring full!\n", queue);
1215 		ret = NETDEV_TX_BUSY;
1216 		goto out;
1217 	}
1218 
1219 	/* The Ethernet switch we are interfaced with needs packets to be at
1220 	 * least 64 bytes (including FCS), otherwise they will be discarded when
1221 	 * they enter the switch port logic. When Broadcom tags are enabled, we
1222 	 * need to make sure that packets are at least 68 bytes
1223 	 * (including FCS and tag) because the length verification is done after
1224 	 * the Broadcom tag is stripped off the ingress packet.
1225 	 */
1226 	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
1227 		ret = NETDEV_TX_OK;
1228 		goto out;
1229 	}
1230 
1231 	/* Insert TSB and checksum infos */
1232 	if (priv->tsb_en) {
1233 		skb = bcm_sysport_insert_tsb(skb, dev);
1234 		if (!skb) {
1235 			ret = NETDEV_TX_OK;
1236 			goto out;
1237 		}
1238 	}
1239 
1240 	skb_len = skb->len;
1241 
1242 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1243 	if (dma_mapping_error(kdev, mapping)) {
1244 		priv->mib.tx_dma_failed++;
1245 		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
1246 			  skb->data, skb_len);
1247 		ret = NETDEV_TX_OK;
1248 		goto out;
1249 	}
1250 
1251 	/* Remember the SKB for future freeing */
1252 	cb = &ring->cbs[ring->curr_desc];
1253 	cb->skb = skb;
1254 	dma_unmap_addr_set(cb, dma_addr, mapping);
1255 	dma_unmap_len_set(cb, dma_len, skb_len);
1256 
1257 	/* Fetch a descriptor entry from our pool */
1258 	desc = ring->desc_cpu;
1259 
1260 	desc->addr_lo = lower_32_bits(mapping);
1261 	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
1262 	len_status |= (skb_len << DESC_LEN_SHIFT);
1263 	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
1264 		       DESC_STATUS_SHIFT;
1265 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1266 		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
1267 
1268 	ring->curr_desc++;
1269 	if (ring->curr_desc == ring->size)
1270 		ring->curr_desc = 0;
1271 	ring->desc_count--;
1272 
1273 	/* Ensure write completion of the descriptor status/length
1274 	 * in DRAM before the System Port WRITE_PORT register latches
1275 	 * the value
1276 	 */
1277 	wmb();
1278 	desc->addr_status_len = len_status;
1279 	wmb();
1280 
1281 	/* Write this descriptor address to the RING write port */
1282 	tdma_port_write_desc_addr(priv, desc, ring->index);
1283 
1284 	/* Check ring space and update SW control flow */
1285 	if (ring->desc_count == 0)
1286 		netif_tx_stop_queue(txq);
1287 
1288 	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
1289 		  ring->index, ring->desc_count, ring->curr_desc);
1290 
1291 	ret = NETDEV_TX_OK;
1292 out:
1293 	spin_unlock_irqrestore(&ring->lock, flags);
1294 	return ret;
1295 }
1296 
1297 static void bcm_sysport_tx_timeout(struct net_device *dev)
1298 {
1299 	netdev_warn(dev, "transmit timeout!\n");
1300 
1301 	netif_trans_update(dev);
1302 	dev->stats.tx_errors++;
1303 
1304 	netif_tx_wake_all_queues(dev);
1305 }
1306 
1307 /* phylib adjust link callback */
1308 static void bcm_sysport_adj_link(struct net_device *dev)
1309 {
1310 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1311 	struct phy_device *phydev = dev->phydev;
1312 	unsigned int changed = 0;
1313 	u32 cmd_bits = 0, reg;
1314 
1315 	if (priv->old_link != phydev->link) {
1316 		changed = 1;
1317 		priv->old_link = phydev->link;
1318 	}
1319 
1320 	if (priv->old_duplex != phydev->duplex) {
1321 		changed = 1;
1322 		priv->old_duplex = phydev->duplex;
1323 	}
1324 
1325 	if (priv->is_lite)
1326 		goto out;
1327 
1328 	switch (phydev->speed) {
1329 	case SPEED_2500:
1330 		cmd_bits = CMD_SPEED_2500;
1331 		break;
1332 	case SPEED_1000:
1333 		cmd_bits = CMD_SPEED_1000;
1334 		break;
1335 	case SPEED_100:
1336 		cmd_bits = CMD_SPEED_100;
1337 		break;
1338 	case SPEED_10:
1339 		cmd_bits = CMD_SPEED_10;
1340 		break;
1341 	default:
1342 		break;
1343 	}
1344 	cmd_bits <<= CMD_SPEED_SHIFT;
1345 
1346 	if (phydev->duplex == DUPLEX_HALF)
1347 		cmd_bits |= CMD_HD_EN;
1348 
1349 	if (priv->old_pause != phydev->pause) {
1350 		changed = 1;
1351 		priv->old_pause = phydev->pause;
1352 	}
1353 
1354 	if (!phydev->pause)
1355 		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
1356 
1357 	if (!changed)
1358 		return;
1359 
1360 	if (phydev->link) {
1361 		reg = umac_readl(priv, UMAC_CMD);
1362 		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
1363 			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
1364 			CMD_TX_PAUSE_IGNORE);
1365 		reg |= cmd_bits;
1366 		umac_writel(priv, reg, UMAC_CMD);
1367 	}
1368 out:
1369 	if (changed)
1370 		phy_print_status(phydev);
1371 }
1372 
1373 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1374 				    unsigned int index)
1375 {
1376 	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1377 	struct device *kdev = &priv->pdev->dev;
1378 	size_t size;
1379 	void *p;
1380 	u32 reg;
1381 
1382 	/* Simple descriptors partitioning for now */
1383 	size = 256;
1384 
1385 	/* We just need one DMA descriptor which is DMA-able, since writing to
1386 	 * the port will allocate a new descriptor in its internal linked-list
1387 	 */
1388 	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
1389 				GFP_KERNEL);
1390 	if (!p) {
1391 		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1392 		return -ENOMEM;
1393 	}
1394 
1395 	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1396 	if (!ring->cbs) {
1397 		dma_free_coherent(kdev, sizeof(struct dma_desc),
1398 				  p, ring->desc_dma);	/* ring->desc_cpu not set yet */
1399 		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1400 		return -ENOMEM;
1401 	}
1402 
1403 	/* Initialize SW view of the ring */
1404 	spin_lock_init(&ring->lock);
1405 	ring->priv = priv;
1406 	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1407 	ring->index = index;
1408 	ring->size = size;
1409 	ring->alloc_size = ring->size;
1410 	ring->desc_cpu = p;
1411 	ring->desc_count = ring->size;
1412 	ring->curr_desc = 0;
1413 
1414 	/* Initialize HW ring */
1415 	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1416 	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1417 	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1418 	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1419 
1420 	/* Configure QID and port mapping */
1421 	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
1422 	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
1423 	reg |= ring->switch_queue & RING_QID_MASK;
1424 	reg |= ring->switch_port << RING_PORT_ID_SHIFT;
1425 	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
1426 	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
1427 
1428 	/* Enable ACB algorithm 2 */
1429 	reg = tdma_readl(priv, TDMA_CONTROL);
1430 	reg |= tdma_control_bit(priv, ACB_ALGO);
1431 	tdma_writel(priv, reg, TDMA_CONTROL);
1432 
1433 	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
1434 	 * with the original definition of ACB_ALGO
1435 	 */
1436 	reg = tdma_readl(priv, TDMA_CONTROL);
1437 	if (priv->is_lite)
1438 		reg &= ~BIT(TSB_SWAP1);
1439 	/* Set a correct TSB format based on host endian */
1440 	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1441 		reg |= tdma_control_bit(priv, TSB_SWAP0);
1442 	else
1443 		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
1444 	tdma_writel(priv, reg, TDMA_CONTROL);
1445 
1446 	/* Program the number of descriptors as MAX_THRESHOLD and half of
1447 	 * its size for the hysteresis trigger
1448 	 */
1449 	tdma_writel(priv, ring->size |
1450 			1 << RING_HYST_THRESH_SHIFT,
1451 			TDMA_DESC_RING_MAX_HYST(index));
1452 
1453 	/* Enable the ring queue in the arbiter */
1454 	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1455 	reg |= (1 << index);
1456 	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1457 
1458 	napi_enable(&ring->napi);
1459 
1460 	netif_dbg(priv, hw, priv->netdev,
1461 		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
1462 		  ring->size, ring->desc_cpu, ring->switch_queue,
1463 		  ring->switch_port);
1464 
1465 	return 0;
1466 }
1467 
1468 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1469 				     unsigned int index)
1470 {
1471 	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1472 	struct device *kdev = &priv->pdev->dev;
1473 	u32 reg;
1474 
1475 	/* Caller should stop the TDMA engine */
1476 	reg = tdma_readl(priv, TDMA_STATUS);
1477 	if (!(reg & TDMA_DISABLED))
1478 		netdev_warn(priv->netdev, "TDMA not stopped!\n");
1479 
1480 	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
1481 	 * fail, so by checking this pointer we know whether the TX ring was
1482 	 * fully initialized or not.
1483 	 */
1484 	if (!ring->cbs)
1485 		return;
1486 
1487 	napi_disable(&ring->napi);
1488 	netif_napi_del(&ring->napi);
1489 
1490 	bcm_sysport_tx_clean(priv, ring);
1491 
1492 	kfree(ring->cbs);
1493 	ring->cbs = NULL;
1494 
1495 	if (ring->desc_dma) {
1496 		dma_free_coherent(kdev, sizeof(struct dma_desc),
1497 				  ring->desc_cpu, ring->desc_dma);
1498 		ring->desc_dma = 0;
1499 	}
1500 	ring->size = 0;
1501 	ring->alloc_size = 0;
1502 
1503 	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1504 }
1505 
1506 /* RDMA helper */
1507 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1508 				  unsigned int enable)
1509 {
1510 	unsigned int timeout = 1000;
1511 	u32 reg;
1512 
1513 	reg = rdma_readl(priv, RDMA_CONTROL);
1514 	if (enable)
1515 		reg |= RDMA_EN;
1516 	else
1517 		reg &= ~RDMA_EN;
1518 	rdma_writel(priv, reg, RDMA_CONTROL);
1519 
1520 	/* Poll for RDMA enable/disable completion */
1521 	do {
1522 		reg = rdma_readl(priv, RDMA_STATUS);
1523 		if (!!(reg & RDMA_DISABLED) == !enable)
1524 			return 0;
1525 		usleep_range(1000, 2000);
1526 	} while (timeout-- > 0);
1527 
1528 	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1529 
1530 	return -ETIMEDOUT;
1531 }
1532 
1533 /* TDMA helper */
1534 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1535 				  unsigned int enable)
1536 {
1537 	unsigned int timeout = 1000;
1538 	u32 reg;
1539 
1540 	reg = tdma_readl(priv, TDMA_CONTROL);
1541 	if (enable)
1542 		reg |= tdma_control_bit(priv, TDMA_EN);
1543 	else
1544 		reg &= ~tdma_control_bit(priv, TDMA_EN);
1545 	tdma_writel(priv, reg, TDMA_CONTROL);
1546 
1547 	/* Poll for TDMA enable/disable completion */
1548 	do {
1549 		reg = tdma_readl(priv, TDMA_STATUS);
1550 		if (!!(reg & TDMA_DISABLED) == !enable)
1551 			return 0;
1552 
1553 		usleep_range(1000, 2000);
1554 	} while (timeout-- > 0);
1555 
1556 	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1557 
1558 	return -ETIMEDOUT;
1559 }
1560 
1561 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1562 {
1563 	struct bcm_sysport_cb *cb;
1564 	u32 reg;
1565 	int ret;
1566 	int i;
1567 
1568 	/* Initialize SW view of the RX ring */
1569 	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
1570 	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1571 	priv->rx_c_index = 0;
1572 	priv->rx_read_ptr = 0;
1573 	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1574 				GFP_KERNEL);
1575 	if (!priv->rx_cbs) {
1576 		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1577 		return -ENOMEM;
1578 	}
1579 
1580 	for (i = 0; i < priv->num_rx_bds; i++) {
1581 		cb = priv->rx_cbs + i;
1582 		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1583 	}
1584 
1585 	ret = bcm_sysport_alloc_rx_bufs(priv);
1586 	if (ret) {
1587 		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1588 		return ret;
1589 	}
1590 
1591 	/* Initialize HW, ensure RDMA is disabled */
1592 	reg = rdma_readl(priv, RDMA_STATUS);
1593 	if (!(reg & RDMA_DISABLED))
1594 		rdma_enable_set(priv, 0);
1595 
1596 	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1597 	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1598 	rdma_writel(priv, 0, RDMA_PROD_INDEX);
1599 	rdma_writel(priv, 0, RDMA_CONS_INDEX);
1600 	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1601 			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1602 	/* Operate the queue in ring mode */
1603 	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1604 	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1605 	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1606 	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
1607 
1608 	rdma_writel(priv, 1, RDMA_MBDONE_INTR);
1609 
1610 	netif_dbg(priv, hw, priv->netdev,
1611 		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1612 		  priv->num_rx_bds, priv->rx_bds);
1613 
1614 	return 0;
1615 }
1616 
1617 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1618 {
1619 	struct bcm_sysport_cb *cb;
1620 	unsigned int i;
1621 	u32 reg;
1622 
1623 	/* Caller should ensure RDMA is disabled */
1624 	reg = rdma_readl(priv, RDMA_STATUS);
1625 	if (!(reg & RDMA_DISABLED))
1626 		netdev_warn(priv->netdev, "RDMA not stopped!\n");
1627 
1628 	for (i = 0; i < priv->num_rx_bds; i++) {
1629 		cb = &priv->rx_cbs[i];
1630 		if (dma_unmap_addr(cb, dma_addr))
1631 			dma_unmap_single(&priv->pdev->dev,
1632 					 dma_unmap_addr(cb, dma_addr),
1633 					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
1634 		bcm_sysport_free_cb(cb);
1635 	}
1636 
1637 	kfree(priv->rx_cbs);
1638 	priv->rx_cbs = NULL;
1639 
1640 	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1641 }
1642 
1643 static void bcm_sysport_set_rx_mode(struct net_device *dev)
1644 {
1645 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1646 	u32 reg;
1647 
1648 	if (priv->is_lite)
1649 		return;
1650 
1651 	reg = umac_readl(priv, UMAC_CMD);
1652 	if (dev->flags & IFF_PROMISC)
1653 		reg |= CMD_PROMISC;
1654 	else
1655 		reg &= ~CMD_PROMISC;
1656 	umac_writel(priv, reg, UMAC_CMD);
1657 
1658 	/* No support for ALLMULTI */
1659 	if (dev->flags & IFF_ALLMULTI)
1660 		return;
1661 }
1662 
1663 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1664 				   u32 mask, unsigned int enable)
1665 {
1666 	u32 reg;
1667 
1668 	if (!priv->is_lite) {
1669 		reg = umac_readl(priv, UMAC_CMD);
1670 		if (enable)
1671 			reg |= mask;
1672 		else
1673 			reg &= ~mask;
1674 		umac_writel(priv, reg, UMAC_CMD);
1675 	} else {
1676 		reg = gib_readl(priv, GIB_CONTROL);
1677 		if (enable)
1678 			reg |= mask;
1679 		else
1680 			reg &= ~mask;
1681 		gib_writel(priv, reg, GIB_CONTROL);
1682 	}
1683 
1684 	/* UniMAC stops on a packet boundary, wait for a full-sized packet
1685 	 * to be processed (1 msec).
1686 	 */
1687 	if (enable == 0)
1688 		usleep_range(1000, 2000);
1689 }
1690 
1691 static inline void umac_reset(struct bcm_sysport_priv *priv)
1692 {
1693 	u32 reg;
1694 
1695 	if (priv->is_lite)
1696 		return;
1697 
1698 	reg = umac_readl(priv, UMAC_CMD);
1699 	reg |= CMD_SW_RESET;
1700 	umac_writel(priv, reg, UMAC_CMD);
1701 	udelay(10);
1702 	reg = umac_readl(priv, UMAC_CMD);
1703 	reg &= ~CMD_SW_RESET;
1704 	umac_writel(priv, reg, UMAC_CMD);
1705 }
1706 
1707 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1708 			     unsigned char *addr)
1709 {
1710 	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
1711 		    addr[3];
1712 	u32 mac1 = (addr[4] << 8) | addr[5];
1713 
1714 	if (!priv->is_lite) {
1715 		umac_writel(priv, mac0, UMAC_MAC0);
1716 		umac_writel(priv, mac1, UMAC_MAC1);
1717 	} else {
1718 		gib_writel(priv, mac0, GIB_MAC0);
1719 		gib_writel(priv, mac1, GIB_MAC1);
1720 	}
1721 }
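
/* Worked example (illustrative): for the MAC address 00:10:18:ab:cd:ef the
 * packing above yields mac0 = 0x001018ab and mac1 = 0x0000cdef.
 */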
1722 
1723 static void topctrl_flush(struct bcm_sysport_priv *priv)
1724 {
1725 	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1726 	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1727 	mdelay(1);
1728 	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1729 	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1730 }
1731 
1732 static int bcm_sysport_change_mac(struct net_device *dev, void *p)
1733 {
1734 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1735 	struct sockaddr *addr = p;
1736 
1737 	if (!is_valid_ether_addr(addr->sa_data))
1738 		return -EINVAL;
1739 
1740 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1741 
1742 	/* interface is disabled, changes to MAC will be reflected on next
1743 	 * open call
1744 	 */
1745 	if (!netif_running(dev))
1746 		return 0;
1747 
1748 	umac_set_hw_addr(priv, dev->dev_addr);
1749 
1750 	return 0;
1751 }
1752 
1753 static void bcm_sysport_get_stats64(struct net_device *dev,
1754 				    struct rtnl_link_stats64 *stats)
1755 {
1756 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1757 	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
1758 	unsigned int start;
1759 
1760 	netdev_stats_to_stats64(stats, &dev->stats);
1761 
1762 	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1763 				    &stats->tx_packets);
1764 
1765 	do {
1766 		start = u64_stats_fetch_begin_irq(&priv->syncp);
1767 		stats->rx_packets = stats64->rx_packets;
1768 		stats->rx_bytes = stats64->rx_bytes;
1769 	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
1770 }
1771 
1772 static void bcm_sysport_netif_start(struct net_device *dev)
1773 {
1774 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1775 
1776 	/* Enable NAPI */
1777 	napi_enable(&priv->napi);
1778 
1779 	/* Enable RX interrupt and TX ring full interrupt */
1780 	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1781 
1782 	phy_start(dev->phydev);
1783 
1784 	/* Enable TX interrupts for the TXQs */
1785 	if (!priv->is_lite)
1786 		intrl2_1_mask_clear(priv, 0xffffffff);
1787 	else
1788 		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
1789 
1790 	/* Last call before we start the real business */
1791 	netif_tx_start_all_queues(dev);
1792 }
1793 
1794 static void rbuf_init(struct bcm_sysport_priv *priv)
1795 {
1796 	u32 reg;
1797 
1798 	reg = rbuf_readl(priv, RBUF_CONTROL);
1799 	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
1800 	/* Set a correct RSB format on SYSTEMPORT Lite */
1801 	if (priv->is_lite)
1802 		reg &= ~RBUF_RSB_SWAP1;
1803 
1804 	/* Set a correct RSB format based on host endian */
1805 	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1806 		reg |= RBUF_RSB_SWAP0;
1807 	else
1808 		reg &= ~RBUF_RSB_SWAP0;
1809 	rbuf_writel(priv, reg, RBUF_CONTROL);
1810 }
1811 
1812 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1813 {
1814 	intrl2_0_mask_set(priv, 0xffffffff);
1815 	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1816 	if (!priv->is_lite) {
1817 		intrl2_1_mask_set(priv, 0xffffffff);
1818 		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1819 	}
1820 }
1821 
1822 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1823 {
1824 	u32 __maybe_unused reg;
1825 
1826 	/* Include Broadcom tag in pad extension */
1827 	if (netdev_uses_dsa(priv->netdev)) {
1828 		reg = gib_readl(priv, GIB_CONTROL);
1829 		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
1830 		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
1831 		gib_writel(priv, reg, GIB_CONTROL);
1832 	}
1833 }
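
/* The pad extension programmed above raises the minimum-frame padding by
 * ENET_BRCM_TAG_LEN, so that a minimum-sized frame still meets the
 * 64-byte Ethernet minimum once the switch strips its Broadcom tag.
 */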
1834 
1835 static int bcm_sysport_open(struct net_device *dev)
1836 {
1837 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1838 	struct phy_device *phydev;
1839 	unsigned int i;
1840 	int ret;
1841 
1842 	/* Reset UniMAC */
1843 	umac_reset(priv);
1844 
1845 	/* Flush TX and RX FIFOs at TOPCTRL level */
1846 	topctrl_flush(priv);
1847 
1848 	/* Disable the UniMAC RX/TX */
1849 	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
1850 
1851 	/* Enable RBUF 4-byte alignment and Receive Status Block */
1852 	rbuf_init(priv);
1853 
1854 	/* Set maximum frame length */
1855 	if (!priv->is_lite)
1856 		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1857 	else
1858 		gib_set_pad_extension(priv);
1859 
1860 	/* Set MAC address */
1861 	umac_set_hw_addr(priv, dev->dev_addr);
1862 
1863 	/* Read CRC forward */
1864 	if (!priv->is_lite)
1865 		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
1866 	else
1867 		priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
1868 				   GIB_FCS_STRIP);
1869 
1870 	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1871 				0, priv->phy_interface);
1872 	if (!phydev) {
1873 		netdev_err(dev, "could not attach to PHY\n");
1874 		return -ENODEV;
1875 	}
1876 
1877 	/* Reset housekeeping link status */
1878 	priv->old_duplex = -1;
1879 	priv->old_link = -1;
1880 	priv->old_pause = -1;
1881 
1882 	/* mask all interrupts and request them */
1883 	bcm_sysport_mask_all_intrs(priv);
1884 
1885 	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
1886 	if (ret) {
1887 		netdev_err(dev, "failed to request RX interrupt\n");
1888 		goto out_phy_disconnect;
1889 	}
1890 
1891 	if (!priv->is_lite) {
1892 		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
1893 				  dev->name, dev);
1894 		if (ret) {
1895 			netdev_err(dev, "failed to request TX interrupt\n");
1896 			goto out_free_irq0;
1897 		}
1898 	}
1899 
1900 	/* Initialize both hardware and software rings */
1901 	for (i = 0; i < dev->num_tx_queues; i++) {
1902 		ret = bcm_sysport_init_tx_ring(priv, i);
1903 		if (ret) {
1904 			netdev_err(dev, "failed to initialize TX ring %d\n",
1905 				   i);
1906 			goto out_free_tx_ring;
1907 		}
1908 	}
1909 
1910 	/* Initialize linked-list */
1911 	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
1912 
1913 	/* Initialize RX ring */
1914 	ret = bcm_sysport_init_rx_ring(priv);
1915 	if (ret) {
1916 		netdev_err(dev, "failed to initialize RX ring\n");
1917 		goto out_free_rx_ring;
1918 	}
1919 
1920 	/* Turn on RDMA */
1921 	ret = rdma_enable_set(priv, 1);
1922 	if (ret)
1923 		goto out_free_rx_ring;
1924 
1925 	/* Turn on TDMA */
1926 	ret = tdma_enable_set(priv, 1);
1927 	if (ret)
1928 		goto out_clear_rx_int;
1929 
1930 	/* Turn on UniMAC TX/RX */
1931 	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
1932 
1933 	bcm_sysport_netif_start(dev);
1934 
1935 	return 0;
1936 
1937 out_clear_rx_int:
1938 	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1939 out_free_rx_ring:
1940 	bcm_sysport_fini_rx_ring(priv);
1941 out_free_tx_ring:
1942 	for (i = 0; i < dev->num_tx_queues; i++)
1943 		bcm_sysport_fini_tx_ring(priv, i);
1944 	if (!priv->is_lite)
1945 		free_irq(priv->irq1, dev);
1946 out_free_irq0:
1947 	free_irq(priv->irq0, dev);
1948 out_phy_disconnect:
1949 	phy_disconnect(phydev);
1950 	return ret;
1951 }
1952 
1953 static void bcm_sysport_netif_stop(struct net_device *dev)
1954 {
1955 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1956 
1957 	/* stop all software from updating hardware */
1958 	netif_tx_stop_all_queues(dev);
1959 	napi_disable(&priv->napi);
1960 	phy_stop(dev->phydev);
1961 
1962 	/* mask all interrupts */
1963 	bcm_sysport_mask_all_intrs(priv);
1964 }
1965 
1966 static int bcm_sysport_stop(struct net_device *dev)
1967 {
1968 	struct bcm_sysport_priv *priv = netdev_priv(dev);
1969 	unsigned int i;
1970 	int ret;
1971 
1972 	bcm_sysport_netif_stop(dev);
1973 
1974 	/* Disable UniMAC RX */
1975 	umac_enable_set(priv, CMD_RX_EN, 0);
1976 
1977 	ret = tdma_enable_set(priv, 0);
1978 	if (ret) {
1979 		netdev_err(dev, "timeout disabling TDMA\n");
1980 		return ret;
1981 	}
1982 
1983 	/* Wait for a maximum packet size to be drained */
1984 	usleep_range(2000, 3000);
1985 
1986 	ret = rdma_enable_set(priv, 0);
1987 	if (ret) {
1988 		netdev_err(dev, "timeout disabling RDMA\n");
1989 		return ret;
1990 	}
1991 
1992 	/* Disable UniMAC TX */
1993 	umac_enable_set(priv, CMD_TX_EN, 0);
1994 
1995 	/* Free RX/TX rings SW structures */
1996 	for (i = 0; i < dev->num_tx_queues; i++)
1997 		bcm_sysport_fini_tx_ring(priv, i);
1998 	bcm_sysport_fini_rx_ring(priv);
1999 
2000 	free_irq(priv->irq0, dev);
2001 	if (!priv->is_lite)
2002 		free_irq(priv->irq1, dev);
2003 
2004 	/* Disconnect from PHY */
2005 	phy_disconnect(dev->phydev);
2006 
2007 	return 0;
2008 }
2009 
2010 static const struct ethtool_ops bcm_sysport_ethtool_ops = {
2011 	.get_drvinfo		= bcm_sysport_get_drvinfo,
2012 	.get_msglevel		= bcm_sysport_get_msglvl,
2013 	.set_msglevel		= bcm_sysport_set_msglvl,
2014 	.get_link		= ethtool_op_get_link,
2015 	.get_strings		= bcm_sysport_get_strings,
2016 	.get_ethtool_stats	= bcm_sysport_get_stats,
2017 	.get_sset_count		= bcm_sysport_get_sset_count,
2018 	.get_wol		= bcm_sysport_get_wol,
2019 	.set_wol		= bcm_sysport_set_wol,
2020 	.get_coalesce		= bcm_sysport_get_coalesce,
2021 	.set_coalesce		= bcm_sysport_set_coalesce,
2022 	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
2023 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
2024 };
2025 
2026 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
2027 				    void *accel_priv,
2028 				    select_queue_fallback_t fallback)
2029 {
2030 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2031 	u16 queue = skb_get_queue_mapping(skb);
2032 	struct bcm_sysport_tx_ring *tx_ring;
2033 	unsigned int q, port;
2034 
2035 	if (!netdev_uses_dsa(dev))
2036 		return fallback(dev, skb);
2037 
2038 	/* DSA tagging layer will have configured the correct queue */
2039 	q = BRCM_TAG_GET_QUEUE(queue);
2040 	port = BRCM_TAG_GET_PORT(queue);
2041 	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2042 
2043 	return tx_ring->index;
2044 }
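
/* Worked (illustrative) example: with 4 TX queues per switch port, a
 * queue_mapping encoding switch port 1, queue 2 resolves as
 *
 *	q    = BRCM_TAG_GET_QUEUE(queue);     -> 2
 *	port = BRCM_TAG_GET_PORT(queue);      -> 1
 *	tx_ring = priv->ring_map[2 + 1 * 4];  -> ring_map[6]
 *
 * and the frame goes out on that ring's hardware index.
 */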
2045 
2046 static int bcm_sysport_map_queues(struct net_device *dev,
2047 				  struct dsa_notifier_register_info *info)
2048 {
2049 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2050 	struct bcm_sysport_tx_ring *ring;
2051 	struct net_device *slave_dev;
2052 	unsigned int num_tx_queues;
2053 	unsigned int q, start, port;
2054 
2055 	/* We cannot set up queue inspection for switches that are not
2056 	 * directly attached
2057 	 */
2058 	if (info->switch_number)
2059 		return 0;
2060 
2061 	port = info->port_number;
2062 	slave_dev = info->info.dev;
2063 
2064 	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
2065 	 * 1:1 mapping, only a 2:1 mapping. By halving the number of queues of
2066 	 * the per-port (slave_dev) network devices, we achieve just that.
2067 	 * This needs to happen now, before any slave network device is used,
2068 	 * so that it accurately reflects the number of real TX queues.
2069 	 */
2070 	if (priv->is_lite)
2071 		netif_set_real_num_tx_queues(slave_dev,
2072 					     slave_dev->num_tx_queues / 2);
2073 	num_tx_queues = slave_dev->real_num_tx_queues;
2074 
2075 	if (priv->per_port_num_tx_queues &&
2076 	    priv->per_port_num_tx_queues != num_tx_queues)
2077 		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
2078 
2079 	priv->per_port_num_tx_queues = num_tx_queues;
2080 
2081 	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
2082 	for (q = 0; q < num_tx_queues; q++) {
2083 		ring = &priv->tx_rings[q + start];
2084 
2085 		/* Just remember the mapping here; the actual programming is
2086 		 * done during bcm_sysport_init_tx_ring
2087 		 */
2088 		ring->switch_queue = q;
2089 		ring->switch_port = port;
2090 		priv->ring_map[q + port * num_tx_queues] = ring;
2091 
2092 		/* Set all queues as being used now */
2093 		set_bit(q + start, &priv->queue_bitmap);
2094 	}
2095 
2096 	return 0;
2097 }
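
/* Illustrative layout for two directly attached switch ports with 4
 * queues each, assuming they register in order and rings 0-7 were free:
 * port 0 claims tx_rings[0..3], port 1 claims tx_rings[4..7], and
 *
 *	ring_map[q + 0 * 4] = &tx_rings[q];		port 0, queue q
 *	ring_map[q + 1 * 4] = &tx_rings[q + 4];		port 1, queue q
 *
 * which is exactly the index bcm_sysport_select_queue() recomputes.
 */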
2098 
2099 static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
2100 				    unsigned long event, void *ptr)
2101 {
2102 	struct dsa_notifier_register_info *info;
2103 
2104 	if (event != DSA_PORT_REGISTER)
2105 		return NOTIFY_DONE;
2106 
2107 	info = ptr;
2108 
2109 	return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
2110 }
2111 
2112 static const struct net_device_ops bcm_sysport_netdev_ops = {
2113 	.ndo_start_xmit		= bcm_sysport_xmit,
2114 	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
2115 	.ndo_open		= bcm_sysport_open,
2116 	.ndo_stop		= bcm_sysport_stop,
2117 	.ndo_set_features	= bcm_sysport_set_features,
2118 	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
2119 	.ndo_set_mac_address	= bcm_sysport_change_mac,
2120 #ifdef CONFIG_NET_POLL_CONTROLLER
2121 	.ndo_poll_controller	= bcm_sysport_poll_controller,
2122 #endif
2123 	.ndo_get_stats64	= bcm_sysport_get_stats64,
2124 	.ndo_select_queue	= bcm_sysport_select_queue,
2125 };
2126 
2127 #define REV_FMT	"v%2x.%02x"
2128 
2129 static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
2130 	[SYSTEMPORT] = {
2131 		.is_lite = false,
2132 		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
2133 	},
2134 	[SYSTEMPORT_LITE] = {
2135 		.is_lite = true,
2136 		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
2137 	},
2138 };
2139 
2140 static const struct of_device_id bcm_sysport_of_match[] = {
2141 	{ .compatible = "brcm,systemportlite-v1.00",
2142 	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
2143 	{ .compatible = "brcm,systemport-v1.00",
2144 	  .data = &bcm_sysport_params[SYSTEMPORT] },
2145 	{ .compatible = "brcm,systemport",
2146 	  .data = &bcm_sysport_params[SYSTEMPORT] },
2147 	{ /* sentinel */ }
2148 };
2149 MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
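
/* An illustrative device-tree node matched by the table above; the unit
 * address, register size, interrupt specifiers and queue counts are made
 * up for the example:
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0 22 0>, <0 23 0>;
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *	};
 */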
2150 
2151 static int bcm_sysport_probe(struct platform_device *pdev)
2152 {
2153 	const struct bcm_sysport_hw_params *params;
2154 	const struct of_device_id *of_id = NULL;
2155 	struct bcm_sysport_priv *priv;
2156 	struct device_node *dn;
2157 	struct net_device *dev;
2158 	const void *macaddr;
2159 	struct resource *r;
2160 	u32 txq, rxq;
2161 	int ret;
2162 
2163 	dn = pdev->dev.of_node;
2164 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2165 	of_id = of_match_node(bcm_sysport_of_match, dn);
2166 	if (!of_id || !of_id->data)
2167 		return -EINVAL;
2168 
2169 	/* Fairly quickly we need to know the type of adapter we have */
2170 	params = of_id->data;
2171 
2172 	/* Read the Transmit/Receive Queue properties */
2173 	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
2174 		txq = TDMA_NUM_RINGS;
2175 	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
2176 		rxq = 1;
2177 
2178 	/* Sanity check the number of transmit queues */
2179 	if (!txq || txq > TDMA_NUM_RINGS)
2180 		return -EINVAL;
2181 
2182 	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2183 	if (!dev)
2184 		return -ENOMEM;
2185 
2186 	/* Initialize private members */
2187 	priv = netdev_priv(dev);
2188 
2189 	/* Allocate number of TX rings */
2190 	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2191 				      sizeof(struct bcm_sysport_tx_ring),
2192 				      GFP_KERNEL);
2193 	if (!priv->tx_rings)
2194 		return -ENOMEM;
2195 
2196 	priv->is_lite = params->is_lite;
2197 	priv->num_rx_desc_words = params->num_rx_desc_words;
2198 
2199 	priv->irq0 = platform_get_irq(pdev, 0);
2200 	if (!priv->is_lite) {
2201 		priv->irq1 = platform_get_irq(pdev, 1);
2202 		priv->wol_irq = platform_get_irq(pdev, 2);
2203 	} else {
2204 		priv->wol_irq = platform_get_irq(pdev, 1);
2205 	}
2206 	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2207 		dev_err(&pdev->dev, "invalid interrupts\n");
2208 		ret = -EINVAL;
2209 		goto err_free_netdev;
2210 	}
2211 
2212 	priv->base = devm_ioremap_resource(&pdev->dev, r);
2213 	if (IS_ERR(priv->base)) {
2214 		ret = PTR_ERR(priv->base);
2215 		goto err_free_netdev;
2216 	}
2217 
2218 	priv->netdev = dev;
2219 	priv->pdev = pdev;
2220 
2221 	priv->phy_interface = of_get_phy_mode(dn);
2222 	/* Default to GMII interface mode */
2223 	if (priv->phy_interface < 0)
2224 		priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2225 
2226 	/* In the case of a fixed PHY, the DT node associated
2227 	 * with the PHY is the Ethernet MAC DT node.
2228 	 */
2229 	if (of_phy_is_fixed_link(dn)) {
2230 		ret = of_phy_register_fixed_link(dn);
2231 		if (ret) {
2232 			dev_err(&pdev->dev, "failed to register fixed PHY\n");
2233 			goto err_free_netdev;
2234 		}
2235 
2236 		priv->phy_dn = dn;
2237 	}
2238 
2239 	/* Initialize netdevice members */
2240 	macaddr = of_get_mac_address(dn);
2241 	if (!macaddr || !is_valid_ether_addr(macaddr)) {
2242 		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
2243 		eth_hw_addr_random(dev);
2244 	} else {
2245 		ether_addr_copy(dev->dev_addr, macaddr);
2246 	}
2247 
2248 	SET_NETDEV_DEV(dev, &pdev->dev);
2249 	dev_set_drvdata(&pdev->dev, dev);
2250 	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
2251 	dev->netdev_ops = &bcm_sysport_netdev_ops;
2252 	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
2253 
2254 	/* HW supported features, none enabled by default */
2255 	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
2256 				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2257 
2258 	/* Request the WOL interrupt and advertise suspend if available */
2259 	priv->wol_irq_disabled = 1;
2260 	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
2261 			       bcm_sysport_wol_isr, 0, dev->name, priv);
2262 	if (!ret)
2263 		device_set_wakeup_capable(&pdev->dev, 1);
2264 
2265 	/* Set the needed headroom once and for all */
2266 	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
2267 	dev->needed_headroom += sizeof(struct bcm_tsb);
2268 
2269 	/* libphy will adjust the link state accordingly */
2270 	netif_carrier_off(dev);
2271 
2272 	u64_stats_init(&priv->syncp);
2273 
2274 	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
2275 
2276 	ret = register_dsa_notifier(&priv->dsa_notifier);
2277 	if (ret) {
2278 		dev_err(&pdev->dev, "failed to register DSA notifier\n");
2279 		goto err_deregister_fixed_link;
2280 	}
2281 
2282 	ret = register_netdev(dev);
2283 	if (ret) {
2284 		dev_err(&pdev->dev, "failed to register net_device\n");
2285 		goto err_deregister_notifier;
2286 	}
2287 
2288 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2289 	dev_info(&pdev->dev,
2290 		 "Broadcom SYSTEMPORT%s" REV_FMT
2291 		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
2292 		 priv->is_lite ? " Lite" : "",
2293 		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2294 		 priv->base, priv->irq0, priv->irq1, txq, rxq);
2295 
2296 	return 0;
2297 
2298 err_deregister_notifier:
2299 	unregister_dsa_notifier(&priv->dsa_notifier);
2300 err_deregister_fixed_link:
2301 	if (of_phy_is_fixed_link(dn))
2302 		of_phy_deregister_fixed_link(dn);
2303 err_free_netdev:
2304 	free_netdev(dev);
2305 	return ret;
2306 }
2307 
2308 static int bcm_sysport_remove(struct platform_device *pdev)
2309 {
2310 	struct net_device *dev = dev_get_drvdata(&pdev->dev);
2311 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2312 	struct device_node *dn = pdev->dev.of_node;
2313 
2314 	/* Not much to do; ndo_close has been called
2315 	 * and we use managed allocations
2316 	 */
2317 	unregister_dsa_notifier(&priv->dsa_notifier);
2318 	unregister_netdev(dev);
2319 	if (of_phy_is_fixed_link(dn))
2320 		of_phy_deregister_fixed_link(dn);
2321 	free_netdev(dev);
2322 	dev_set_drvdata(&pdev->dev, NULL);
2323 
2324 	return 0;
2325 }
2326 
2327 #ifdef CONFIG_PM_SLEEP
2328 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2329 {
2330 	struct net_device *ndev = priv->netdev;
2331 	unsigned int timeout = 1000;
2332 	u32 reg;
2333 
2334 	/* Password has already been programmed */
2335 	reg = umac_readl(priv, UMAC_MPD_CTRL);
2336 	reg |= MPD_EN;
2337 	reg &= ~PSW_EN;
2338 	if (priv->wolopts & WAKE_MAGICSECURE)
2339 		reg |= PSW_EN;
2340 	umac_writel(priv, reg, UMAC_MPD_CTRL);
2341 
2342 	/* Make sure RBUF entered WoL mode as a result */
2343 	do {
2344 		reg = rbuf_readl(priv, RBUF_STATUS);
2345 		if (reg & RBUF_WOL_MODE)
2346 			break;
2347 
2348 		udelay(10);
2349 	} while (timeout-- > 0);
2350 
2351 	/* Do not leave the UniMAC RBUF matching only MPD packets */
2352 	if (!timeout) {
2353 		reg = umac_readl(priv, UMAC_MPD_CTRL);
2354 		reg &= ~MPD_EN;
2355 		umac_writel(priv, reg, UMAC_MPD_CTRL);
2356 		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2357 		return -ETIMEDOUT;
2358 	}
2359 
2360 	/* UniMAC receive needs to be turned on */
2361 	umac_enable_set(priv, CMD_RX_EN, 1);
2362 
2363 	/* Enable the interrupt wake-up source */
2364 	intrl2_0_mask_clear(priv, INTRL2_0_MPD);
2365 
2366 	netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2367 
2368 	return 0;
2369 }
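
/* For reference: the MPD matcher armed above looks for the standard
 * Wake-on-LAN "magic packet" payload, i.e. 6 bytes of 0xff followed by
 * the station MAC address repeated 16 times, and, when PSW_EN is set for
 * WAKE_MAGICSECURE, a trailing 6-byte SecureOn password.
 */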
2370 
2371 static int bcm_sysport_suspend(struct device *d)
2372 {
2373 	struct net_device *dev = dev_get_drvdata(d);
2374 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2375 	unsigned int i;
2376 	int ret = 0;
2377 	u32 reg;
2378 
2379 	if (!netif_running(dev))
2380 		return 0;
2381 
2382 	bcm_sysport_netif_stop(dev);
2383 
2384 	phy_suspend(dev->phydev);
2385 
2386 	netif_device_detach(dev);
2387 
2388 	/* Disable UniMAC RX */
2389 	umac_enable_set(priv, CMD_RX_EN, 0);
2390 
2391 	ret = rdma_enable_set(priv, 0);
2392 	if (ret) {
2393 		netdev_err(dev, "RDMA timeout!\n");
2394 		return ret;
2395 	}
2396 
2397 	/* Disable RXCHK if enabled */
2398 	if (priv->rx_chk_en) {
2399 		reg = rxchk_readl(priv, RXCHK_CONTROL);
2400 		reg &= ~RXCHK_EN;
2401 		rxchk_writel(priv, reg, RXCHK_CONTROL);
2402 	}
2403 
2404 	/* Flush RX pipe */
2405 	if (!priv->wolopts)
2406 		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
2407 
2408 	ret = tdma_enable_set(priv, 0);
2409 	if (ret) {
2410 		netdev_err(dev, "TDMA timeout!\n");
2411 		return ret;
2412 	}
2413 
2414 	/* Wait for a packet boundary */
2415 	usleep_range(2000, 3000);
2416 
2417 	umac_enable_set(priv, CMD_TX_EN, 0);
2418 
2419 	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2420 
2421 	/* Free RX/TX rings SW structures */
2422 	for (i = 0; i < dev->num_tx_queues; i++)
2423 		bcm_sysport_fini_tx_ring(priv, i);
2424 	bcm_sysport_fini_rx_ring(priv);
2425 
2426 	/* Get prepared for Wake-on-LAN */
2427 	if (device_may_wakeup(d) && priv->wolopts)
2428 		ret = bcm_sysport_suspend_to_wol(priv);
2429 
2430 	return ret;
2431 }
2432 
2433 static int bcm_sysport_resume(struct device *d)
2434 {
2435 	struct net_device *dev = dev_get_drvdata(d);
2436 	struct bcm_sysport_priv *priv = netdev_priv(dev);
2437 	unsigned int i;
2438 	u32 reg;
2439 	int ret;
2440 
2441 	if (!netif_running(dev))
2442 		return 0;
2443 
2444 	umac_reset(priv);
2445 
2446 	/* We may have been suspended and never received a WOL event that
2447 	 * would turn off MPD detection; take care of that now
2448 	 */
2449 	bcm_sysport_resume_from_wol(priv);
2450 
2451 	/* Initialize both hardware and software rings */
2452 	for (i = 0; i < dev->num_tx_queues; i++) {
2453 		ret = bcm_sysport_init_tx_ring(priv, i);
2454 		if (ret) {
2455 			netdev_err(dev, "failed to initialize TX ring %d\n",
2456 				   i);
2457 			goto out_free_tx_rings;
2458 		}
2459 	}
2460 
2461 	/* Initialize linked-list */
2462 	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2463 
2464 	/* Initialize RX ring */
2465 	ret = bcm_sysport_init_rx_ring(priv);
2466 	if (ret) {
2467 		netdev_err(dev, "failed to initialize RX ring\n");
2468 		goto out_free_rx_ring;
2469 	}
2470 
2471 	netif_device_attach(dev);
2472 
2473 	/* RX pipe enable */
2474 	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2475 
2476 	ret = rdma_enable_set(priv, 1);
2477 	if (ret) {
2478 		netdev_err(dev, "failed to enable RDMA\n");
2479 		goto out_free_rx_ring;
2480 	}
2481 
2482 	/* Re-enable RXCHK if it was enabled before suspend */
2483 	if (priv->rx_chk_en) {
2484 		reg = rxchk_readl(priv, RXCHK_CONTROL);
2485 		reg |= RXCHK_EN;
2486 		rxchk_writel(priv, reg, RXCHK_CONTROL);
2487 	}
2488 
2489 	rbuf_init(priv);
2490 
2491 	/* Set maximum frame length */
2492 	if (!priv->is_lite)
2493 		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2494 	else
2495 		gib_set_pad_extension(priv);
2496 
2497 	/* Set MAC address */
2498 	umac_set_hw_addr(priv, dev->dev_addr);
2499 
2500 	umac_enable_set(priv, CMD_RX_EN, 1);
2501 
2502 	/* TX pipe enable */
2503 	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2504 
2505 	umac_enable_set(priv, CMD_TX_EN, 1);
2506 
2507 	ret = tdma_enable_set(priv, 1);
2508 	if (ret) {
2509 		netdev_err(dev, "failed to enable TDMA\n");
2510 		goto out_free_rx_ring;
2511 	}
2512 
2513 	phy_resume(dev->phydev);
2514 
2515 	bcm_sysport_netif_start(dev);
2516 
2517 	return 0;
2518 
2519 out_free_rx_ring:
2520 	bcm_sysport_fini_rx_ring(priv);
2521 out_free_tx_rings:
2522 	for (i = 0; i < dev->num_tx_queues; i++)
2523 		bcm_sysport_fini_tx_ring(priv, i);
2524 	return ret;
2525 }
2526 #endif
2527 
2528 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
2529 		bcm_sysport_suspend, bcm_sysport_resume);
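
/* SIMPLE_DEV_PM_OPS() fills the system-sleep slots (suspend/resume,
 * freeze/thaw, poweroff/restore) of a struct dev_pm_ops with the two
 * callbacks above; when CONFIG_PM_SLEEP is disabled those slots stay
 * NULL, which is why the callbacks themselves are guarded by
 * #ifdef CONFIG_PM_SLEEP.
 */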
2530 
2531 static struct platform_driver bcm_sysport_driver = {
2532 	.probe	= bcm_sysport_probe,
2533 	.remove	= bcm_sysport_remove,
2534 	.driver =  {
2535 		.name = "brcm-systemport",
2536 		.of_match_table = bcm_sysport_of_match,
2537 		.pm = &bcm_sysport_pm_ops,
2538 	},
2539 };
2540 module_platform_driver(bcm_sysport_driver);
2541 
2542 MODULE_AUTHOR("Broadcom Corporation");
2543 MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
2544 MODULE_ALIAS("platform:brcm-systemport");
2545 MODULE_LICENSE("GPL");
2546