/* xref: /linux/drivers/net/ethernet/broadcom/genet/bcmgenet.c (revision 7edbb0d389ccad68a75a2dcdbeb682014f1ccffe) */
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * writes unless the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * reads unless the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
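
/* Illustrative example (editorial note, not part of the driver): with
 * GENET_HAS_40BITS set on a 64-bit/LPAE build, a 40-bit DMA address such
 * as 0x1_2345_6789 is written as lower_32_bits() = 0x23456789 into
 * DMA_DESC_ADDRESS_LO and upper_32_bits() = 0x1 into DMA_DESC_ADDRESS_HI;
 * dmadesc_get_addr() reassembles the same value by OR-ing the HI word
 * shifted left by 32.
 */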

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These helpers deal with the register map change between GENET1.1 and
 * GENET2. Only the registers currently used by the driver are covered.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
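
/* Layout note (editorial sketch, derived from the offsets used above): each
 * DMA block starts with TOTAL_DESC descriptors (TOTAL_DESC * DMA_DESC_SIZE
 * bytes, skipped via GENET_TDMA_REG_OFF/GENET_RDMA_REG_OFF), followed by the
 * per-ring register sets (DMA_RINGS_SIZE bytes, skipped by the accessors
 * above), and only then the block-level control registers indexed through
 * enum dma_reg.
 */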

/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix the registers whose meaning
 * depends on the direction with TDMA_/RDMA_
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bit pointer addressing; for obvious reasons the LO
 * and HI word parts are contiguous, but this offsets the other registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
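
/* Worked example (editorial, assuming DMA_RING_SIZE is 0x40 as defined in
 * bcmgenet.h): reading TDMA_READ_PTR for TX ring 1 on GENET v4 resolves to
 * priv->base + tdma_offset + TOTAL_DESC * DMA_DESC_SIZE + 1 * 0x40 + 0x00,
 * i.e. the ring register sets sit back to back right after the descriptor
 * area.
 */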

static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	return 0;
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125 MHz; the DMA timeout tick is this reference
	 * clock divided by 1024, which yields roughly 8.192 us. Our maximum
	 * value has to fit in the DMA_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}
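
/* Worked example (editorial, not part of the driver): with the 125 MHz
 * reference divided by 1024, one timeout tick is ~8.192 us. Requesting
 * rx_coalesce_usecs = 50 programs DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks,
 * which the getter above reads back as 7 * 8192 / 1000 = 57 us - callers
 * should expect this rounding to the ~8.192 us tick granularity.
 */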

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}
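
/* Expansion example (editorial): STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt)
 * produces { .stat_string = "rx_pkts",
 *            .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->mib.rx.pkt),
 *            .stat_offset = offsetof(struct bcmgenet_priv, mib.rx.pkt),
 *            .type = BCMGENET_STAT_MIB_RX }, so the stats code can walk
 * priv at stat_offset without knowing the individual member names.
 */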

/* There is a 0xC gap between the end of RX and the beginning of TX stats,
 * and another between the end of TX stats and the beginning of the RX RUNT
 * stats.
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
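
/* Accounting example (editorial): j walks the hardware MIB linearly in
 * declaration order, so after the 29 u32 RX counters j = 29 * 4 = 0x74;
 * the first TX counter is then read at UMAC_MIB_START + 0x74 + 0xC, and
 * the RUNT counters add the 0xC gap twice via the fall-through cases above,
 * matching the two gaps described next to BCMGENET_STAT_OFFSET.
 */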

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27 MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}

static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}

/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE) {
		bcmgenet_phy_power_set(priv->dev, true);
		bcmgenet_mii_reset(priv->dev);
	}
}

/* ioctl handler for special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}

static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int bytes_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers have been transmitted since the last reclaim */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 dma_unmap_len(tx_cb_ptr, dma_len),
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	dev->stats.tx_packets += pkts_compl;
	dev->stats.tx_bytes += bytes_compl;

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
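
/* Wraparound example (editorial): the consumer index is a free-running
 * 16-bit counter, so if ring->c_index was 0xfffe and the hardware reports
 * c_index = 0x0001, txbds_ready = (DMA_C_INDEX_MASK + 1) - 0xfffe + 0x0001
 * = 3 buffers; the masked re-addition at the end of the reclaim keeps the
 * software copy consistent with the hardware counter.
 */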

static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
				struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}

/* Transmits a single SKB (either the head of a fragment list or a
 * standalone SKB); the caller must hold the ring lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}

/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();
	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets into the transmit status block (TSB)
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the checksum-valid bit for TCP and UDP, plus the
		 * special UDP flag for UDP over IPv4; otherwise clear the
		 * checksum info.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
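
/* Worked example (editorial sketch): for TCP over IPv4 with a 14-byte
 * Ethernet header and a 20-byte IP header, the checksum start sits 34 bytes
 * into the frame; after the 64-byte status block is pushed,
 * offset = (64 + 34) - 64 = 34, and the checksum field lands at
 * offset + skb->csum_offset = 34 + 16 = 50, both measured from the end of
 * the status block.
 */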

static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragment */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}

static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}

/* bcmgenet_desc_rx - descriptor-based rx processing.
 * This could be called from a bottom half, or from the NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
						DMA_RX_OV |
						DMA_RX_NO |
						DMA_RX_LG |
						DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			     priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove the 2 bytes the hardware added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it to the kernel */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index,
					  ring->c_index, RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}
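
/* Length accounting example (editorial sketch): for a 1518-byte frame (FCS
 * included) with the 64B receive status block enabled and crc_fwd_en set,
 * the hardware reports len = 64 + 2 + 1518 = 1584; the code above pulls 64
 * (status block) and 2 (IP alignment pad) and trims the 4-byte FCS, leaving
 * len = 1514 bytes handed to the stack.
 */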

/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete(napi);
		ring->int_enable(ring);
	}

	return work_done;
}

/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assignment */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}

static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

1821 
1822 	/* Falling out of the loop means all 1000 polls expired without
1823 	 * CMD_SW_RESET clearing, since success returns from inside it
1824 	 */
1825 	dev_err(kdev,
1826 		"timeout waiting for MAC to come out of reset\n");
1827 
1828 	return -ETIMEDOUT;
1829 }
1830 
1831 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
1832 {
1833 	/* Mask all interrupts. */
1834 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1835 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1836 	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1837 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1838 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1839 	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1840 }
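
/* The INTRL2 controllers expose write-1-to-set and write-1-to-clear mask
 * companions (INTRL2_CPU_MASK_SET / INTRL2_CPU_MASK_CLEAR) plus an ack
 * register (INTRL2_CPU_CLEAR), so masking all sources above is a plain
 * write of 0xFFFFFFFF with no read-modify-write cycle.
 */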
1841 
1842 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
1843 {
1844 	u32 int0_enable = 0;
1845 
1846 	/* Monitor cable plug/unplug events for the internal PHY, external
1847 	 * PHY, and MoCA PHY
1848 	 */
1849 	if (priv->internal_phy || priv->ext_phy) {
1850 		int0_enable |= UMAC_IRQ_LINK_EVENT;
1851 	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1854 		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1855 			int0_enable |= UMAC_IRQ_LINK_EVENT;
1856 	}
1857 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1858 }
1859 
1860 static int init_umac(struct bcmgenet_priv *priv)
1861 {
1862 	struct device *kdev = &priv->pdev->dev;
1863 	int ret;
1864 	u32 reg;
1865 	u32 int0_enable = 0;
1866 	u32 int1_enable = 0;
1867 	int i;
1868 
1869 	dev_dbg(kdev, "bcmgenet: init_umac\n");
1870 
1871 	ret = reset_umac(priv);
1872 	if (ret)
1873 		return ret;
1874 
1875 	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1876 	/* clear tx/rx counter */
1877 	bcmgenet_umac_writel(priv,
1878 			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
1879 			     UMAC_MIB_CTRL);
1880 	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
1881 
1882 	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1883 
1884 	/* init rx registers, enable ip header optimization */
1885 	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
1886 	reg |= RBUF_ALIGN_2B;
1887 	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
1888 
1889 	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
1890 		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
1891 
1892 	bcmgenet_intr_disable(priv);
1893 
1894 	/* Enable Rx default queue 16 interrupts */
1895 	int0_enable |= UMAC_IRQ_RXDMA_DONE;
1896 
1897 	/* Enable Tx default queue 16 interrupts */
1898 	int0_enable |= UMAC_IRQ_TXDMA_DONE;
1899 
1900 	/* Configure backpressure vectors for MoCA */
1901 	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1902 		reg = bcmgenet_bp_mc_get(priv);
1903 		reg |= BIT(priv->hw_params->bp_in_en_shift);
1904 
1905 		/* bp_mask: back pressure mask */
1906 		if (netif_is_multiqueue(priv->dev))
1907 			reg |= priv->hw_params->bp_in_mask;
1908 		else
1909 			reg &= ~priv->hw_params->bp_in_mask;
1910 		bcmgenet_bp_mc_set(priv, reg);
1911 	}
1912 
1913 	/* Enable MDIO interrupts on GENET v3+ */
1914 	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
1915 		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
1916 
1917 	/* Enable Rx priority queue interrupts */
1918 	for (i = 0; i < priv->hw_params->rx_queues; ++i)
1919 		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
1920 
1921 	/* Enable Tx priority queue interrupts */
1922 	for (i = 0; i < priv->hw_params->tx_queues; ++i)
1923 		int1_enable |= (1 << i);
1924 
1925 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1926 	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
1927 
1928 	/* The Rx/Tx engines are enabled later, from bcmgenet_netif_start() */
1929 	dev_dbg(kdev, "done init umac\n");
1930 
1931 	return 0;
1932 }
1933 
1934 /* Initialize a Tx ring along with corresponding hardware registers */
1935 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1936 				  unsigned int index, unsigned int size,
1937 				  unsigned int start_ptr, unsigned int end_ptr)
1938 {
1939 	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1940 	u32 words_per_bd = WORDS_PER_BD(priv);
1941 	u32 flow_period_val = 0;
1942 
1943 	spin_lock_init(&ring->lock);
1944 	ring->priv = priv;
1945 	ring->index = index;
1946 	if (index == DESC_INDEX) {
1947 		ring->queue = 0;
1948 		ring->int_enable = bcmgenet_tx_ring16_int_enable;
1949 		ring->int_disable = bcmgenet_tx_ring16_int_disable;
1950 	} else {
1951 		ring->queue = index + 1;
1952 		ring->int_enable = bcmgenet_tx_ring_int_enable;
1953 		ring->int_disable = bcmgenet_tx_ring_int_disable;
1954 	}
1955 	ring->cbs = priv->tx_cbs + start_ptr;
1956 	ring->size = size;
1957 	ring->clean_ptr = start_ptr;
1958 	ring->c_index = 0;
1959 	ring->free_bds = size;
1960 	ring->write_ptr = start_ptr;
1961 	ring->cb_ptr = start_ptr;
1962 	ring->end_ptr = end_ptr - 1;
1963 	ring->prod_index = 0;
1964 
1965 	/* Set flow period for ring != 16 */
1966 	if (index != DESC_INDEX)
1967 		flow_period_val = ENET_MAX_MTU_SIZE << 16;
1968 
1969 	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
1970 	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
1971 	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
1972 	/* Disable rate control for now */
1973 	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
1974 				  TDMA_FLOW_PERIOD);
1975 	bcmgenet_tdma_ring_writel(priv, index,
1976 				  ((size << DMA_RING_SIZE_SHIFT) |
1977 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
1978 
1979 	/* Set start and end address, read and write pointers */
1980 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1981 				  DMA_START_ADDR);
1982 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1983 				  TDMA_READ_PTR);
1984 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1985 				  TDMA_WRITE_PTR);
1986 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1987 				  DMA_END_ADDR);
1988 }
1989 
1990 /* Initialize a RDMA ring */
1991 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
1992 				 unsigned int index, unsigned int size,
1993 				 unsigned int start_ptr, unsigned int end_ptr)
1994 {
1995 	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
1996 	u32 words_per_bd = WORDS_PER_BD(priv);
1997 	int ret;
1998 
1999 	ring->priv = priv;
2000 	ring->index = index;
2001 	if (index == DESC_INDEX) {
2002 		ring->int_enable = bcmgenet_rx_ring16_int_enable;
2003 		ring->int_disable = bcmgenet_rx_ring16_int_disable;
2004 	} else {
2005 		ring->int_enable = bcmgenet_rx_ring_int_enable;
2006 		ring->int_disable = bcmgenet_rx_ring_int_disable;
2007 	}
2008 	ring->cbs = priv->rx_cbs + start_ptr;
2009 	ring->size = size;
2010 	ring->c_index = 0;
2011 	ring->read_ptr = start_ptr;
2012 	ring->cb_ptr = start_ptr;
2013 	ring->end_ptr = end_ptr - 1;
2014 
2015 	ret = bcmgenet_alloc_rx_buffers(priv, ring);
2016 	if (ret)
2017 		return ret;
2018 
2019 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
2020 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
2021 	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
2022 	bcmgenet_rdma_ring_writel(priv, index,
2023 				  ((size << DMA_RING_SIZE_SHIFT) |
2024 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2025 	bcmgenet_rdma_ring_writel(priv, index,
2026 				  (DMA_FC_THRESH_LO <<
2027 				   DMA_XOFF_THRESHOLD_SHIFT) |
2028 				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
2029 
2030 	/* Set start and end address, read and write pointers */
2031 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2032 				  DMA_START_ADDR);
2033 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2034 				  RDMA_READ_PTR);
2035 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2036 				  RDMA_WRITE_PTR);
2037 	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2038 				  DMA_END_ADDR);
2039 
2040 	return ret;
2041 }
2042 
2043 static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
2044 {
2045 	unsigned int i;
2046 	struct bcmgenet_tx_ring *ring;
2047 
2048 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2049 		ring = &priv->tx_rings[i];
2050 		netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
2051 	}
2052 
2053 	ring = &priv->tx_rings[DESC_INDEX];
2054 	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
2055 }
2056 
2057 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2058 {
2059 	unsigned int i;
2060 	struct bcmgenet_tx_ring *ring;
2061 
2062 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2063 		ring = &priv->tx_rings[i];
2064 		napi_enable(&ring->napi);
2065 	}
2066 
2067 	ring = &priv->tx_rings[DESC_INDEX];
2068 	napi_enable(&ring->napi);
2069 }
2070 
2071 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2072 {
2073 	unsigned int i;
2074 	struct bcmgenet_tx_ring *ring;
2075 
2076 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2077 		ring = &priv->tx_rings[i];
2078 		napi_disable(&ring->napi);
2079 	}
2080 
2081 	ring = &priv->tx_rings[DESC_INDEX];
2082 	napi_disable(&ring->napi);
2083 }
2084 
2085 static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2086 {
2087 	unsigned int i;
2088 	struct bcmgenet_tx_ring *ring;
2089 
2090 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2091 		ring = &priv->tx_rings[i];
2092 		netif_napi_del(&ring->napi);
2093 	}
2094 
2095 	ring = &priv->tx_rings[DESC_INDEX];
2096 	netif_napi_del(&ring->napi);
2097 }
2098 
2099 /* Initialize Tx queues
2100  *
2101  * Queues 0-3 are priority-based, each one has 32 descriptors,
2102  * with queue 0 being the highest priority queue.
2103  *
2104  * Queue 16 is the default Tx queue with
2105  * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2106  *
2107  * The transmit control block pool is then partitioned as follows:
2108  * - Tx queue 0 uses tx_cbs[0..31]
2109  * - Tx queue 1 uses tx_cbs[32..63]
2110  * - Tx queue 2 uses tx_cbs[64..95]
2111  * - Tx queue 3 uses tx_cbs[96..127]
2112  * - Tx queue 16 uses tx_cbs[128..255]
2113  */
2114 static void bcmgenet_init_tx_queues(struct net_device *dev)
2115 {
2116 	struct bcmgenet_priv *priv = netdev_priv(dev);
2117 	u32 i, dma_enable;
2118 	u32 dma_ctrl, ring_cfg;
2119 	u32 dma_priority[3] = {0, 0, 0};
2120 
2121 	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
2122 	dma_enable = dma_ctrl & DMA_EN;
2123 	dma_ctrl &= ~DMA_EN;
2124 	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2125 
2126 	dma_ctrl = 0;
2127 	ring_cfg = 0;
2128 
2129 	/* Enable strict priority arbiter mode */
2130 	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2131 
2132 	/* Initialize Tx priority queues */
2133 	for (i = 0; i < priv->hw_params->tx_queues; i++) {
2134 		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
2135 				      i * priv->hw_params->tx_bds_per_q,
2136 				      (i + 1) * priv->hw_params->tx_bds_per_q);
2137 		ring_cfg |= (1 << i);
2138 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2139 		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2140 			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
2141 	}
2142 
2143 	/* Initialize Tx default queue 16 */
2144 	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
2145 			      priv->hw_params->tx_queues *
2146 			      priv->hw_params->tx_bds_per_q,
2147 			      TOTAL_DESC);
2148 	ring_cfg |= (1 << DESC_INDEX);
2149 	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2150 	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
2151 		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
2152 		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
2153 
2154 	/* Set Tx queue priorities */
2155 	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2156 	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2157 	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2158 
2159 	/* Initialize Tx NAPI */
2160 	bcmgenet_init_tx_napi(priv);
2161 
2162 	/* Enable Tx queues */
2163 	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2164 
2165 	/* Enable Tx DMA */
2166 	if (dma_enable)
2167 		dma_ctrl |= DMA_EN;
2168 	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2169 }
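
/* Worked example of the partitioning above, assuming the stock hw_params
 * for GENET v2+ (tx_queues = 4, tx_bds_per_q = 32, TOTAL_DESC = 256, as
 * in the Q16 comment before the function); values are illustrative:
 *
 *   queue 0:  descriptors   0- 31, priority 0 (highest)
 *   queue 1:  descriptors  32- 63, priority 1
 *   queue 2:  descriptors  64- 95, priority 2
 *   queue 3:  descriptors  96-127, priority 3
 *   queue 16: descriptors 128-255, priority 4 (lowest)
 *
 * matching the tx_cbs[] split documented before the function.
 */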
2170 
2171 static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
2172 {
2173 	unsigned int i;
2174 	struct bcmgenet_rx_ring *ring;
2175 
2176 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2177 		ring = &priv->rx_rings[i];
2178 		netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2179 	}
2180 
2181 	ring = &priv->rx_rings[DESC_INDEX];
2182 	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2183 }
2184 
2185 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2186 {
2187 	unsigned int i;
2188 	struct bcmgenet_rx_ring *ring;
2189 
2190 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2191 		ring = &priv->rx_rings[i];
2192 		napi_enable(&ring->napi);
2193 	}
2194 
2195 	ring = &priv->rx_rings[DESC_INDEX];
2196 	napi_enable(&ring->napi);
2197 }
2198 
2199 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2200 {
2201 	unsigned int i;
2202 	struct bcmgenet_rx_ring *ring;
2203 
2204 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2205 		ring = &priv->rx_rings[i];
2206 		napi_disable(&ring->napi);
2207 	}
2208 
2209 	ring = &priv->rx_rings[DESC_INDEX];
2210 	napi_disable(&ring->napi);
2211 }
2212 
2213 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2214 {
2215 	unsigned int i;
2216 	struct bcmgenet_rx_ring *ring;
2217 
2218 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2219 		ring = &priv->rx_rings[i];
2220 		netif_napi_del(&ring->napi);
2221 	}
2222 
2223 	ring = &priv->rx_rings[DESC_INDEX];
2224 	netif_napi_del(&ring->napi);
2225 }
2226 
2227 /* Initialize Rx queues
2228  *
2229  * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2230  * used to direct traffic to these queues.
2231  *
2232  * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2233  */
2234 static int bcmgenet_init_rx_queues(struct net_device *dev)
2235 {
2236 	struct bcmgenet_priv *priv = netdev_priv(dev);
2237 	u32 i;
2238 	u32 dma_enable;
2239 	u32 dma_ctrl;
2240 	u32 ring_cfg;
2241 	int ret;
2242 
2243 	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2244 	dma_enable = dma_ctrl & DMA_EN;
2245 	dma_ctrl &= ~DMA_EN;
2246 	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2247 
2248 	dma_ctrl = 0;
2249 	ring_cfg = 0;
2250 
2251 	/* Initialize Rx priority queues */
2252 	for (i = 0; i < priv->hw_params->rx_queues; i++) {
2253 		ret = bcmgenet_init_rx_ring(priv, i,
2254 					    priv->hw_params->rx_bds_per_q,
2255 					    i * priv->hw_params->rx_bds_per_q,
2256 					    (i + 1) *
2257 					    priv->hw_params->rx_bds_per_q);
2258 		if (ret)
2259 			return ret;
2260 
2261 		ring_cfg |= (1 << i);
2262 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2263 	}
2264 
2265 	/* Initialize Rx default queue 16 */
2266 	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2267 				    priv->hw_params->rx_queues *
2268 				    priv->hw_params->rx_bds_per_q,
2269 				    TOTAL_DESC);
2270 	if (ret)
2271 		return ret;
2272 
2273 	ring_cfg |= (1 << DESC_INDEX);
2274 	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2275 
2276 	/* Initialize Rx NAPI */
2277 	bcmgenet_init_rx_napi(priv);
2278 
2279 	/* Enable rings */
2280 	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2281 
2282 	/* Re-enable Rx DMA only if it was enabled before we disabled it above */
2283 	if (dma_enable)
2284 		dma_ctrl |= DMA_EN;
2285 	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2286 
2287 	return 0;
2288 }
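
/* With the hw_params table further below, every GENET version currently
 * declares rx_queues = 0, so the priority-queue loop above is a no-op
 * and default queue 16 owns all GENET_Q16_RX_BD_CNT = TOTAL_DESC
 * descriptors.
 */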
2289 
2290 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2291 {
2292 	int ret = 0;
2293 	int timeout = 0;
2294 	u32 reg;
2295 	u32 dma_ctrl;
2296 	int i;
2297 
2298 	/* Disable TDMA so that no more frames enter the TX DMA pipeline */
2299 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2300 	reg &= ~DMA_EN;
2301 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2302 
2303 	/* Check TDMA status register to confirm TDMA is disabled */
2304 	while (timeout++ < DMA_TIMEOUT_VAL) {
2305 		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2306 		if (reg & DMA_DISABLED)
2307 			break;
2308 
2309 		udelay(1);
2310 	}
2311 
2312 	if (timeout > DMA_TIMEOUT_VAL) {
2313 		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2314 		ret = -ETIMEDOUT;
2315 	}
2316 
2317 	/* Wait 10ms for packet drain in both tx and rx dma */
2318 	usleep_range(10000, 20000);
2319 
2320 	/* Disable RDMA */
2321 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2322 	reg &= ~DMA_EN;
2323 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2324 
2325 	timeout = 0;
2326 	/* Check RDMA status register to confirm RDMA is disabled */
2327 	while (timeout++ < DMA_TIMEOUT_VAL) {
2328 		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2329 		if (reg & DMA_DISABLED)
2330 			break;
2331 
2332 		udelay(1);
2333 	}
2334 
2335 	if (timeout > DMA_TIMEOUT_VAL) {
2336 		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2337 		ret = -ETIMEDOUT;
2338 	}
2339 
2340 	dma_ctrl = 0;
2341 	for (i = 0; i < priv->hw_params->rx_queues; i++)
2342 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2343 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2344 	reg &= ~dma_ctrl;
2345 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2346 
2347 	dma_ctrl = 0;
2348 	for (i = 0; i < priv->hw_params->tx_queues; i++)
2349 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2350 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2351 	reg &= ~dma_ctrl;
2352 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2353 
2354 	return ret;
2355 }
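
/* The teardown order above is deliberate: TDMA is stopped first so no
 * new frames enter the pipeline, the 10ms sleep lets in-flight packets
 * drain through the MAC, and only then are RDMA and the per-ring buffer
 * enables cleared.
 */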
2356 
2357 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2358 {
2359 	int i;
2360 
2361 	bcmgenet_fini_rx_napi(priv);
2362 	bcmgenet_fini_tx_napi(priv);
2363 
2364 	/* disable DMA */
2365 	bcmgenet_dma_teardown(priv);
2366 
2367 	for (i = 0; i < priv->num_tx_bds; i++) {
2368 		if (priv->tx_cbs[i].skb != NULL) {
2369 			dev_kfree_skb(priv->tx_cbs[i].skb);
2370 			priv->tx_cbs[i].skb = NULL;
2371 		}
2372 	}
2373 
2374 	bcmgenet_free_rx_buffers(priv);
2375 	kfree(priv->rx_cbs);
2376 	kfree(priv->tx_cbs);
2377 }
2378 
2379 /* bcmgenet_init_dma: Initialize DMA rings and control-block bookkeeping */
2380 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2381 {
2382 	int ret;
2383 	unsigned int i;
2384 	struct enet_cb *cb;
2385 
2386 	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2387 
2388 	/* Initialize common Rx ring structures */
2389 	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
2390 	priv->num_rx_bds = TOTAL_DESC;
2391 	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
2392 			       GFP_KERNEL);
2393 	if (!priv->rx_cbs)
2394 		return -ENOMEM;
2395 
2396 	for (i = 0; i < priv->num_rx_bds; i++) {
2397 		cb = priv->rx_cbs + i;
2398 		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
2399 	}
2400 
2401 	/* Initialize common TX ring structures */
2402 	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
2403 	priv->num_tx_bds = TOTAL_DESC;
2404 	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
2405 			       GFP_KERNEL);
2406 	if (!priv->tx_cbs) {
2407 		kfree(priv->rx_cbs);
2408 		return -ENOMEM;
2409 	}
2410 
2411 	for (i = 0; i < priv->num_tx_bds; i++) {
2412 		cb = priv->tx_cbs + i;
2413 		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
2414 	}
2415 
2416 	/* Init RDMA */
2417 	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
2418 
2419 	/* Initialize Rx queues */
2420 	ret = bcmgenet_init_rx_queues(priv->dev);
2421 	if (ret) {
2422 		netdev_err(priv->dev, "failed to initialize Rx queues\n");
2423 		bcmgenet_free_rx_buffers(priv);
2424 		kfree(priv->rx_cbs);
2425 		kfree(priv->tx_cbs);
2426 		return ret;
2427 	}
2428 
2429 	/* Init TDMA */
2430 	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
2431 
2432 	/* Initialize Tx queues */
2433 	bcmgenet_init_tx_queues(priv->dev);
2434 
2435 	return 0;
2436 }
2437 
2438 /* Interrupt bottom half */
2439 static void bcmgenet_irq_task(struct work_struct *work)
2440 {
2441 	struct bcmgenet_priv *priv = container_of(
2442 			work, struct bcmgenet_priv, bcmgenet_irq_work);
2443 
2444 	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2445 
2446 	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
2447 		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
2448 		netif_dbg(priv, wol, priv->dev,
2449 			  "magic packet detected, waking up\n");
2450 		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
2451 	}
2452 
2453 	/* Link UP/DOWN event */
2454 	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
2455 		phy_mac_interrupt(priv->phydev,
2456 				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2457 		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
2458 	}
2459 }
2460 
2461 /* bcmgenet_isr1: handle Rx and Tx priority queues */
2462 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2463 {
2464 	struct bcmgenet_priv *priv = dev_id;
2465 	struct bcmgenet_rx_ring *rx_ring;
2466 	struct bcmgenet_tx_ring *tx_ring;
2467 	unsigned int index;
2468 
2469 	/* Save irq status for bottom-half processing. */
2470 	priv->irq1_stat =
2471 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2472 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2473 
2474 	/* clear interrupts */
2475 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2476 
2477 	netif_dbg(priv, intr, priv->dev,
2478 		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2479 
2480 	/* Check Rx priority queue interrupts */
2481 	for (index = 0; index < priv->hw_params->rx_queues; index++) {
2482 		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
2483 			continue;
2484 
2485 		rx_ring = &priv->rx_rings[index];
2486 
2487 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
2488 			rx_ring->int_disable(rx_ring);
2489 			__napi_schedule(&rx_ring->napi);
2490 		}
2491 	}
2492 
2493 	/* Check Tx priority queue interrupts */
2494 	for (index = 0; index < priv->hw_params->tx_queues; index++) {
2495 		if (!(priv->irq1_stat & BIT(index)))
2496 			continue;
2497 
2498 		tx_ring = &priv->tx_rings[index];
2499 
2500 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
2501 			tx_ring->int_disable(tx_ring);
2502 			__napi_schedule(&tx_ring->napi);
2503 		}
2504 	}
2505 
2506 	return IRQ_HANDLED;
2507 }
2508 
2509 /* bcmgenet_isr0: handle Rx and Tx default queues and miscellaneous interrupts */
2510 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2511 {
2512 	struct bcmgenet_priv *priv = dev_id;
2513 	struct bcmgenet_rx_ring *rx_ring;
2514 	struct bcmgenet_tx_ring *tx_ring;
2515 
2516 	/* Save irq status for bottom-half processing. */
2517 	priv->irq0_stat =
2518 		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
2519 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2520 
2521 	/* clear interrupts */
2522 	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
2523 
2524 	netif_dbg(priv, intr, priv->dev,
2525 		  "IRQ=0x%x\n", priv->irq0_stat);
2526 
2527 	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
2528 		rx_ring = &priv->rx_rings[DESC_INDEX];
2529 
2530 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
2531 			rx_ring->int_disable(rx_ring);
2532 			__napi_schedule(&rx_ring->napi);
2533 		}
2534 	}
2535 
2536 	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
2537 		tx_ring = &priv->tx_rings[DESC_INDEX];
2538 
2539 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
2540 			tx_ring->int_disable(tx_ring);
2541 			__napi_schedule(&tx_ring->napi);
2542 		}
2543 	}
2544 
2545 	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2546 				UMAC_IRQ_PHY_DET_F |
2547 				UMAC_IRQ_LINK_EVENT |
2548 				UMAC_IRQ_HFB_SM |
2549 				UMAC_IRQ_HFB_MM |
2550 				UMAC_IRQ_MPD_R)) {
2551 		/* all other interrupts of interest are handled in the bottom half */
2552 		schedule_work(&priv->bcmgenet_irq_work);
2553 	}
2554 
2555 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2556 	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
2557 		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2558 		wake_up(&priv->wq);
2559 	}
2560 
2561 	return IRQ_HANDLED;
2562 }
2563 
2564 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
2565 {
2566 	struct bcmgenet_priv *priv = dev_id;
2567 
2568 	pm_wakeup_event(&priv->pdev->dev, 0);
2569 
2570 	return IRQ_HANDLED;
2571 }
2572 
2573 #ifdef CONFIG_NET_POLL_CONTROLLER
2574 static void bcmgenet_poll_controller(struct net_device *dev)
2575 {
2576 	struct bcmgenet_priv *priv = netdev_priv(dev);
2577 
2578 	/* Invoke the main RX/TX interrupt handler */
2579 	disable_irq(priv->irq0);
2580 	bcmgenet_isr0(priv->irq0, priv);
2581 	enable_irq(priv->irq0);
2582 
2583 	/* And the interrupt handler for RX/TX priority queues */
2584 	disable_irq(priv->irq1);
2585 	bcmgenet_isr1(priv->irq1, priv);
2586 	enable_irq(priv->irq1);
2587 }
2588 #endif
2589 
2590 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
2591 {
2592 	u32 reg;
2593 
2594 	reg = bcmgenet_rbuf_ctrl_get(priv);
2595 	reg |= BIT(1);
2596 	bcmgenet_rbuf_ctrl_set(priv, reg);
2597 	udelay(10);
2598 
2599 	reg &= ~BIT(1);
2600 	bcmgenet_rbuf_ctrl_set(priv, reg);
2601 	udelay(10);
2602 }
2603 
2604 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
2605 				 unsigned char *addr)
2606 {
2607 	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
2608 			(addr[2] << 8) | addr[3], UMAC_MAC0);
2609 	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
2610 }
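
/* Worked example with the illustrative address 00:10:18:aa:bb:cc: the
 * writes above produce UMAC_MAC0 = 0x001018aa and UMAC_MAC1 = 0x0000bbcc.
 */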
2611 
2612 /* Returns a reusable dma control register value */
2613 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
2614 {
2615 	u32 reg;
2616 	u32 dma_ctrl;
2617 
2618 	/* disable DMA */
2619 	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2620 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2621 	reg &= ~dma_ctrl;
2622 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2623 
2624 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2625 	reg &= ~dma_ctrl;
2626 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2627 
2628 	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
2629 	udelay(10);
2630 	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
2631 
2632 	return dma_ctrl;
2633 }
2634 
2635 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
2636 {
2637 	u32 reg;
2638 
2639 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2640 	reg |= dma_ctrl;
2641 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2642 
2643 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2644 	reg |= dma_ctrl;
2645 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2646 }
2647 
2648 static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
2649 					   u32 f_index)
2650 {
2651 	u32 offset;
2652 	u32 reg;
2653 
2654 	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2655 	reg = bcmgenet_hfb_reg_readl(priv, offset);
2656 	return !!(reg & (1 << (f_index % 32)));
2657 }
2658 
2659 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
2660 {
2661 	u32 offset;
2662 	u32 reg;
2663 
2664 	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2665 	reg = bcmgenet_hfb_reg_readl(priv, offset);
2666 	reg |= (1 << (f_index % 32));
2667 	bcmgenet_hfb_reg_writel(priv, reg, offset);
2668 }
2669 
2670 static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
2671 						     u32 f_index, u32 rx_queue)
2672 {
2673 	u32 offset;
2674 	u32 reg;
2675 
2676 	offset = f_index / 8;
2677 	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
2678 	reg &= ~(0xF << (4 * (f_index % 8)));
2679 	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
2680 	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
2681 }
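
/* Example: f_index = 10 selects DMA_INDEX2RING_1 (10 / 8 = 1) and the
 * third nibble within it (10 % 8 = 2), so bits 11:8 carry the queue.
 */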
2682 
2683 static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
2684 					   u32 f_index, u32 f_length)
2685 {
2686 	u32 offset;
2687 	u32 reg;
2688 
2689 	offset = HFB_FLT_LEN_V3PLUS +
2690 		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
2691 		 sizeof(u32);
2692 	reg = bcmgenet_hfb_reg_readl(priv, offset);
2693 	reg &= ~(0xFF << (8 * (f_index % 4)));
2694 	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
2695 	bcmgenet_hfb_reg_writel(priv, reg, offset);
2696 }
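
/* Example with hfb_filter_cnt = 48: f_index = 0 lands in the word at
 * HFB_FLT_LEN_V3PLUS + 11 * sizeof(u32) ((48 - 1 - 0) / 4 = 11), byte
 * lane 0 (bits 7:0), since the length registers are laid out from the
 * highest-numbered filter down.
 */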
2697 
2698 static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
2699 {
2700 	u32 f_index;
2701 
2702 	for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
2703 		if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
2704 			return f_index;
2705 
2706 	return -ENOMEM;
2707 }
2708 
2709 /* bcmgenet_hfb_add_filter
2710  *
2711  * Add new filter to Hardware Filter Block to match and direct Rx traffic to
2712  * desired Rx queue.
2713  *
2714  * f_data is an array of unsigned 32-bit integers where each 32-bit integer
2715  * provides filter data for 2 bytes (4 nibbles) of Rx frame:
2716  *
2717  * bits 31:20 - unused
2718  * bit  19    - nibble 0 match enable
2719  * bit  18    - nibble 1 match enable
2720  * bit  17    - nibble 2 match enable
2721  * bit  16    - nibble 3 match enable
2722  * bits 15:12 - nibble 0 data
2723  * bits 11:8  - nibble 1 data
2724  * bits 7:4   - nibble 2 data
2725  * bits 3:0   - nibble 3 data
2726  *
2727  * Example:
2728  * In order to match:
2729  * - Ethernet frame type = 0x0800 (IP)
2730  * - IP version field = 4
2731  * - IP protocol field = 0x11 (UDP)
2732  *
2733  * The following filter is needed:
2734  * u32 hfb_filter_ipv4_udp[] = {
2735  *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2736  *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
2737  *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
2738  * };
2739  *
2740  * To add the filter to HFB and direct the traffic to Rx queue 0, call:
2741  * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
2742  *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
2743  */
2744 int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
2745 			    u32 f_length, u32 rx_queue)
2746 {
2747 	int f_index;
2748 	u32 i;
2749 
2750 	f_index = bcmgenet_hfb_find_unused_filter(priv);
2751 	if (f_index < 0)
2752 		return -ENOMEM;
2753 
2754 	if (f_length > priv->hw_params->hfb_filter_size)
2755 		return -EINVAL;
2756 
2757 	for (i = 0; i < f_length; i++)
2758 		bcmgenet_hfb_writel(priv, f_data[i],
2759 			(f_index * priv->hw_params->hfb_filter_size + i) *
2760 			sizeof(u32));
2761 
2762 	bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
2763 	bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
2764 	bcmgenet_hfb_enable_filter(priv, f_index);
2765 	bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
2766 
2767 	return 0;
2768 }
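
/* Each f_data word covers two bytes of frame data, hence the 2 * f_length
 * byte count programmed above. A hypothetical caller sketch, reusing the
 * illustrative IPv4/UDP filter from the comment block above:
 *
 *	err = bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
 *				      ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
 *	if (err)
 *		netdev_warn(priv->dev, "HFB filter not installed: %d\n", err);
 */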
2769 
2770 /* bcmgenet_hfb_clear
2771  *
2772  * Clear Hardware Filter Block and disable all filtering.
2773  */
2774 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
2775 {
2776 	u32 i;
2777 
2778 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
2779 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
2780 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
2781 
2782 	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
2783 		bcmgenet_rdma_writel(priv, 0x0, i);
2784 
2785 	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
2786 		bcmgenet_hfb_reg_writel(priv, 0x0,
2787 					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
2788 
2789 	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
2790 			priv->hw_params->hfb_filter_size; i++)
2791 		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
2792 }
2793 
2794 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
2795 {
2796 	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
2797 		return;
2798 
2799 	bcmgenet_hfb_clear(priv);
2800 }
2801 
2802 static void bcmgenet_netif_start(struct net_device *dev)
2803 {
2804 	struct bcmgenet_priv *priv = netdev_priv(dev);
2805 
2806 	/* Start the network engine */
2807 	bcmgenet_enable_rx_napi(priv);
2808 	bcmgenet_enable_tx_napi(priv);
2809 
2810 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
2811 
2812 	netif_tx_start_all_queues(dev);
2813 
2814 	/* Monitor link interrupts now */
2815 	bcmgenet_link_intr_enable(priv);
2816 
2817 	phy_start(priv->phydev);
2818 }
2819 
2820 static int bcmgenet_open(struct net_device *dev)
2821 {
2822 	struct bcmgenet_priv *priv = netdev_priv(dev);
2823 	unsigned long dma_ctrl;
2824 	u32 reg;
2825 	int ret;
2826 
2827 	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
2828 
2829 	/* Turn on the clock */
2830 	clk_prepare_enable(priv->clk);
2831 
2832 	/* If this is an internal GPHY, power it back on now, before UniMAC is
2833 	 * brought out of reset as absolutely no UniMAC activity is allowed
2834 	 */
2835 	if (priv->internal_phy)
2836 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2837 
2838 	/* take MAC out of reset */
2839 	bcmgenet_umac_reset(priv);
2840 
2841 	ret = init_umac(priv);
2842 	if (ret)
2843 		goto err_clk_disable;
2844 
2845 	/* disable ethernet MAC while updating its registers */
2846 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
2847 
2848 	/* Make sure we reflect the value of CMD_CRC_FWD */
2849 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2850 	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2851 
2852 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
2853 
2854 	if (priv->internal_phy) {
2855 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2856 		reg |= EXT_ENERGY_DET_MASK;
2857 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2858 	}
2859 
2860 	/* Disable RX/TX DMA and flush TX queues */
2861 	dma_ctrl = bcmgenet_dma_disable(priv);
2862 
2863 	/* Reinitialize TDMA and RDMA and SW housekeeping */
2864 	ret = bcmgenet_init_dma(priv);
2865 	if (ret) {
2866 		netdev_err(dev, "failed to initialize DMA\n");
2867 		goto err_clk_disable;
2868 	}
2869 
2870 	/* Always enable ring 16 - descriptor ring */
2871 	bcmgenet_enable_dma(priv, dma_ctrl);
2872 
2873 	/* HFB init */
2874 	bcmgenet_hfb_init(priv);
2875 
2876 	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2877 			  dev->name, priv);
2878 	if (ret < 0) {
2879 		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2880 		goto err_fini_dma;
2881 	}
2882 
2883 	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2884 			  dev->name, priv);
2885 	if (ret < 0) {
2886 		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2887 		goto err_irq0;
2888 	}
2889 
2890 	ret = bcmgenet_mii_probe(dev);
2891 	if (ret) {
2892 		netdev_err(dev, "failed to connect to PHY\n");
2893 		goto err_irq1;
2894 	}
2895 
2896 	bcmgenet_netif_start(dev);
2897 
2898 	return 0;
2899 
2900 err_irq1:
2901 	free_irq(priv->irq1, priv);
2902 err_irq0:
2903 	free_irq(priv->irq0, priv);
2904 err_fini_dma:
2905 	bcmgenet_fini_dma(priv);
2906 err_clk_disable:
2907 	clk_disable_unprepare(priv->clk);
2908 	return ret;
2909 }
2910 
2911 static void bcmgenet_netif_stop(struct net_device *dev)
2912 {
2913 	struct bcmgenet_priv *priv = netdev_priv(dev);
2914 
2915 	netif_tx_stop_all_queues(dev);
2916 	phy_stop(priv->phydev);
2917 	bcmgenet_intr_disable(priv);
2918 	bcmgenet_disable_rx_napi(priv);
2919 	bcmgenet_disable_tx_napi(priv);
2920 
2921 	/* Wait for pending work items to complete. Since interrupts are
2922 	 * disabled no new work will be scheduled.
2923 	 */
2924 	cancel_work_sync(&priv->bcmgenet_irq_work);
2925 
2926 	priv->old_link = -1;
2927 	priv->old_speed = -1;
2928 	priv->old_duplex = -1;
2929 	priv->old_pause = -1;
2930 }
2931 
2932 static int bcmgenet_close(struct net_device *dev)
2933 {
2934 	struct bcmgenet_priv *priv = netdev_priv(dev);
2935 	int ret;
2936 
2937 	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
2938 
2939 	bcmgenet_netif_stop(dev);
2940 
2941 	/* Really kill the PHY state machine and disconnect from it */
2942 	phy_disconnect(priv->phydev);
2943 
2944 	/* Disable MAC receive */
2945 	umac_enable_set(priv, CMD_RX_EN, false);
2946 
2947 	ret = bcmgenet_dma_teardown(priv);
2948 	if (ret)
2949 		return ret;
2950 
2951 	/* Disable MAC transmit; TX DMA must be disabled before this */
2952 	umac_enable_set(priv, CMD_TX_EN, false);
2953 
2954 	/* tx reclaim */
2955 	bcmgenet_tx_reclaim_all(dev);
2956 	bcmgenet_fini_dma(priv);
2957 
2958 	free_irq(priv->irq0, priv);
2959 	free_irq(priv->irq1, priv);
2960 
2961 	if (priv->internal_phy)
2962 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2963 
2964 	clk_disable_unprepare(priv->clk);
2965 
2966 	return ret;
2967 }
2968 
2969 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
2970 {
2971 	struct bcmgenet_priv *priv = ring->priv;
2972 	u32 p_index, c_index, intsts, intmsk;
2973 	struct netdev_queue *txq;
2974 	unsigned int free_bds;
2975 	unsigned long flags;
2976 	bool txq_stopped;
2977 
2978 	if (!netif_msg_tx_err(priv))
2979 		return;
2980 
2981 	txq = netdev_get_tx_queue(priv->dev, ring->queue);
2982 
2983 	spin_lock_irqsave(&ring->lock, flags);
2984 	if (ring->index == DESC_INDEX) {
2985 		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2986 		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
2987 	} else {
2988 		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2989 		intmsk = 1 << ring->index;
2990 	}
2991 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
2992 	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
2993 	txq_stopped = netif_tx_queue_stopped(txq);
2994 	free_bds = ring->free_bds;
2995 	spin_unlock_irqrestore(&ring->lock, flags);
2996 
2997 	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
2998 		  "TX queue status: %s, interrupts: %s\n"
2999 		  "(sw)free_bds: %d (sw)size: %d\n"
3000 		  "(sw)p_index: %d (hw)p_index: %d\n"
3001 		  "(sw)c_index: %d (hw)c_index: %d\n"
3002 		  "(sw)clean_p: %d (sw)write_p: %d\n"
3003 		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
3004 		  ring->index, ring->queue,
3005 		  txq_stopped ? "stopped" : "active",
3006 		  intsts & intmsk ? "enabled" : "disabled",
3007 		  free_bds, ring->size,
3008 		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
3009 		  ring->c_index, c_index & DMA_C_INDEX_MASK,
3010 		  ring->clean_ptr, ring->write_ptr,
3011 		  ring->cb_ptr, ring->end_ptr);
3012 }
3013 
3014 static void bcmgenet_timeout(struct net_device *dev)
3015 {
3016 	struct bcmgenet_priv *priv = netdev_priv(dev);
3017 	u32 int0_enable = 0;
3018 	u32 int1_enable = 0;
3019 	unsigned int q;
3020 
3021 	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3022 
3023 	for (q = 0; q < priv->hw_params->tx_queues; q++)
3024 		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
3025 	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
3026 
3027 	bcmgenet_tx_reclaim_all(dev);
3028 
3029 	for (q = 0; q < priv->hw_params->tx_queues; q++)
3030 		int1_enable |= (1 << q);
3031 
3032 	int0_enable = UMAC_IRQ_TXDMA_DONE;
3033 
3034 	/* Re-enable TX interrupts if disabled */
3035 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
3036 	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3037 
3038 	dev->trans_start = jiffies;
3039 
3040 	dev->stats.tx_errors++;
3041 
3042 	netif_tx_wake_all_queues(dev);
3043 }
3044 
3045 #define MAX_MC_COUNT	16
3046 
3047 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3048 					 unsigned char *addr,
3049 					 int *i,
3050 					 int *mc)
3051 {
3052 	u32 reg;
3053 
3054 	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3055 			     UMAC_MDF_ADDR + (*i * 4));
3056 	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3057 			     addr[4] << 8 | addr[5],
3058 			     UMAC_MDF_ADDR + ((*i + 1) * 4));
3059 	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
3060 	reg |= (1 << (MAX_MC_COUNT - *mc));
3061 	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
3062 	*i += 2;
3063 	(*mc)++;
3064 }
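
/* Example: the first entry programmed (*mc == 0) occupies UMAC_MDF_ADDR
 * words 0-1 and enable bit 16 (1 << (MAX_MC_COUNT - 0)); the next entry
 * takes words 2-3 and bit 15, counting one bit down per address.
 */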
3065 
3066 static void bcmgenet_set_rx_mode(struct net_device *dev)
3067 {
3068 	struct bcmgenet_priv *priv = netdev_priv(dev);
3069 	struct netdev_hw_addr *ha;
3070 	int i, mc;
3071 	u32 reg;
3072 
3073 	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3074 
3075 	/* Promiscuous mode */
3076 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
3077 	if (dev->flags & IFF_PROMISC) {
3078 		reg |= CMD_PROMISC;
3079 		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3080 		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
3081 		return;
3082 	} else {
3083 		reg &= ~CMD_PROMISC;
3084 		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3085 	}
3086 
3087 	/* UniMAC doesn't support ALLMULTI */
3088 	if (dev->flags & IFF_ALLMULTI) {
3089 		netdev_warn(dev, "ALLMULTI is not supported\n");
3090 		return;
3091 	}
3092 
3093 	/* update MDF filter */
3094 	i = 0;
3095 	mc = 0;
3096 	/* Broadcast */
3097 	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
3098 	/* my own address */
3099 	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
3100 	/* Unicast list */
3101 	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
3102 		return;
3103 
3104 	if (!netdev_uc_empty(dev))
3105 		netdev_for_each_uc_addr(ha, dev)
3106 			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
3107 	/* Multicast */
3108 	if (netdev_mc_empty(dev) || netdev_mc_count(dev) > (MAX_MC_COUNT - mc))
3109 		return;
3110 
3111 	netdev_for_each_mc_addr(ha, dev)
3112 		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
3113 }
3114 
3115 /* Set the hardware MAC address. */
3116 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3117 {
3118 	struct sockaddr *addr = p;
3119 
3120 	/* Setting the MAC address at the hardware level is not possible
3121 	 * without disabling the UniMAC RX/TX enable bits.
3122 	 */
3123 	if (netif_running(dev))
3124 		return -EBUSY;
3125 
3126 	ether_addr_copy(dev->dev_addr, addr->sa_data);
3127 
3128 	return 0;
3129 }
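
/* The copied address only reaches the hardware on the next open (or
 * resume), when dev->dev_addr is pushed via bcmgenet_set_hw_addr().
 */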
3130 
3131 static const struct net_device_ops bcmgenet_netdev_ops = {
3132 	.ndo_open		= bcmgenet_open,
3133 	.ndo_stop		= bcmgenet_close,
3134 	.ndo_start_xmit		= bcmgenet_xmit,
3135 	.ndo_tx_timeout		= bcmgenet_timeout,
3136 	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
3137 	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
3138 	.ndo_do_ioctl		= bcmgenet_ioctl,
3139 	.ndo_set_features	= bcmgenet_set_features,
3140 #ifdef CONFIG_NET_POLL_CONTROLLER
3141 	.ndo_poll_controller	= bcmgenet_poll_controller,
3142 #endif
3143 };
3144 
3145 /* Array of GENET hardware parameters/characteristics */
3146 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
3147 	[GENET_V1] = {
3148 		.tx_queues = 0,
3149 		.tx_bds_per_q = 0,
3150 		.rx_queues = 0,
3151 		.rx_bds_per_q = 0,
3152 		.bp_in_en_shift = 16,
3153 		.bp_in_mask = 0xffff,
3154 		.hfb_filter_cnt = 16,
3155 		.qtag_mask = 0x1F,
3156 		.hfb_offset = 0x1000,
3157 		.rdma_offset = 0x2000,
3158 		.tdma_offset = 0x3000,
3159 		.words_per_bd = 2,
3160 	},
3161 	[GENET_V2] = {
3162 		.tx_queues = 4,
3163 		.tx_bds_per_q = 32,
3164 		.rx_queues = 0,
3165 		.rx_bds_per_q = 0,
3166 		.bp_in_en_shift = 16,
3167 		.bp_in_mask = 0xffff,
3168 		.hfb_filter_cnt = 16,
3169 		.qtag_mask = 0x1F,
3170 		.tbuf_offset = 0x0600,
3171 		.hfb_offset = 0x1000,
3172 		.hfb_reg_offset = 0x2000,
3173 		.rdma_offset = 0x3000,
3174 		.tdma_offset = 0x4000,
3175 		.words_per_bd = 2,
3176 		.flags = GENET_HAS_EXT,
3177 	},
3178 	[GENET_V3] = {
3179 		.tx_queues = 4,
3180 		.tx_bds_per_q = 32,
3181 		.rx_queues = 0,
3182 		.rx_bds_per_q = 0,
3183 		.bp_in_en_shift = 17,
3184 		.bp_in_mask = 0x1ffff,
3185 		.hfb_filter_cnt = 48,
3186 		.hfb_filter_size = 128,
3187 		.qtag_mask = 0x3F,
3188 		.tbuf_offset = 0x0600,
3189 		.hfb_offset = 0x8000,
3190 		.hfb_reg_offset = 0xfc00,
3191 		.rdma_offset = 0x10000,
3192 		.tdma_offset = 0x11000,
3193 		.words_per_bd = 2,
3194 		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3195 			 GENET_HAS_MOCA_LINK_DET,
3196 	},
3197 	[GENET_V4] = {
3198 		.tx_queues = 4,
3199 		.tx_bds_per_q = 32,
3200 		.rx_queues = 0,
3201 		.rx_bds_per_q = 0,
3202 		.bp_in_en_shift = 17,
3203 		.bp_in_mask = 0x1ffff,
3204 		.hfb_filter_cnt = 48,
3205 		.hfb_filter_size = 128,
3206 		.qtag_mask = 0x3F,
3207 		.tbuf_offset = 0x0600,
3208 		.hfb_offset = 0x8000,
3209 		.hfb_reg_offset = 0xfc00,
3210 		.rdma_offset = 0x2000,
3211 		.tdma_offset = 0x4000,
3212 		.words_per_bd = 3,
3213 		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3214 			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3215 	},
3216 };
3217 
3218 /* Infer hardware parameters from the detected GENET version */
3219 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3220 {
3221 	struct bcmgenet_hw_params *params;
3222 	u32 reg;
3223 	u8 major;
3224 	u16 gphy_rev;
3225 
3226 	if (GENET_IS_V4(priv)) {
3227 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3228 		genet_dma_ring_regs = genet_dma_ring_regs_v4;
3229 		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3230 		priv->version = GENET_V4;
3231 	} else if (GENET_IS_V3(priv)) {
3232 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3233 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3234 		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3235 		priv->version = GENET_V3;
3236 	} else if (GENET_IS_V2(priv)) {
3237 		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3238 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3239 		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3240 		priv->version = GENET_V2;
3241 	} else if (GENET_IS_V1(priv)) {
3242 		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3243 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3244 		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3245 		priv->version = GENET_V1;
3246 	}
3247 
3248 	/* enum genet_version starts at 1 */
3249 	priv->hw_params = &bcmgenet_hw_params[priv->version];
3250 	params = priv->hw_params;
3251 
3252 	/* Read GENET HW version */
3253 	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3254 	major = (reg >> 24 & 0x0f);
3255 	if (major == 5)
3256 		major = 4;
3257 	else if (major == 0)
3258 		major = 1;
3259 	if (major != priv->version) {
3260 		dev_err(&priv->pdev->dev,
3261 			"GENET version mismatch, got: %d, configured for: %d\n",
3262 			major, priv->version);
3263 	}
3264 
3265 	/* Print the GENET core version */
3266 	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3267 		 major, (reg >> 16) & 0x0f, reg & 0xffff);
3268 
3269 	/* Store the integrated PHY revision for the MDIO probing function
3270 	 * to pass this information to the PHY driver. The PHY driver expects
3271 	 * to find the PHY major revision in bits 15:8 while the GENET register
3272 	 * stores that information in bits 7:0, account for that.
3273 	 *
3274 	 * On newer chips, starting with PHY revision G0, a new scheme is
3275 	 * deployed similar to the Starfighter 2 switch with GPHY major
3276 	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3277 	 * and the special value 0x01ff are reserved, so we use a small
3278 	 * heuristic to check for the new GPHY revision scheme and re-arrange
3279 	 * things so the GPHY driver is happy.
3280 	 */
3281 	gphy_rev = reg & 0xffff;
3282 
3283 	/* These are reserved so require special treatment; test them first,
3284 	 * since 0x01ff would otherwise match the old scheme below */
3285 	if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3286 		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3287 		return;
3288 	}
3289 
3290 	/* This is the good old scheme, just GPHY major, no minor nor patch */
3291 	if ((gphy_rev & 0xf0) != 0)
3292 		priv->gphy_rev = gphy_rev << 8;
3293 
3294 	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3295 	else if ((gphy_rev & 0xff00) != 0)
3296 		priv->gphy_rev = gphy_rev;
3296 
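	/* Worked examples (illustrative register values): an old-scheme
	 * reading of 0x00a0 is stored as priv->gphy_rev = 0xa000, while a
	 * new-scheme reading of 0x1000 (rev G0) is kept as-is.
	 */
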
3297 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3298 	if (!(params->flags & GENET_HAS_40BITS))
3299 		pr_warn("GENET does not support 40-bits PA\n");
3300 #endif
3301 
3302 	pr_debug("Configuration for version: %d\n"
3303 		"TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3304 		"BP << en: %2d, BP msk: 0x%05x\n"
3305 		"HFB count: %2d, QTAQ msk: 0x%05x\n"
3306 		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3307 		"RDMA: 0x%05x, TDMA: 0x%05x\n"
3308 		"Words/BD: %d\n",
3309 		priv->version,
3310 		params->tx_queues, params->tx_bds_per_q,
3311 		params->rx_queues, params->rx_bds_per_q,
3312 		params->bp_in_en_shift, params->bp_in_mask,
3313 		params->hfb_filter_cnt, params->qtag_mask,
3314 		params->tbuf_offset, params->hfb_offset,
3315 		params->hfb_reg_offset,
3316 		params->rdma_offset, params->tdma_offset,
3317 		params->words_per_bd);
3318 }
3319 
3320 static const struct of_device_id bcmgenet_match[] = {
3321 	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
3322 	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
3323 	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
3324 	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3325 	{ },
3326 };
3327 MODULE_DEVICE_TABLE(of, bcmgenet_match);
3328 
3329 static int bcmgenet_probe(struct platform_device *pdev)
3330 {
3331 	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
3332 	struct device_node *dn = pdev->dev.of_node;
3333 	const struct of_device_id *of_id = NULL;
3334 	struct bcmgenet_priv *priv;
3335 	struct net_device *dev;
3336 	const void *macaddr;
3337 	struct resource *r;
3338 	int err = -EIO;
3339 
3340 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3341 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3342 				 GENET_MAX_MQ_CNT + 1);
3343 	if (!dev) {
3344 		dev_err(&pdev->dev, "can't allocate net device\n");
3345 		return -ENOMEM;
3346 	}
3347 
3348 	if (dn) {
3349 		of_id = of_match_node(bcmgenet_match, dn);
3350 		/* don't leak the net_device allocated above */
3351 		if (!of_id) { err = -EINVAL; goto err; }
3352 	}
3353 
3354 	priv = netdev_priv(dev);
3355 	priv->irq0 = platform_get_irq(pdev, 0);
3356 	priv->irq1 = platform_get_irq(pdev, 1);
3357 	priv->wol_irq = platform_get_irq(pdev, 2);
3358 	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
3359 		dev_err(&pdev->dev, "can't find IRQs\n");
3360 		err = -EINVAL;
3361 		goto err;
3362 	}
3363 
3364 	if (dn) {
3365 		macaddr = of_get_mac_address(dn);
3366 		if (!macaddr) {
3367 			dev_err(&pdev->dev, "can't find MAC address\n");
3368 			err = -EINVAL;
3369 			goto err;
3370 		}
3371 	} else {
3372 		macaddr = pd->mac_address;
3373 	}
3374 
3375 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3376 	priv->base = devm_ioremap_resource(&pdev->dev, r);
3377 	if (IS_ERR(priv->base)) {
3378 		err = PTR_ERR(priv->base);
3379 		goto err;
3380 	}
3381 
3382 	SET_NETDEV_DEV(dev, &pdev->dev);
3383 	dev_set_drvdata(&pdev->dev, dev);
3384 	ether_addr_copy(dev->dev_addr, macaddr);
3385 	dev->watchdog_timeo = 2 * HZ;
3386 	dev->ethtool_ops = &bcmgenet_ethtool_ops;
3387 	dev->netdev_ops = &bcmgenet_netdev_ops;
3388 
3389 	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
3390 
3391 	/* Set hardware features */
3392 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
3393 		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
3394 
3395 	/* Request the WOL interrupt and advertise suspend if available */
3396 	priv->wol_irq_disabled = true;
3397 	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
3398 			       dev->name, priv);
3399 	if (!err)
3400 		device_set_wakeup_capable(&pdev->dev, 1);
3401 
3402 	/* Set the needed headroom to account for any possible
3403 	 * features being enabled/disabled at runtime
3404 	 */
3405 	dev->needed_headroom += 64;
3406 
3407 	netdev_boot_setup_check(dev);
3408 
3409 	priv->dev = dev;
3410 	priv->pdev = pdev;
3411 	if (of_id)
3412 		priv->version = (enum bcmgenet_version)of_id->data;
3413 	else
3414 		priv->version = pd->genet_version;
3415 
3416 	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
3417 	if (IS_ERR(priv->clk)) {
3418 		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
3419 		priv->clk = NULL;
3420 	}
3421 
3422 	clk_prepare_enable(priv->clk);
3423 
3424 	bcmgenet_set_hw_params(priv);
3425 
3426 	/* MII wait queue */
3427 	init_waitqueue_head(&priv->wq);
3428 	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
3429 	priv->rx_buf_len = RX_BUF_LENGTH;
3430 	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
3431 
3432 	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
3433 	if (IS_ERR(priv->clk_wol)) {
3434 		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
3435 		priv->clk_wol = NULL;
3436 	}
3437 
3438 	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
3439 	if (IS_ERR(priv->clk_eee)) {
3440 		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
3441 		priv->clk_eee = NULL;
3442 	}
3443 
3444 	err = reset_umac(priv);
3445 	if (err)
3446 		goto err_clk_disable;
3447 
3448 	err = bcmgenet_mii_init(dev);
3449 	if (err)
3450 		goto err_clk_disable;
3451 
3452 	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
3453 	 * queues, just the ring 16 descriptor-based TX queue)
3454 	 */
3455 	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
3456 	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
3457 
3458 	/* libphy will determine the link state */
3459 	netif_carrier_off(dev);
3460 
3461 	/* Turn off the main clock, WOL clock is handled separately */
3462 	clk_disable_unprepare(priv->clk);
3463 
3464 	err = register_netdev(dev);
3465 	if (err)
3466 		goto err;
3467 
3468 	return err;
3469 
3470 err_clk_disable:
3471 	clk_disable_unprepare(priv->clk);
3472 err:
3473 	free_netdev(dev);
3474 	return err;
3475 }
3476 
3477 static int bcmgenet_remove(struct platform_device *pdev)
3478 {
3479 	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
3480 
3481 	dev_set_drvdata(&pdev->dev, NULL);
3482 	unregister_netdev(priv->dev);
3483 	bcmgenet_mii_exit(priv->dev);
3484 	free_netdev(priv->dev);
3485 
3486 	return 0;
3487 }
3488 
3489 #ifdef CONFIG_PM_SLEEP
3490 static int bcmgenet_suspend(struct device *d)
3491 {
3492 	struct net_device *dev = dev_get_drvdata(d);
3493 	struct bcmgenet_priv *priv = netdev_priv(dev);
3494 	int ret;
3495 
3496 	if (!netif_running(dev))
3497 		return 0;
3498 
3499 	bcmgenet_netif_stop(dev);
3500 
3501 	phy_suspend(priv->phydev);
3502 
3503 	netif_device_detach(dev);
3504 
3505 	/* Disable MAC receive */
3506 	umac_enable_set(priv, CMD_RX_EN, false);
3507 
3508 	ret = bcmgenet_dma_teardown(priv);
3509 	if (ret)
3510 		return ret;
3511 
3512 	/* Disable MAC transmit; TX DMA must be disabled before this */
3513 	umac_enable_set(priv, CMD_TX_EN, false);
3514 
3515 	/* tx reclaim */
3516 	bcmgenet_tx_reclaim_all(dev);
3517 	bcmgenet_fini_dma(priv);
3518 
3519 	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
3520 	if (device_may_wakeup(d) && priv->wolopts) {
3521 		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
3522 		clk_prepare_enable(priv->clk_wol);
3523 	} else if (priv->internal_phy) {
3524 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3525 	}
3526 
3527 	/* Turn off the clocks */
3528 	clk_disable_unprepare(priv->clk);
3529 
3530 	return ret;
3531 }
3532 
3533 static int bcmgenet_resume(struct device *d)
3534 {
3535 	struct net_device *dev = dev_get_drvdata(d);
3536 	struct bcmgenet_priv *priv = netdev_priv(dev);
3537 	unsigned long dma_ctrl;
3538 	int ret;
3539 	u32 reg;
3540 
3541 	if (!netif_running(dev))
3542 		return 0;
3543 
3544 	/* Turn on the clock */
3545 	ret = clk_prepare_enable(priv->clk);
3546 	if (ret)
3547 		return ret;
3548 
3549 	/* If this is an internal GPHY, power it back on now, before UniMAC is
3550 	 * brought out of reset as absolutely no UniMAC activity is allowed
3551 	 */
3552 	if (priv->internal_phy)
3553 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3554 
3555 	bcmgenet_umac_reset(priv);
3556 
3557 	ret = init_umac(priv);
3558 	if (ret)
3559 		goto out_clk_disable;
3560 
3561 	/* From WOL-enabled suspend, switch to regular clock */
3562 	if (priv->wolopts)
3563 		clk_disable_unprepare(priv->clk_wol);
3564 
3565 	phy_init_hw(priv->phydev);
3566 	/* Speed settings must be restored */
3567 	bcmgenet_mii_config(priv->dev);
3568 
3569 	/* disable ethernet MAC while updating its registers */
3570 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
3571 
3572 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
3573 
3574 	if (priv->internal_phy) {
3575 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
3576 		reg |= EXT_ENERGY_DET_MASK;
3577 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
3578 	}
3579 
3580 	if (priv->wolopts)
3581 		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
3582 
3583 	/* Disable RX/TX DMA and flush TX queues */
3584 	dma_ctrl = bcmgenet_dma_disable(priv);
3585 
3586 	/* Reinitialize TDMA and RDMA and SW housekeeping */
3587 	ret = bcmgenet_init_dma(priv);
3588 	if (ret) {
3589 		netdev_err(dev, "failed to initialize DMA\n");
3590 		goto out_clk_disable;
3591 	}
3592 
3593 	/* Always enable ring 16 - descriptor ring */
3594 	bcmgenet_enable_dma(priv, dma_ctrl);
3595 
3596 	netif_device_attach(dev);
3597 
3598 	phy_resume(priv->phydev);
3599 
3600 	if (priv->eee.eee_enabled)
3601 		bcmgenet_eee_enable_set(dev, true);
3602 
3603 	bcmgenet_netif_start(dev);
3604 
3605 	return 0;
3606 
3607 out_clk_disable:
3608 	clk_disable_unprepare(priv->clk);
3609 	return ret;
3610 }
3611 #endif /* CONFIG_PM_SLEEP */
3612 
3613 static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
3614 
3615 static struct platform_driver bcmgenet_driver = {
3616 	.probe	= bcmgenet_probe,
3617 	.remove	= bcmgenet_remove,
3618 	.driver	= {
3619 		.name	= "bcmgenet",
3620 		.of_match_table = bcmgenet_match,
3621 		.pm	= &bcmgenet_pm_ops,
3622 	},
3623 };
3624 module_platform_driver(bcmgenet_driver);
3625 
3626 MODULE_AUTHOR("Broadcom Corporation");
3627 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
3628 MODULE_ALIAS("platform:bcmgenet");
3629 MODULE_LICENSE("GPL");
3630