1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Broadcom GENET (Gigabit Ethernet) controller driver
4  *
5  * Copyright (c) 2014-2025 Broadcom
6  */
7 
8 #define pr_fmt(fmt)				"bcmgenet: " fmt
9 
10 #include <linux/acpi.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/types.h>
15 #include <linux/fcntl.h>
16 #include <linux/interrupt.h>
17 #include <linux/string.h>
18 #include <linux/if_ether.h>
19 #include <linux/init.h>
20 #include <linux/errno.h>
21 #include <linux/delay.h>
22 #include <linux/platform_device.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/pm.h>
25 #include <linux/clk.h>
26 #include <net/arp.h>
27 
28 #include <linux/mii.h>
29 #include <linux/ethtool.h>
30 #include <linux/netdevice.h>
31 #include <linux/inetdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/in.h>
35 #include <linux/ip.h>
36 #include <linux/ipv6.h>
37 #include <linux/phy.h>
38 
39 #include <linux/unaligned.h>
40 
41 #include "bcmgenet.h"
42 
43 /* Default highest priority queue for multi queue support */
44 #define GENET_Q1_PRIORITY	0
45 #define GENET_Q0_PRIORITY	1
46 
47 #define GENET_Q0_RX_BD_CNT	\
48 	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
49 #define GENET_Q0_TX_BD_CNT	\
50 	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
51 
52 #define RX_BUF_LENGTH		2048
53 #define SKB_ALIGNMENT		32
54 
55 /* Tx/Rx DMA register offset, skip 256 descriptors */
56 #define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
57 #define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))
58 
59 #define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
60 				TOTAL_DESC * DMA_DESC_SIZE)
61 
62 #define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
63 				TOTAL_DESC * DMA_DESC_SIZE)
64 
65 /* Forward declarations */
66 static void bcmgenet_set_rx_mode(struct net_device *dev);
67 
68 static inline void bcmgenet_writel(u32 value, void __iomem *offset)
69 {
70 	/* MIPS chips strapped for BE will automagically configure the
71 	 * peripheral registers for CPU-native byte order.
72 	 */
73 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
74 		__raw_writel(value, offset);
75 	else
76 		writel_relaxed(value, offset);
77 }
78 
79 static inline u32 bcmgenet_readl(void __iomem *offset)
80 {
81 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
82 		return __raw_readl(offset);
83 	else
84 		return readl_relaxed(offset);
85 }
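/* Everything below funnels register access through these two helpers,
 * usually as read-modify-write sequences. A minimal sketch of the
 * pattern (SOME_ENABLE_BIT is a hypothetical field, not a real
 * register bit):
 *
 *	reg = bcmgenet_readl(priv->base + off);
 *	reg |= SOME_ENABLE_BIT;
 *	bcmgenet_writel(reg, priv->base + off);
 *
 * The __raw accessors avoid a double byte swap on big-endian MIPS,
 * where the registers are already presented in CPU byte order.
 */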
86 
87 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
88 					     void __iomem *d, u32 value)
89 {
90 	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
91 }
92 
93 static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
94 				    void __iomem *d,
95 				    dma_addr_t addr)
96 {
97 	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
98 
99 	/* Register writes to the GISB bus can take a couple hundred
100 	 * nanoseconds and are done for each packet; save these expensive
101 	 * writes unless the platform is explicitly configured for 64-bits/LPAE.
102 	 */
103 #ifdef CONFIG_PHYS_ADDR_T_64BIT
104 	if (bcmgenet_has_40bits(priv))
105 		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
106 #endif
107 }
108 
109 /* Combined address + length/status setter */
110 static inline void dmadesc_set(struct bcmgenet_priv *priv,
111 			       void __iomem *d, dma_addr_t addr, u32 val)
112 {
113 	dmadesc_set_addr(priv, d, addr);
114 	dmadesc_set_length_status(priv, d, val);
115 }
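/* A minimal sketch (hypothetical context, not the actual Tx path) of
 * how a transmit routine might program one descriptor with
 * dmadesc_set() after DMA-mapping a linear skb:
 *
 *	mapping = dma_map_single(kdev, skb->data, skb_headlen(skb),
 *				 DMA_TO_DEVICE);
 *	if (dma_mapping_error(kdev, mapping))
 *		goto drop;
 *	len_stat = (skb_headlen(skb) << DMA_BUFLENGTH_SHIFT) |
 *		   DMA_SOP | DMA_EOP | DMA_TX_APPEND_CRC;
 *	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
 *
 * The address is written before length/status so the descriptor never
 * pairs a valid length with a stale buffer address.
 */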
116 
117 #define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"
118 
119 #define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
120 				NETIF_MSG_LINK)
121 
122 static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
123 {
124 	if (GENET_IS_V1(priv))
125 		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
126 	else
127 		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
128 }
129 
130 static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
131 {
132 	if (GENET_IS_V1(priv))
133 		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
134 	else
135 		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
136 }
137 
138 /* These macros are defined to deal with the register map change
139  * between GENET1.1 and GENET2. Only those currently being used
140  * by the driver are defined.
141  */
142 static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
143 {
144 	if (GENET_IS_V1(priv))
145 		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
146 	else
147 		return bcmgenet_readl(priv->base +
148 				      priv->hw_params->tbuf_offset + TBUF_CTRL);
149 }
150 
151 static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
152 {
153 	if (GENET_IS_V1(priv))
154 		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
155 	else
156 		bcmgenet_writel(val, priv->base +
157 				priv->hw_params->tbuf_offset + TBUF_CTRL);
158 }
159 
160 static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
161 {
162 	if (GENET_IS_V1(priv))
163 		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
164 	else
165 		return bcmgenet_readl(priv->base +
166 				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
167 }
168 
169 static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
170 {
171 	if (GENET_IS_V1(priv))
172 		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
173 	else
174 		bcmgenet_writel(val, priv->base +
175 				priv->hw_params->tbuf_offset + TBUF_BP_MC);
176 }
177 
178 /* RX/TX DMA register accessors */
179 enum dma_reg {
180 	DMA_RING_CFG = 0,
181 	DMA_CTRL,
182 	DMA_STATUS,
183 	DMA_SCB_BURST_SIZE,
184 	DMA_ARB_CTRL,
185 	DMA_PRIORITY_0,
186 	DMA_PRIORITY_1,
187 	DMA_PRIORITY_2,
188 	DMA_INDEX2RING_0,
189 	DMA_INDEX2RING_1,
190 	DMA_INDEX2RING_2,
191 	DMA_INDEX2RING_3,
192 	DMA_INDEX2RING_4,
193 	DMA_INDEX2RING_5,
194 	DMA_INDEX2RING_6,
195 	DMA_INDEX2RING_7,
196 	DMA_RING0_TIMEOUT,
197 	DMA_RING1_TIMEOUT,
198 	DMA_RING2_TIMEOUT,
199 	DMA_RING3_TIMEOUT,
200 	DMA_RING4_TIMEOUT,
201 	DMA_RING5_TIMEOUT,
202 	DMA_RING6_TIMEOUT,
203 	DMA_RING7_TIMEOUT,
204 	DMA_RING8_TIMEOUT,
205 	DMA_RING9_TIMEOUT,
206 	DMA_RING10_TIMEOUT,
207 	DMA_RING11_TIMEOUT,
208 	DMA_RING12_TIMEOUT,
209 	DMA_RING13_TIMEOUT,
210 	DMA_RING14_TIMEOUT,
211 	DMA_RING15_TIMEOUT,
212 	DMA_RING16_TIMEOUT,
213 };
214 
215 static const u8 bcmgenet_dma_regs_v3plus[] = {
216 	[DMA_RING_CFG]		= 0x00,
217 	[DMA_CTRL]		= 0x04,
218 	[DMA_STATUS]		= 0x08,
219 	[DMA_SCB_BURST_SIZE]	= 0x0C,
220 	[DMA_ARB_CTRL]		= 0x2C,
221 	[DMA_PRIORITY_0]	= 0x30,
222 	[DMA_PRIORITY_1]	= 0x34,
223 	[DMA_PRIORITY_2]	= 0x38,
224 	[DMA_RING0_TIMEOUT]	= 0x2C,
225 	[DMA_RING1_TIMEOUT]	= 0x30,
226 	[DMA_RING2_TIMEOUT]	= 0x34,
227 	[DMA_RING3_TIMEOUT]	= 0x38,
228 	[DMA_RING4_TIMEOUT]	= 0x3c,
229 	[DMA_RING5_TIMEOUT]	= 0x40,
230 	[DMA_RING6_TIMEOUT]	= 0x44,
231 	[DMA_RING7_TIMEOUT]	= 0x48,
232 	[DMA_RING8_TIMEOUT]	= 0x4c,
233 	[DMA_RING9_TIMEOUT]	= 0x50,
234 	[DMA_RING10_TIMEOUT]	= 0x54,
235 	[DMA_RING11_TIMEOUT]	= 0x58,
236 	[DMA_RING12_TIMEOUT]	= 0x5c,
237 	[DMA_RING13_TIMEOUT]	= 0x60,
238 	[DMA_RING14_TIMEOUT]	= 0x64,
239 	[DMA_RING15_TIMEOUT]	= 0x68,
240 	[DMA_RING16_TIMEOUT]	= 0x6C,
241 	[DMA_INDEX2RING_0]	= 0x70,
242 	[DMA_INDEX2RING_1]	= 0x74,
243 	[DMA_INDEX2RING_2]	= 0x78,
244 	[DMA_INDEX2RING_3]	= 0x7C,
245 	[DMA_INDEX2RING_4]	= 0x80,
246 	[DMA_INDEX2RING_5]	= 0x84,
247 	[DMA_INDEX2RING_6]	= 0x88,
248 	[DMA_INDEX2RING_7]	= 0x8C,
249 };
250 
251 static const u8 bcmgenet_dma_regs_v2[] = {
252 	[DMA_RING_CFG]		= 0x00,
253 	[DMA_CTRL]		= 0x04,
254 	[DMA_STATUS]		= 0x08,
255 	[DMA_SCB_BURST_SIZE]	= 0x0C,
256 	[DMA_ARB_CTRL]		= 0x30,
257 	[DMA_PRIORITY_0]	= 0x34,
258 	[DMA_PRIORITY_1]	= 0x38,
259 	[DMA_PRIORITY_2]	= 0x3C,
260 	[DMA_RING0_TIMEOUT]	= 0x2C,
261 	[DMA_RING1_TIMEOUT]	= 0x30,
262 	[DMA_RING2_TIMEOUT]	= 0x34,
263 	[DMA_RING3_TIMEOUT]	= 0x38,
264 	[DMA_RING4_TIMEOUT]	= 0x3c,
265 	[DMA_RING5_TIMEOUT]	= 0x40,
266 	[DMA_RING6_TIMEOUT]	= 0x44,
267 	[DMA_RING7_TIMEOUT]	= 0x48,
268 	[DMA_RING8_TIMEOUT]	= 0x4c,
269 	[DMA_RING9_TIMEOUT]	= 0x50,
270 	[DMA_RING10_TIMEOUT]	= 0x54,
271 	[DMA_RING11_TIMEOUT]	= 0x58,
272 	[DMA_RING12_TIMEOUT]	= 0x5c,
273 	[DMA_RING13_TIMEOUT]	= 0x60,
274 	[DMA_RING14_TIMEOUT]	= 0x64,
275 	[DMA_RING15_TIMEOUT]	= 0x68,
276 	[DMA_RING16_TIMEOUT]	= 0x6C,
277 };
278 
279 static const u8 bcmgenet_dma_regs_v1[] = {
280 	[DMA_CTRL]		= 0x00,
281 	[DMA_STATUS]		= 0x04,
282 	[DMA_SCB_BURST_SIZE]	= 0x0C,
283 	[DMA_ARB_CTRL]		= 0x30,
284 	[DMA_PRIORITY_0]	= 0x34,
285 	[DMA_PRIORITY_1]	= 0x38,
286 	[DMA_PRIORITY_2]	= 0x3C,
287 	[DMA_RING0_TIMEOUT]	= 0x2C,
288 	[DMA_RING1_TIMEOUT]	= 0x30,
289 	[DMA_RING2_TIMEOUT]	= 0x34,
290 	[DMA_RING3_TIMEOUT]	= 0x38,
291 	[DMA_RING4_TIMEOUT]	= 0x3c,
292 	[DMA_RING5_TIMEOUT]	= 0x40,
293 	[DMA_RING6_TIMEOUT]	= 0x44,
294 	[DMA_RING7_TIMEOUT]	= 0x48,
295 	[DMA_RING8_TIMEOUT]	= 0x4c,
296 	[DMA_RING9_TIMEOUT]	= 0x50,
297 	[DMA_RING10_TIMEOUT]	= 0x54,
298 	[DMA_RING11_TIMEOUT]	= 0x58,
299 	[DMA_RING12_TIMEOUT]	= 0x5c,
300 	[DMA_RING13_TIMEOUT]	= 0x60,
301 	[DMA_RING14_TIMEOUT]	= 0x64,
302 	[DMA_RING15_TIMEOUT]	= 0x68,
303 	[DMA_RING16_TIMEOUT]	= 0x6C,
304 };
305 
306 /* Set at runtime once GENET version is known */
307 static const u8 *bcmgenet_dma_regs;
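/* The pointer is assigned before any of the accessors below run; a
 * condensed sketch of the version-based selection (the actual
 * assignment happens in the hardware parameter setup later in this
 * file):
 *
 *	if (GENET_IS_V1(priv))
 *		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
 *	else if (GENET_IS_V2(priv))
 *		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
 *	else
 *		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
 */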
308 
309 static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
310 {
311 	return netdev_priv(dev_get_drvdata(dev));
312 }
313 
314 static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
315 				      enum dma_reg r)
316 {
317 	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
318 			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
319 }
320 
321 static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
322 					u32 val, enum dma_reg r)
323 {
324 	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
325 			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
326 }
327 
328 static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
329 				      enum dma_reg r)
330 {
331 	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
332 			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
333 }
334 
335 static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
336 					u32 val, enum dma_reg r)
337 {
338 	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
339 			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
340 }
341 
342 /* RDMA/TDMA ring registers and accessors
343  * we merge the common fields and just prefix with T/R the registers
344  * that have a different meaning depending on the direction
345  */
346 enum dma_ring_reg {
347 	TDMA_READ_PTR = 0,
348 	RDMA_WRITE_PTR = TDMA_READ_PTR,
349 	TDMA_READ_PTR_HI,
350 	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
351 	TDMA_CONS_INDEX,
352 	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
353 	TDMA_PROD_INDEX,
354 	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
355 	DMA_RING_BUF_SIZE,
356 	DMA_START_ADDR,
357 	DMA_START_ADDR_HI,
358 	DMA_END_ADDR,
359 	DMA_END_ADDR_HI,
360 	DMA_MBUF_DONE_THRESH,
361 	TDMA_FLOW_PERIOD,
362 	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
363 	TDMA_WRITE_PTR,
364 	RDMA_READ_PTR = TDMA_WRITE_PTR,
365 	TDMA_WRITE_PTR_HI,
366 	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
367 };
368 
369 /* GENET v4 supports 40-bit pointer addressing;
370  * for obvious reasons the LO and HI word parts
371  * are contiguous, but this offsets the other
372  * registers.
373  */
374 static const u8 genet_dma_ring_regs_v4[] = {
375 	[TDMA_READ_PTR]			= 0x00,
376 	[TDMA_READ_PTR_HI]		= 0x04,
377 	[TDMA_CONS_INDEX]		= 0x08,
378 	[TDMA_PROD_INDEX]		= 0x0C,
379 	[DMA_RING_BUF_SIZE]		= 0x10,
380 	[DMA_START_ADDR]		= 0x14,
381 	[DMA_START_ADDR_HI]		= 0x18,
382 	[DMA_END_ADDR]			= 0x1C,
383 	[DMA_END_ADDR_HI]		= 0x20,
384 	[DMA_MBUF_DONE_THRESH]		= 0x24,
385 	[TDMA_FLOW_PERIOD]		= 0x28,
386 	[TDMA_WRITE_PTR]		= 0x2C,
387 	[TDMA_WRITE_PTR_HI]		= 0x30,
388 };
389 
390 static const u8 genet_dma_ring_regs_v123[] = {
391 	[TDMA_READ_PTR]			= 0x00,
392 	[TDMA_CONS_INDEX]		= 0x04,
393 	[TDMA_PROD_INDEX]		= 0x08,
394 	[DMA_RING_BUF_SIZE]		= 0x0C,
395 	[DMA_START_ADDR]		= 0x10,
396 	[DMA_END_ADDR]			= 0x14,
397 	[DMA_MBUF_DONE_THRESH]		= 0x18,
398 	[TDMA_FLOW_PERIOD]		= 0x1C,
399 	[TDMA_WRITE_PTR]		= 0x20,
400 };
401 
402 /* Set at runtime once GENET version is known */
403 static const u8 *genet_dma_ring_regs;
404 
405 static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
406 					   unsigned int ring,
407 					   enum dma_ring_reg r)
408 {
409 	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
410 			      (DMA_RING_SIZE * ring) +
411 			      genet_dma_ring_regs[r]);
412 }
413 
414 static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
415 					     unsigned int ring, u32 val,
416 					     enum dma_ring_reg r)
417 {
418 	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
419 			(DMA_RING_SIZE * ring) +
420 			genet_dma_ring_regs[r]);
421 }
422 
423 static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
424 					   unsigned int ring,
425 					   enum dma_ring_reg r)
426 {
427 	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
428 			      (DMA_RING_SIZE * ring) +
429 			      genet_dma_ring_regs[r]);
430 }
431 
432 static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
433 					     unsigned int ring, u32 val,
434 					     enum dma_ring_reg r)
435 {
436 	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
437 			(DMA_RING_SIZE * ring) +
438 			genet_dma_ring_regs[r]);
439 }
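/* Worked example (hypothetical ring number): reading RDMA_PROD_INDEX
 * for ring 2 resolves to
 *
 *	priv->base + rdma_offset
 *		   + 256 * DMA_DESC_SIZE	(GENET_RDMA_REG_OFF)
 *		   + 2 * DMA_RING_SIZE		(one register block per ring)
 *		   + genet_dma_ring_regs[RDMA_PROD_INDEX]
 *
 * i.e. the 256 buffer descriptors come first, followed by one block of
 * ring registers per ring, with the final offset looked up in the
 * per-version table.
 */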
440 
441 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
442 {
443 	u32 offset;
444 	u32 reg;
445 
446 	if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
447 		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
448 		reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) |
449 			RBUF_HFB_EN;
450 		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
451 	} else {
452 		offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
453 		reg = bcmgenet_hfb_reg_readl(priv, offset);
454 		reg |= (1 << (f_index % 32));
455 		bcmgenet_hfb_reg_writel(priv, reg, offset);
456 		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
457 		reg |= RBUF_HFB_EN;
458 		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
459 	}
460 }
461 
462 static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
463 {
464 	u32 offset, reg, reg1;
465 
466 	if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
467 		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
468 		reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT));
469 		if (!(reg & RBUF_HFB_FILTER_EN_MASK))
470 			reg &= ~RBUF_HFB_EN;
471 		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
472 	} else {
473 		offset = HFB_FLT_ENABLE_V3PLUS;
474 		reg = bcmgenet_hfb_reg_readl(priv, offset);
475 		reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
476 		if (f_index < 32) {
477 			reg1 &= ~(1 << (f_index % 32));
478 			bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
479 		} else {
480 			reg &= ~(1 << (f_index % 32));
481 			bcmgenet_hfb_reg_writel(priv, reg, offset);
482 		}
483 		if (!reg && !reg1) {
484 			reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
485 			reg &= ~RBUF_HFB_EN;
486 			bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
487 		}
488 	}
489 }
490 
491 static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
492 						     u32 f_index, u32 rx_queue)
493 {
494 	u32 offset;
495 	u32 reg;
496 
497 	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
498 		return;
499 
500 	offset = f_index / 8;
501 	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
502 	reg &= ~(0xF << (4 * (f_index % 8)));
503 	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
504 	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
505 }
506 
507 static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
508 					   u32 f_index, u32 f_length)
509 {
510 	u32 offset;
511 	u32 reg;
512 
513 	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
514 		offset = HFB_FLT_LEN_V2;
515 	else
516 		offset = HFB_FLT_LEN_V3PLUS;
517 
518 	offset += sizeof(u32) *
519 		  ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);
520 	reg = bcmgenet_hfb_reg_readl(priv, offset);
521 	reg &= ~(0xFF << (8 * (f_index % 4)));
522 	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
523 	bcmgenet_hfb_reg_writel(priv, reg, offset);
524 }
525 
526 static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
527 {
528 	while (size) {
529 		switch (*(unsigned char *)mask++) {
530 		case 0x00:
531 		case 0x0f:
532 		case 0xf0:
533 		case 0xff:
534 			size--;
535 			continue;
536 		default:
537 			return -EINVAL;
538 		}
539 	}
540 
541 	return 0;
542 }
543 
544 #define VALIDATE_MASK(x) \
545 	bcmgenet_hfb_validate_mask(&(x), sizeof(x))
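/* The filter hardware matches per nibble, hence the four accepted mask
 * bytes above: for example 0xf0 (match only the high nibble) or 0x00
 * (don't care) passes validation, while 0x80 or 0xfe is rejected with
 * -EINVAL because individual bits cannot be masked.
 */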
546 
547 static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
548 				    u32 offset, void *val, void *mask,
549 				    size_t size)
550 {
551 	u32 index, tmp;
552 
553 	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
554 	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
555 
556 	while (size--) {
557 		if (offset++ & 1) {
558 			tmp &= ~0x300FF;
559 			tmp |= (*(unsigned char *)val++);
560 			switch ((*(unsigned char *)mask++)) {
561 			case 0xFF:
562 				tmp |= 0x30000;
563 				break;
564 			case 0xF0:
565 				tmp |= 0x20000;
566 				break;
567 			case 0x0F:
568 				tmp |= 0x10000;
569 				break;
570 			}
571 			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
572 			if (size)
573 				tmp = bcmgenet_hfb_readl(priv,
574 							 index * sizeof(u32));
575 		} else {
576 			tmp &= ~0xCFF00;
577 			tmp |= (*(unsigned char *)val++) << 8;
578 			switch ((*(unsigned char *)mask++)) {
579 			case 0xFF:
580 				tmp |= 0xC0000;
581 				break;
582 			case 0xF0:
583 				tmp |= 0x80000;
584 				break;
585 			case 0x0F:
586 				tmp |= 0x40000;
587 				break;
588 			}
589 			if (!size)
590 				bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
591 		}
592 	}
593 
594 	return 0;
595 }
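/* Each 32-bit HFB word written above holds two filter bytes plus four
 * per-nibble "valid" bits:
 *
 *	bits [19:16]	nibble-valid flags
 *	bits [15:8]	byte at the even offset
 *	bits [7:0]	byte at the odd offset
 *
 * For example, an even-offset byte 0xAB masked with 0xF0 becomes
 * tmp |= (0xAB << 8) | 0x80000, enabling only its high nibble in the
 * match.
 */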
596 
597 static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
598 					     struct bcmgenet_rxnfc_rule *rule)
599 {
600 	struct ethtool_rx_flow_spec *fs = &rule->fs;
601 	u32 offset = 0, f_length = 0, f, q;
602 	u8 val_8, mask_8;
603 	__be16 val_16;
604 	u16 mask_16;
605 	size_t size;
606 
607 	f = fs->location + 1;
608 	if (fs->flow_type & FLOW_MAC_EXT) {
609 		bcmgenet_hfb_insert_data(priv, f, 0,
610 					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
611 					 sizeof(fs->h_ext.h_dest));
612 	}
613 
614 	if (fs->flow_type & FLOW_EXT) {
615 		if (fs->m_ext.vlan_etype ||
616 		    fs->m_ext.vlan_tci) {
617 			bcmgenet_hfb_insert_data(priv, f, 12,
618 						 &fs->h_ext.vlan_etype,
619 						 &fs->m_ext.vlan_etype,
620 						 sizeof(fs->h_ext.vlan_etype));
621 			bcmgenet_hfb_insert_data(priv, f, 14,
622 						 &fs->h_ext.vlan_tci,
623 						 &fs->m_ext.vlan_tci,
624 						 sizeof(fs->h_ext.vlan_tci));
625 			offset += VLAN_HLEN;
626 			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
627 		}
628 	}
629 
630 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
631 	case ETHER_FLOW:
632 		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
633 		bcmgenet_hfb_insert_data(priv, f, 0,
634 					 &fs->h_u.ether_spec.h_dest,
635 					 &fs->m_u.ether_spec.h_dest,
636 					 sizeof(fs->h_u.ether_spec.h_dest));
637 		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
638 					 &fs->h_u.ether_spec.h_source,
639 					 &fs->m_u.ether_spec.h_source,
640 					 sizeof(fs->h_u.ether_spec.h_source));
641 		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
642 					 &fs->h_u.ether_spec.h_proto,
643 					 &fs->m_u.ether_spec.h_proto,
644 					 sizeof(fs->h_u.ether_spec.h_proto));
645 		break;
646 	case IP_USER_FLOW:
647 		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
648 		/* Specify IP Ether Type */
649 		val_16 = htons(ETH_P_IP);
650 		mask_16 = 0xFFFF;
651 		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
652 					 &val_16, &mask_16, sizeof(val_16));
653 		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
654 					 &fs->h_u.usr_ip4_spec.tos,
655 					 &fs->m_u.usr_ip4_spec.tos,
656 					 sizeof(fs->h_u.usr_ip4_spec.tos));
657 		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
658 					 &fs->h_u.usr_ip4_spec.proto,
659 					 &fs->m_u.usr_ip4_spec.proto,
660 					 sizeof(fs->h_u.usr_ip4_spec.proto));
661 		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
662 					 &fs->h_u.usr_ip4_spec.ip4src,
663 					 &fs->m_u.usr_ip4_spec.ip4src,
664 					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
665 		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
666 					 &fs->h_u.usr_ip4_spec.ip4dst,
667 					 &fs->m_u.usr_ip4_spec.ip4dst,
668 					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
669 		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
670 			break;
671 
672 		/* Only supports 20 byte IPv4 header */
673 		val_8 = 0x45;
674 		mask_8 = 0xFF;
675 		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
676 					 &val_8, &mask_8,
677 					 sizeof(val_8));
678 		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
679 		bcmgenet_hfb_insert_data(priv, f,
680 					 ETH_HLEN + 20 + offset,
681 					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
682 					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
683 					 size);
684 		f_length += DIV_ROUND_UP(size, 2);
685 		break;
686 	}
687 
688 	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
689 	if (fs->ring_cookie == RX_CLS_FLOW_WAKE)
690 		q = 0;
691 	else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
692 		q = priv->hw_params->rx_queues + 1;
693 	else
694 		/* Other Rx rings are direct mapped here */
695 		q = fs->ring_cookie;
696 	bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
697 	bcmgenet_hfb_enable_filter(priv, f);
698 	rule->state = BCMGENET_RXNFC_STATE_ENABLED;
699 }
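/* A rule handled by this function is typically installed from
 * userspace with something like (hypothetical interface name):
 *
 *	ethtool -N eth0 flow-type ether dst 00:10:18:00:00:01 action 2
 *
 * which arrives as an ETHER_FLOW spec and steers matching frames to Rx
 * ring 2; "action -1" (discard) maps to RX_CLS_FLOW_DISC above.
 */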
700 
701 /* bcmgenet_hfb_clear
702  *
703  * Clear Hardware Filter Block and disable all filtering.
704  */
705 static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
706 {
707 	u32 base, i;
708 
709 	bcmgenet_hfb_set_filter_length(priv, f_index, 0);
710 	base = f_index * priv->hw_params->hfb_filter_size;
711 	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
712 		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
713 }
714 
715 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
716 {
717 	u32 i;
718 
719 	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
720 
721 	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) {
722 		bcmgenet_hfb_reg_writel(priv, 0,
723 					HFB_FLT_ENABLE_V3PLUS);
724 		bcmgenet_hfb_reg_writel(priv, 0,
725 					HFB_FLT_ENABLE_V3PLUS + 4);
726 		for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
727 			bcmgenet_rdma_writel(priv, 0, i);
728 	}
729 
730 	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
731 		bcmgenet_hfb_clear_filter(priv, i);
732 
733 	/* Enable filter 0 to send default flow to ring 0 */
734 	bcmgenet_hfb_set_filter_length(priv, 0, 4);
735 	bcmgenet_hfb_enable_filter(priv, 0);
736 }
737 
738 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
739 {
740 	int i;
741 
742 	INIT_LIST_HEAD(&priv->rxnfc_list);
743 	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
744 		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
745 		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
746 	}
747 
748 	bcmgenet_hfb_clear(priv);
749 }
750 
751 static int bcmgenet_begin(struct net_device *dev)
752 {
753 	struct bcmgenet_priv *priv = netdev_priv(dev);
754 
755 	/* Turn on the clock */
756 	return clk_prepare_enable(priv->clk);
757 }
758 
759 static void bcmgenet_complete(struct net_device *dev)
760 {
761 	struct bcmgenet_priv *priv = netdev_priv(dev);
762 
763 	/* Turn off the clock */
764 	clk_disable_unprepare(priv->clk);
765 }
766 
767 static int bcmgenet_get_link_ksettings(struct net_device *dev,
768 				       struct ethtool_link_ksettings *cmd)
769 {
770 	if (!netif_running(dev))
771 		return -EINVAL;
772 
773 	if (!dev->phydev)
774 		return -ENODEV;
775 
776 	phy_ethtool_ksettings_get(dev->phydev, cmd);
777 
778 	return 0;
779 }
780 
781 static int bcmgenet_set_link_ksettings(struct net_device *dev,
782 				       const struct ethtool_link_ksettings *cmd)
783 {
784 	if (!netif_running(dev))
785 		return -EINVAL;
786 
787 	if (!dev->phydev)
788 		return -ENODEV;
789 
790 	return phy_ethtool_ksettings_set(dev->phydev, cmd);
791 }
792 
793 static int bcmgenet_set_features(struct net_device *dev,
794 				 netdev_features_t features)
795 {
796 	struct bcmgenet_priv *priv = netdev_priv(dev);
797 	u32 reg;
798 	int ret;
799 
800 	ret = clk_prepare_enable(priv->clk);
801 	if (ret)
802 		return ret;
803 
804 	/* Make sure we reflect the value of CRC_CMD_FWD */
805 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
806 	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
807 
808 	clk_disable_unprepare(priv->clk);
809 
810 	return ret;
811 }
812 
813 static u32 bcmgenet_get_msglevel(struct net_device *dev)
814 {
815 	struct bcmgenet_priv *priv = netdev_priv(dev);
816 
817 	return priv->msg_enable;
818 }
819 
820 static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
821 {
822 	struct bcmgenet_priv *priv = netdev_priv(dev);
823 
824 	priv->msg_enable = level;
825 }
826 
827 static int bcmgenet_get_coalesce(struct net_device *dev,
828 				 struct ethtool_coalesce *ec,
829 				 struct kernel_ethtool_coalesce *kernel_coal,
830 				 struct netlink_ext_ack *extack)
831 {
832 	struct bcmgenet_priv *priv = netdev_priv(dev);
833 	struct bcmgenet_rx_ring *ring;
834 	unsigned int i;
835 
836 	ec->tx_max_coalesced_frames =
837 		bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
838 	ec->rx_max_coalesced_frames =
839 		bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
840 	ec->rx_coalesce_usecs =
841 		bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT) * 8192 / 1000;
842 
843 	for (i = 0; i <= priv->hw_params->rx_queues; i++) {
844 		ring = &priv->rx_rings[i];
845 		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
846 	}
847 
848 	return 0;
849 }
850 
851 static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
852 				     u32 usecs, u32 pkts)
853 {
854 	struct bcmgenet_priv *priv = ring->priv;
855 	unsigned int i = ring->index;
856 	u32 reg;
857 
858 	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);
859 
860 	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
861 	reg &= ~DMA_TIMEOUT_MASK;
862 	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
863 	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
864 }
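/* Worked example: with the 8.192 us timeout tick described in
 * bcmgenet_set_coalesce() below, a request of usecs = 50 programs
 * DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks, for an effective timeout of
 * roughly 57.3 us; the granularity is inherently coarse.
 */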
865 
866 static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
867 					  struct ethtool_coalesce *ec)
868 {
869 	struct dim_cq_moder moder;
870 	u32 usecs, pkts;
871 
872 	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
873 	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
874 	usecs = ring->rx_coalesce_usecs;
875 	pkts = ring->rx_max_coalesced_frames;
876 
877 	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
878 		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
879 		usecs = moder.usec;
880 		pkts = moder.pkts;
881 	}
882 
883 	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
884 	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
885 }
886 
887 static int bcmgenet_set_coalesce(struct net_device *dev,
888 				 struct ethtool_coalesce *ec,
889 				 struct kernel_ethtool_coalesce *kernel_coal,
890 				 struct netlink_ext_ack *extack)
891 {
892 	struct bcmgenet_priv *priv = netdev_priv(dev);
893 	unsigned int i;
894 
895 	/* The base system clock is 125 MHz; the DMA timeout is this reference
896 	 * clock divided by 1024, which yields roughly 8.192 us. Our maximum
897 	 * value has to fit in DMA_TIMEOUT_MASK (16 bits).
898 	 */
899 	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
900 	    ec->tx_max_coalesced_frames == 0 ||
901 	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
902 	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
903 		return -EINVAL;
904 
905 	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
906 		return -EINVAL;
907 
908 	/* GENET TDMA hardware does not support a configurable timeout, but will
909 	 * always generate an interrupt either after MBDONE packets have been
910 	 * transmitted, or when the ring is empty.
911 	 */
912 
913 	/* Program all TX queues with the same values, as there is no
914 	 * ethtool knob to do coalescing on a per-queue basis
915 	 */
916 	for (i = 0; i <= priv->hw_params->tx_queues; i++)
917 		bcmgenet_tdma_ring_writel(priv, i,
918 					  ec->tx_max_coalesced_frames,
919 					  DMA_MBUF_DONE_THRESH);
920 
921 	for (i = 0; i <= priv->hw_params->rx_queues; i++)
922 		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
923 
924 	return 0;
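/* The checks above correspond to the standard coalescing knobs, e.g.
 * (hypothetical interface name):
 *
 *	ethtool -C eth0 rx-usecs 50 rx-frames 16 adaptive-rx on
 *	ethtool -C eth0 tx-frames 8
 *
 * Zero for tx-frames, or zero for both rx-usecs and rx-frames, is
 * rejected with -EINVAL.
 */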
925 }
926 
927 static void bcmgenet_get_pauseparam(struct net_device *dev,
928 				    struct ethtool_pauseparam *epause)
929 {
930 	struct bcmgenet_priv *priv;
931 	u32 umac_cmd;
932 
933 	priv = netdev_priv(dev);
934 
935 	epause->autoneg = priv->autoneg_pause;
936 
937 	if (netif_carrier_ok(dev)) {
938 		/* report active state when link is up */
939 		umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
940 		epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
941 		epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
942 	} else {
943 		/* otherwise report stored settings */
944 		epause->tx_pause = priv->tx_pause;
945 		epause->rx_pause = priv->rx_pause;
946 	}
947 }
948 
949 static int bcmgenet_set_pauseparam(struct net_device *dev,
950 				   struct ethtool_pauseparam *epause)
951 {
952 	struct bcmgenet_priv *priv = netdev_priv(dev);
953 
954 	if (!dev->phydev)
955 		return -ENODEV;
956 
957 	if (!phy_validate_pause(dev->phydev, epause))
958 		return -EINVAL;
959 
960 	priv->autoneg_pause = !!epause->autoneg;
961 	priv->tx_pause = !!epause->tx_pause;
962 	priv->rx_pause = !!epause->rx_pause;
963 
964 	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
965 
966 	return 0;
967 }
968 
969 /* ethtool statistics definitions. */
970 enum bcmgenet_stat_type {
971 	BCMGENET_STAT_RTNL = -1,
972 	BCMGENET_STAT_MIB_RX,
973 	BCMGENET_STAT_MIB_TX,
974 	BCMGENET_STAT_RUNT,
975 	BCMGENET_STAT_MISC,
976 	BCMGENET_STAT_SOFT,
977 	BCMGENET_STAT_SOFT64,
978 };
979 
980 struct bcmgenet_stats {
981 	char stat_string[ETH_GSTRING_LEN];
982 	int stat_sizeof;
983 	int stat_offset;
984 	enum bcmgenet_stat_type type;
985 	/* reg offset from UMAC base for misc counters */
986 	u16 reg_offset;
987 	/* sync for u64 stats counters */
988 	int syncp_offset;
989 };
990 
991 #define STAT_RTNL(m) { \
992 	.stat_string = __stringify(m), \
993 	.stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \
994 	.stat_offset = offsetof(struct rtnl_link_stats64, m), \
995 	.type = BCMGENET_STAT_RTNL, \
996 }
997 
998 #define STAT_GENET_MIB(str, m, _type) { \
999 	.stat_string = str, \
1000 	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
1001 	.stat_offset = offsetof(struct bcmgenet_priv, m), \
1002 	.type = _type, \
1003 }
1004 
1005 #define STAT_GENET_SOFT_MIB64(str, s, m) { \
1006 	.stat_string = str, \
1007 	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \
1008 	.stat_offset = offsetof(struct bcmgenet_priv, s.m), \
1009 	.type = BCMGENET_STAT_SOFT64, \
1010 	.syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \
1011 }
1012 
1013 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
1014 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
1015 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
1016 #define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
1017 
1018 #define STAT_GENET_MISC(str, m, offset) { \
1019 	.stat_string = str, \
1020 	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
1021 	.stat_offset = offsetof(struct bcmgenet_priv, m), \
1022 	.type = BCMGENET_STAT_MISC, \
1023 	.reg_offset = offset, \
1024 }
1025 
1026 #define STAT_GENET_Q(num) \
1027 	STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \
1028 			tx_rings[num].stats64, packets), \
1029 	STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \
1030 			tx_rings[num].stats64, bytes), \
1031 	STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \
1032 			tx_rings[num].stats64, errors), \
1033 	STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \
1034 			tx_rings[num].stats64, dropped), \
1035 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \
1036 			rx_rings[num].stats64, bytes),	 \
1037 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \
1038 			rx_rings[num].stats64, packets), \
1039 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \
1040 			rx_rings[num].stats64, errors), \
1041 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \
1042 			rx_rings[num].stats64, dropped), \
1043 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \
1044 			rx_rings[num].stats64, multicast), \
1045 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \
1046 			rx_rings[num].stats64, missed), \
1047 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \
1048 			rx_rings[num].stats64, length_errors), \
1049 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \
1050 			rx_rings[num].stats64, over_errors), \
1051 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \
1052 			rx_rings[num].stats64, crc_errors), \
1053 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \
1054 			rx_rings[num].stats64, frame_errors), \
1055 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \
1056 			rx_rings[num].stats64, fragmented_errors), \
1057 	STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \
1058 			rx_rings[num].stats64, broadcast)
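/* Each STAT_GENET_Q(n) expansion contributes sixteen "ethtool -S"
 * strings, txq<n>_packets through rxq<n>_broadcast, all backed by
 * per-ring u64 counters read under their u64_stats syncp.
 */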
1059 
1060 /* There is a 0xC gap between the end of RX and beginning of TX stats, and
1061  * again between the end of TX stats and the beginning of the RX RUNT stats.
1062  */
1063 #define BCMGENET_STAT_OFFSET	0xc
1064 
1065 /* Hardware counters must be kept in sync because the order/offset
1066  * is important here (order in structure declaration = order in hardware)
1067  */
1068 static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
1069 	/* general stats */
1070 	STAT_RTNL(rx_packets),
1071 	STAT_RTNL(tx_packets),
1072 	STAT_RTNL(rx_bytes),
1073 	STAT_RTNL(tx_bytes),
1074 	STAT_RTNL(rx_errors),
1075 	STAT_RTNL(tx_errors),
1076 	STAT_RTNL(rx_dropped),
1077 	STAT_RTNL(tx_dropped),
1078 	STAT_RTNL(multicast),
1079 	STAT_RTNL(rx_missed_errors),
1080 	STAT_RTNL(rx_length_errors),
1081 	STAT_RTNL(rx_over_errors),
1082 	STAT_RTNL(rx_crc_errors),
1083 	STAT_RTNL(rx_frame_errors),
1084 	/* UniMAC RSV counters */
1085 	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
1086 	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
1087 	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
1088 	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
1089 	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
1090 	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
1091 	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
1092 	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
1093 	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
1094 	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
1095 	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
1096 	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
1097 	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
1098 	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
1099 	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
1100 	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
1101 	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
1102 	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
1103 	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
1104 	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
1105 	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
1106 	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
1107 	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
1108 	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
1109 	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
1110 	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
1111 	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
1112 	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
1113 	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
1114 	/* UniMAC TSV counters */
1115 	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
1116 	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
1117 	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
1118 	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
1119 	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
1120 	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
1121 	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
1122 	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
1123 	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
1124 	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
1125 	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
1126 	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
1127 	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
1128 	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
1129 	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
1130 	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
1131 	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
1132 	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
1133 	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
1134 	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
1135 	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
1136 	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
1137 	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
1138 	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
1139 	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
1140 	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
1141 	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
1142 	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
1143 	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
1144 	/* UniMAC RUNT counters */
1145 	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
1146 	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
1147 	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
1148 	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
1149 	/* Misc UniMAC counters */
1150 	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
1151 			UMAC_RBUF_OVFL_CNT_V1),
1152 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
1153 			UMAC_RBUF_ERR_CNT_V1),
1154 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
1155 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
1156 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
1157 	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
1158 	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
1159 	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
1160 			    mib.tx_realloc_tsb_failed),
1161 	/* Per TX queues */
1162 	STAT_GENET_Q(0),
1163 	STAT_GENET_Q(1),
1164 	STAT_GENET_Q(2),
1165 	STAT_GENET_Q(3),
1166 	STAT_GENET_Q(4),
1167 };
1168 
1169 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
1170 
1171 #define BCMGENET_STATS64_ADD(stats, m, v) \
1172 	do { \
1173 		u64_stats_update_begin(&stats->syncp); \
1174 		u64_stats_add(&stats->m, v); \
1175 		u64_stats_update_end(&stats->syncp); \
1176 	} while (0)
1177 
1178 #define BCMGENET_STATS64_INC(stats, m) \
1179 	do { \
1180 		u64_stats_update_begin(&stats->syncp); \
1181 		u64_stats_inc(&stats->m); \
1182 		u64_stats_update_end(&stats->syncp); \
1183 	} while (0)
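/* A minimal sketch (hypothetical call site) of how a completion path
 * would bump the ring counters with the macros above:
 *
 *	BCMGENET_STATS64_INC((&ring->stats64), packets);
 *	BCMGENET_STATS64_ADD((&ring->stats64), bytes, len);
 *
 * so that readers in bcmgenet_get_ethtool_stats() observe consistent
 * 64-bit values on 32-bit systems via the seqcount.
 */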
1184 
1185 static void bcmgenet_get_drvinfo(struct net_device *dev,
1186 				 struct ethtool_drvinfo *info)
1187 {
1188 	strscpy(info->driver, "bcmgenet", sizeof(info->driver));
1189 }
1190 
1191 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
1192 {
1193 	switch (string_set) {
1194 	case ETH_SS_STATS:
1195 		return BCMGENET_STATS_LEN;
1196 	default:
1197 		return -EOPNOTSUPP;
1198 	}
1199 }
1200 
1201 static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
1202 				 u8 *data)
1203 {
1204 	const char *str;
1205 	int i;
1206 
1207 	switch (stringset) {
1208 	case ETH_SS_STATS:
1209 		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1210 			str = bcmgenet_gstrings_stats[i].stat_string;
1211 			ethtool_puts(&data, str);
1212 		}
1213 		break;
1214 	}
1215 }
1216 
1217 static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
1218 {
1219 	u16 new_offset;
1220 	u32 val;
1221 
1222 	switch (offset) {
1223 	case UMAC_RBUF_OVFL_CNT_V1:
1224 		if (GENET_IS_V2(priv))
1225 			new_offset = RBUF_OVFL_CNT_V2;
1226 		else
1227 			new_offset = RBUF_OVFL_CNT_V3PLUS;
1228 
1229 		val = bcmgenet_rbuf_readl(priv, new_offset);
1230 		/* clear if overflowed */
1231 		if (val == ~0)
1232 			bcmgenet_rbuf_writel(priv, 0, new_offset);
1233 		break;
1234 	case UMAC_RBUF_ERR_CNT_V1:
1235 		if (GENET_IS_V2(priv))
1236 			new_offset = RBUF_ERR_CNT_V2;
1237 		else
1238 			new_offset = RBUF_ERR_CNT_V3PLUS;
1239 
1240 		val = bcmgenet_rbuf_readl(priv, new_offset);
1241 		/* clear if overflowed */
1242 		if (val == ~0)
1243 			bcmgenet_rbuf_writel(priv, 0, new_offset);
1244 		break;
1245 	default:
1246 		val = bcmgenet_umac_readl(priv, offset);
1247 		/* clear if overflowed */
1248 		if (val == ~0)
1249 			bcmgenet_umac_writel(priv, 0, offset);
1250 		break;
1251 	}
1252 
1253 	return val;
1254 }
1255 
1256 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
1257 {
1258 	int i, j = 0;
1259 
1260 	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1261 		const struct bcmgenet_stats *s;
1262 		u8 offset = 0;
1263 		u32 val = 0;
1264 		char *p;
1265 
1266 		s = &bcmgenet_gstrings_stats[i];
1267 		switch (s->type) {
1268 		case BCMGENET_STAT_RTNL:
1269 		case BCMGENET_STAT_SOFT:
1270 		case BCMGENET_STAT_SOFT64:
1271 			continue;
1272 		case BCMGENET_STAT_RUNT:
1273 			offset += BCMGENET_STAT_OFFSET;
1274 			fallthrough;
1275 		case BCMGENET_STAT_MIB_TX:
1276 			offset += BCMGENET_STAT_OFFSET;
1277 			fallthrough;
1278 		case BCMGENET_STAT_MIB_RX:
1279 			val = bcmgenet_umac_readl(priv,
1280 						  UMAC_MIB_START + j + offset);
1281 			offset = 0;	/* Reset Offset */
1282 			break;
1283 		case BCMGENET_STAT_MISC:
1284 			if (GENET_IS_V1(priv)) {
1285 				val = bcmgenet_umac_readl(priv, s->reg_offset);
1286 				/* clear if overflowed */
1287 				if (val == ~0)
1288 					bcmgenet_umac_writel(priv, 0,
1289 							     s->reg_offset);
1290 			} else {
1291 				val = bcmgenet_update_stat_misc(priv,
1292 								s->reg_offset);
1293 			}
1294 			break;
1295 		}
1296 
1297 		j += s->stat_sizeof;
1298 		p = (char *)priv + s->stat_offset;
1299 		*(u32 *)p = val;
1300 	}
1301 }
1302 
1303 static void bcmgenet_get_ethtool_stats(struct net_device *dev,
1304 				       struct ethtool_stats *stats,
1305 				       u64 *data)
1306 {
1307 	struct bcmgenet_priv *priv = netdev_priv(dev);
1308 	struct rtnl_link_stats64 stats64;
1309 	struct u64_stats_sync *syncp;
1310 	unsigned int start;
1311 	int i;
1312 
1313 	if (netif_running(dev))
1314 		bcmgenet_update_mib_counters(priv);
1315 
1316 	dev_get_stats(dev, &stats64);
1317 
1318 	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1319 		const struct bcmgenet_stats *s;
1320 		char *p;
1321 
1322 		s = &bcmgenet_gstrings_stats[i];
1323 		p = (char *)priv;
1324 
1325 		if (s->type == BCMGENET_STAT_SOFT64) {
1326 			syncp = (struct u64_stats_sync *)(p + s->syncp_offset);
1327 			do {
1328 				start = u64_stats_fetch_begin(syncp);
1329 				data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset));
1330 			} while (u64_stats_fetch_retry(syncp, start));
1331 		} else {
1332 			if (s->type == BCMGENET_STAT_RTNL)
1333 				p = (char *)&stats64;
1334 
1335 			p += s->stat_offset;
1336 			if (sizeof(unsigned long) != sizeof(u32) &&
1337 				s->stat_sizeof == sizeof(unsigned long))
1338 				data[i] = *(unsigned long *)p;
1339 			else
1340 				data[i] = *(u32 *)p;
1341 		}
1342 	}
1343 }
1344 
1345 void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
1346 {
1347 	struct bcmgenet_priv *priv = netdev_priv(dev);
1348 	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
1349 	u32 reg;
1350 
1351 	if (enable && !priv->clk_eee_enabled) {
1352 		clk_prepare_enable(priv->clk_eee);
1353 		priv->clk_eee_enabled = true;
1354 	}
1355 
1356 	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
1357 	if (enable)
1358 		reg |= EEE_EN;
1359 	else
1360 		reg &= ~EEE_EN;
1361 	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
1362 
1363 	/* Enable EEE and switch to a 27 MHz clock automatically */
1364 	reg = bcmgenet_readl(priv->base + off);
1365 	if (enable)
1366 		reg |= TBUF_EEE_EN | TBUF_PM_EN;
1367 	else
1368 		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
1369 	bcmgenet_writel(reg, priv->base + off);
1370 
1371 	/* Do the same thing for RBUF */
1372 	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
1373 	if (enable)
1374 		reg |= RBUF_EEE_EN | RBUF_PM_EN;
1375 	else
1376 		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
1377 	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
1378 
1379 	if (!enable && priv->clk_eee_enabled) {
1380 		clk_disable_unprepare(priv->clk_eee);
1381 		priv->clk_eee_enabled = false;
1382 	}
1383 
1384 }
1385 
1386 static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
1387 {
1388 	struct bcmgenet_priv *priv = netdev_priv(dev);
1389 	int ret;
1390 
1391 	if (GENET_IS_V1(priv))
1392 		return -EOPNOTSUPP;
1393 
1394 	if (!dev->phydev)
1395 		return -ENODEV;
1396 
1397 	ret = phy_ethtool_get_eee(dev->phydev, e);
1398 	if (ret)
1399 		return ret;
1400 
1401 	/* tx_lpi_timer is maintained in a MAC hardware register; the
1402 	 * PHY-level eee_cfg timer is not used for GENET.
1403 	 */
1404 	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
1405 
1406 	return 0;
1407 }
1408 
1409 static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
1410 {
1411 	struct bcmgenet_priv *priv = netdev_priv(dev);
1412 
1413 	if (GENET_IS_V1(priv))
1414 		return -EOPNOTSUPP;
1415 
1416 	if (!dev->phydev)
1417 		return -ENODEV;
1418 
1419 	bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
1420 
1421 	return phy_ethtool_set_eee(dev->phydev, e);
1422 }
1423 
1424 static int bcmgenet_validate_flow(struct net_device *dev,
1425 				  struct ethtool_rxnfc *cmd)
1426 {
1427 	struct ethtool_usrip4_spec *l4_mask;
1428 	struct ethhdr *eth_mask;
1429 
1430 	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
1431 	    cmd->fs.location != RX_CLS_LOC_ANY) {
1432 		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
1433 			   cmd->fs.location);
1434 		return -EINVAL;
1435 	}
1436 
1437 	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
1438 	case IP_USER_FLOW:
1439 		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
1440 		/* don't allow mask which isn't valid */
1441 		if (VALIDATE_MASK(l4_mask->ip4src) ||
1442 		    VALIDATE_MASK(l4_mask->ip4dst) ||
1443 		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
1444 		    VALIDATE_MASK(l4_mask->proto) ||
1445 		    VALIDATE_MASK(l4_mask->ip_ver) ||
1446 		    VALIDATE_MASK(l4_mask->tos)) {
1447 			netdev_err(dev, "rxnfc: Unsupported mask\n");
1448 			return -EINVAL;
1449 		}
1450 		break;
1451 	case ETHER_FLOW:
1452 		eth_mask = &cmd->fs.m_u.ether_spec;
1453 		/* don't allow mask which isn't valid */
1454 		if (VALIDATE_MASK(eth_mask->h_dest) ||
1455 		    VALIDATE_MASK(eth_mask->h_source) ||
1456 		    VALIDATE_MASK(eth_mask->h_proto)) {
1457 			netdev_err(dev, "rxnfc: Unsupported mask\n");
1458 			return -EINVAL;
1459 		}
1460 		break;
1461 	default:
1462 		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
1463 			   cmd->fs.flow_type);
1464 		return -EINVAL;
1465 	}
1466 
1467 	if ((cmd->fs.flow_type & FLOW_EXT)) {
1468 		/* don't allow mask which isn't valid */
1469 		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
1470 		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
1471 			netdev_err(dev, "rxnfc: Unsupported mask\n");
1472 			return -EINVAL;
1473 		}
1474 		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
1475 			netdev_err(dev, "rxnfc: user-def not supported\n");
1476 			return -EINVAL;
1477 		}
1478 	}
1479 
1480 	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
1481 		/* don't allow mask which isn't valid */
1482 		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
1483 			netdev_err(dev, "rxnfc: Unsupported mask\n");
1484 			return -EINVAL;
1485 		}
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 static int bcmgenet_insert_flow(struct net_device *dev,
1492 				struct ethtool_rxnfc *cmd)
1493 {
1494 	struct bcmgenet_priv *priv = netdev_priv(dev);
1495 	struct bcmgenet_rxnfc_rule *loc_rule;
1496 	int err, i;
1497 
1498 	if (priv->hw_params->hfb_filter_size < 128) {
1499 		netdev_err(dev, "rxnfc: Not supported by this device\n");
1500 		return -EINVAL;
1501 	}
1502 
1503 	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
1504 	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
1505 	    cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {
1506 		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
1507 			   cmd->fs.ring_cookie);
1508 		return -EINVAL;
1509 	}
1510 
1511 	err = bcmgenet_validate_flow(dev, cmd);
1512 	if (err)
1513 		return err;
1514 
1515 	if (cmd->fs.location == RX_CLS_LOC_ANY) {
1516 		list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
1517 			cmd->fs.location = loc_rule->fs.location;
1518 			err = memcmp(&loc_rule->fs, &cmd->fs,
1519 				     sizeof(struct ethtool_rx_flow_spec));
1520 			if (!err)
1521 				/* rule exists so return current location */
1522 				return 0;
1523 		}
1524 		for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
1525 			loc_rule = &priv->rxnfc_rules[i];
1526 			if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
1527 				cmd->fs.location = i;
1528 				break;
1529 			}
1530 		}
1531 		if (i == MAX_NUM_OF_FS_RULES) {
1532 			cmd->fs.location = RX_CLS_LOC_ANY;
1533 			return -ENOSPC;
1534 		}
1535 	} else {
1536 		loc_rule = &priv->rxnfc_rules[cmd->fs.location];
1537 	}
1538 	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
1539 		bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
1540 	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
1541 		list_del(&loc_rule->list);
1542 		bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
1543 	}
1544 	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
1545 	memcpy(&loc_rule->fs, &cmd->fs,
1546 	       sizeof(struct ethtool_rx_flow_spec));
1547 
1548 	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
1549 
1550 	list_add_tail(&loc_rule->list, &priv->rxnfc_list);
1551 
1552 	return 0;
1553 }
1554 
1555 static int bcmgenet_delete_flow(struct net_device *dev,
1556 				struct ethtool_rxnfc *cmd)
1557 {
1558 	struct bcmgenet_priv *priv = netdev_priv(dev);
1559 	struct bcmgenet_rxnfc_rule *rule;
1560 	int err = 0;
1561 
1562 	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
1563 		return -EINVAL;
1564 
1565 	rule = &priv->rxnfc_rules[cmd->fs.location];
1566 	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
1567 		err = -ENOENT;
1568 		goto out;
1569 	}
1570 
1571 	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
1572 		bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
1573 	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
1574 		list_del(&rule->list);
1575 		bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
1576 	}
1577 	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
1578 	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
1579 
1580 out:
1581 	return err;
1582 }
1583 
1584 static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1585 {
1586 	struct bcmgenet_priv *priv = netdev_priv(dev);
1587 	int err = 0;
1588 
1589 	switch (cmd->cmd) {
1590 	case ETHTOOL_SRXCLSRLINS:
1591 		err = bcmgenet_insert_flow(dev, cmd);
1592 		break;
1593 	case ETHTOOL_SRXCLSRLDEL:
1594 		err = bcmgenet_delete_flow(dev, cmd);
1595 		break;
1596 	default:
1597 		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
1598 			    cmd->cmd);
1599 		return -EINVAL;
1600 	}
1601 
1602 	return err;
1603 }
1604 
1605 static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
1606 			     int loc)
1607 {
1608 	struct bcmgenet_priv *priv = netdev_priv(dev);
1609 	struct bcmgenet_rxnfc_rule *rule;
1610 	int err = 0;
1611 
1612 	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
1613 		return -EINVAL;
1614 
1615 	rule = &priv->rxnfc_rules[loc];
1616 	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
1617 		err = -ENOENT;
1618 	else
1619 		memcpy(&cmd->fs, &rule->fs,
1620 		       sizeof(struct ethtool_rx_flow_spec));
1621 
1622 	return err;
1623 }
1624 
1625 static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
1626 {
1627 	struct list_head *pos;
1628 	int res = 0;
1629 
1630 	list_for_each(pos, &priv->rxnfc_list)
1631 		res++;
1632 
1633 	return res;
1634 }
1635 
1636 static u32 bcmgenet_get_rx_ring_count(struct net_device *dev)
1637 {
1638 	struct bcmgenet_priv *priv = netdev_priv(dev);
1639 
1640 	return priv->hw_params->rx_queues ?: 1;
1641 }
1642 
1643 static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1644 			      u32 *rule_locs)
1645 {
1646 	struct bcmgenet_priv *priv = netdev_priv(dev);
1647 	struct bcmgenet_rxnfc_rule *rule;
1648 	int err = 0;
1649 	int i = 0;
1650 
1651 	switch (cmd->cmd) {
1652 	case ETHTOOL_GRXCLSRLCNT:
1653 		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
1654 		cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
1655 		break;
1656 	case ETHTOOL_GRXCLSRULE:
1657 		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
1658 		break;
1659 	case ETHTOOL_GRXCLSRLALL:
1660 		list_for_each_entry(rule, &priv->rxnfc_list, list)
1661 			if (i < cmd->rule_cnt)
1662 				rule_locs[i++] = rule->fs.location;
1663 		cmd->rule_cnt = i;
1664 		cmd->data = MAX_NUM_OF_FS_RULES;
1665 		break;
1666 	default:
1667 		err = -EOPNOTSUPP;
1668 		break;
1669 	}
1670 
1671 	return err;
1672 }
1673 
1674 /* standard ethtool support functions. */
1675 static const struct ethtool_ops bcmgenet_ethtool_ops = {
1676 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1677 				     ETHTOOL_COALESCE_MAX_FRAMES |
1678 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
1679 	.begin			= bcmgenet_begin,
1680 	.complete		= bcmgenet_complete,
1681 	.get_strings		= bcmgenet_get_strings,
1682 	.get_sset_count		= bcmgenet_get_sset_count,
1683 	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
1684 	.get_drvinfo		= bcmgenet_get_drvinfo,
1685 	.get_link		= ethtool_op_get_link,
1686 	.get_msglevel		= bcmgenet_get_msglevel,
1687 	.set_msglevel		= bcmgenet_set_msglevel,
1688 	.get_wol		= bcmgenet_get_wol,
1689 	.set_wol		= bcmgenet_set_wol,
1690 	.get_eee		= bcmgenet_get_eee,
1691 	.set_eee		= bcmgenet_set_eee,
1692 	.nway_reset		= phy_ethtool_nway_reset,
1693 	.get_coalesce		= bcmgenet_get_coalesce,
1694 	.set_coalesce		= bcmgenet_set_coalesce,
1695 	.get_link_ksettings	= bcmgenet_get_link_ksettings,
1696 	.set_link_ksettings	= bcmgenet_set_link_ksettings,
1697 	.get_ts_info		= ethtool_op_get_ts_info,
1698 	.get_rxnfc		= bcmgenet_get_rxnfc,
1699 	.set_rxnfc		= bcmgenet_set_rxnfc,
1700 	.get_rx_ring_count	= bcmgenet_get_rx_ring_count,
1701 	.get_pauseparam		= bcmgenet_get_pauseparam,
1702 	.set_pauseparam		= bcmgenet_set_pauseparam,
1703 };
1704 
1705 /* Power down the unimac, based on mode. */
1706 static int bcmgenet_power_down(struct bcmgenet_priv *priv,
1707 				enum bcmgenet_power_mode mode)
1708 {
1709 	int ret = 0;
1710 	u32 reg;
1711 
1712 	switch (mode) {
1713 	case GENET_POWER_CABLE_SENSE:
1714 		phy_detach(priv->dev->phydev);
1715 		break;
1716 
1717 	case GENET_POWER_WOL_MAGIC:
1718 		ret = bcmgenet_wol_power_down_cfg(priv, mode);
1719 		break;
1720 
1721 	case GENET_POWER_PASSIVE:
1722 		/* Power down LED */
1723 		if (bcmgenet_has_ext(priv)) {
1724 			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1725 			if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv))
1726 				reg |= EXT_PWR_DOWN_PHY_EN |
1727 				       EXT_PWR_DOWN_PHY_RD |
1728 				       EXT_PWR_DOWN_PHY_SD |
1729 				       EXT_PWR_DOWN_PHY_RX |
1730 				       EXT_PWR_DOWN_PHY_TX |
1731 				       EXT_IDDQ_GLBL_PWR;
1732 			else
1733 				reg |= EXT_PWR_DOWN_PHY;
1734 
1735 			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
1736 			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1737 
1738 			bcmgenet_phy_power_set(priv->dev, false);
1739 		}
1740 		break;
1741 	default:
1742 		break;
1743 	}
1744 
1745 	return ret;
1746 }
1747 
1748 static int bcmgenet_power_up(struct bcmgenet_priv *priv,
1749 			     enum bcmgenet_power_mode mode)
1750 {
1751 	int ret = 0;
1752 	u32 reg;
1753 
1754 	if (!bcmgenet_has_ext(priv))
1755 		return ret;
1756 
1757 	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1758 
1759 	switch (mode) {
1760 	case GENET_POWER_PASSIVE:
1761 		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
1762 			 EXT_ENERGY_DET_MASK);
1763 		if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) {
1764 			reg &= ~(EXT_PWR_DOWN_PHY_EN |
1765 				 EXT_PWR_DOWN_PHY_RD |
1766 				 EXT_PWR_DOWN_PHY_SD |
1767 				 EXT_PWR_DOWN_PHY_RX |
1768 				 EXT_PWR_DOWN_PHY_TX |
1769 				 EXT_IDDQ_GLBL_PWR);
1770 			reg |=   EXT_PHY_RESET;
1771 			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1772 			mdelay(1);
1773 
1774 			reg &=  ~EXT_PHY_RESET;
1775 		} else {
1776 			reg &= ~EXT_PWR_DOWN_PHY;
1777 			reg |= EXT_PWR_DN_EN_LD;
1778 		}
1779 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1780 		bcmgenet_phy_power_set(priv->dev, true);
1781 		break;
1782 
1783 	case GENET_POWER_CABLE_SENSE:
1784 		/* enable APD */
1785 		if (!GENET_IS_V5(priv)) {
1786 			reg |= EXT_PWR_DN_EN_LD;
1787 			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1788 		}
1789 		break;
1790 	case GENET_POWER_WOL_MAGIC:
1791 		ret = bcmgenet_wol_power_up_cfg(priv, mode);
1792 		break;
1793 	default:
1794 		break;
1795 	}
1796 
1797 	return ret;
1798 }
1799 
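/* Claim the next free transmit control block.  write_ptr walks the
 * ring's [cb_ptr, end_ptr] window circularly; e.g. a 32-descriptor
 * queue with cb_ptr = 128 and end_ptr = 159 visits 128..159 before
 * wrapping back to 128.
 */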
1800 static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
1801 					 struct bcmgenet_tx_ring *ring)
1802 {
1803 	struct enet_cb *tx_cb_ptr;
1804 
1805 	tx_cb_ptr = ring->cbs;
1806 	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1807 
1808 	/* Advancing local write pointer */
1809 	if (ring->write_ptr == ring->end_ptr)
1810 		ring->write_ptr = ring->cb_ptr;
1811 	else
1812 		ring->write_ptr++;
1813 
1814 	return tx_cb_ptr;
1815 }
1816 
1817 static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
1818 					 struct bcmgenet_tx_ring *ring)
1819 {
1820 	struct enet_cb *tx_cb_ptr;
1821 
1822 	/* Rewinding local write pointer */
1823 	if (ring->write_ptr == ring->cb_ptr)
1824 		ring->write_ptr = ring->end_ptr;
1825 	else
1826 		ring->write_ptr--;
1827 
1828 	tx_cb_ptr = ring->cbs;
1829 	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1830 
1831 	return tx_cb_ptr;
1832 }
1833 
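/* Per-ring interrupt helpers: in the INTRL2_1 controller, Tx ring i
 * owns bit i and Rx ring i owns bit (UMAC_IRQ1_RX_INTR_SHIFT + i).
 * Masking goes through the dedicated MASK_SET/MASK_CLEAR registers,
 * so no read-modify-write of the mask is needed.
 */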
1834 static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
1835 {
1836 	bcmgenet_intrl2_1_writel(ring->priv,
1837 				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1838 				 INTRL2_CPU_MASK_SET);
1839 }
1840 
1841 static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
1842 {
1843 	bcmgenet_intrl2_1_writel(ring->priv,
1844 				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1845 				 INTRL2_CPU_MASK_CLEAR);
1846 }
1847 
1848 static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
1849 {
1850 	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1851 				 INTRL2_CPU_MASK_CLEAR);
1852 }
1853 
1854 static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
1855 {
1856 	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1857 				 INTRL2_CPU_MASK_SET);
1858 }
1859 
1860 /* Simple helper to free a transmit control block's resources
1861  * Returns an skb when the last transmit control block associated with the
1862  * skb is freed.  The skb should be freed by the caller if necessary.
1863  */
1864 static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
1865 					   struct enet_cb *cb)
1866 {
1867 	struct sk_buff *skb;
1868 
1869 	skb = cb->skb;
1870 
1871 	if (skb) {
1872 		cb->skb = NULL;
1873 		if (cb == GENET_CB(skb)->first_cb)
1874 			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1875 					 dma_unmap_len(cb, dma_len),
1876 					 DMA_TO_DEVICE);
1877 		else
1878 			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
1879 				       dma_unmap_len(cb, dma_len),
1880 				       DMA_TO_DEVICE);
1881 		dma_unmap_addr_set(cb, dma_addr, 0);
1882 
1883 		if (cb == GENET_CB(skb)->last_cb)
1884 			return skb;
1885 
1886 	} else if (dma_unmap_addr(cb, dma_addr)) {
1887 		dma_unmap_page(dev,
1888 			       dma_unmap_addr(cb, dma_addr),
1889 			       dma_unmap_len(cb, dma_len),
1890 			       DMA_TO_DEVICE);
1891 		dma_unmap_addr_set(cb, dma_addr, 0);
1892 	}
1893 
1894 	return NULL;
1895 }
1896 
1897 /* Simple helper to free a receive control block's resources */
1898 static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
1899 					   struct enet_cb *cb)
1900 {
1901 	struct sk_buff *skb;
1902 
1903 	skb = cb->skb;
1904 	cb->skb = NULL;
1905 
1906 	if (dma_unmap_addr(cb, dma_addr)) {
1907 		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1908 				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
1909 		dma_unmap_addr_set(cb, dma_addr, 0);
1910 	}
1911 
1912 	return skb;
1913 }
1914 
1915 /* Unlocked version of the reclaim routine */
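/* The hardware consumer index is a free-running 16-bit counter, so the
 * number of completed descriptors is computed modulo 2^16: e.g. an old
 * c_index of 0xfff0 and a new one of 0x0010 yield 0x20 reclaimable BDs.
 */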
1916 static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1917 					  struct bcmgenet_tx_ring *ring)
1918 {
1919 	struct bcmgenet_tx_stats64 *stats = &ring->stats64;
1920 	struct bcmgenet_priv *priv = netdev_priv(dev);
1921 	unsigned int txbds_processed = 0;
1922 	unsigned int bytes_compl = 0;
1923 	unsigned int pkts_compl = 0;
1924 	unsigned int txbds_ready;
1925 	unsigned int c_index;
1926 	struct sk_buff *skb;
1927 
1928 	/* Clear status before servicing to reduce spurious interrupts */
1929 	bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);
1930 
1931 	/* Compute how many buffers have been transmitted since the last reclaim */
1932 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
1933 		& DMA_C_INDEX_MASK;
1934 	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
1935 
1936 	netif_dbg(priv, tx_done, dev,
1937 		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
1938 		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
1939 
1940 	/* Reclaim transmitted buffers */
1941 	while (txbds_processed < txbds_ready) {
1942 		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
1943 					  &priv->tx_cbs[ring->clean_ptr]);
1944 		if (skb) {
1945 			pkts_compl++;
1946 			bytes_compl += GENET_CB(skb)->bytes_sent;
1947 			dev_consume_skb_any(skb);
1948 		}
1949 
1950 		txbds_processed++;
1951 		if (likely(ring->clean_ptr < ring->end_ptr))
1952 			ring->clean_ptr++;
1953 		else
1954 			ring->clean_ptr = ring->cb_ptr;
1955 	}
1956 
1957 	ring->free_bds += txbds_processed;
1958 	ring->c_index = c_index;
1959 
1960 	u64_stats_update_begin(&stats->syncp);
1961 	u64_stats_add(&stats->packets, pkts_compl);
1962 	u64_stats_add(&stats->bytes, bytes_compl);
1963 	u64_stats_update_end(&stats->syncp);
1964 
1965 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
1966 				  pkts_compl, bytes_compl);
1967 
1968 	return txbds_processed;
1969 }
1970 
1971 static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1972 				struct bcmgenet_tx_ring *ring,
1973 				bool all)
1974 {
1975 	struct bcmgenet_priv *priv = netdev_priv(dev);
1976 	struct device *kdev = &priv->pdev->dev;
1977 	unsigned int released, drop, wr_ptr;
1978 	struct enet_cb *cb_ptr;
1979 	struct sk_buff *skb;
1980 
1981 	spin_lock_bh(&ring->lock);
1982 	released = __bcmgenet_tx_reclaim(dev, ring);
1983 	if (all) {
1984 		skb = NULL;
1985 		drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
1986 		released += drop;
1987 		ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
1988 		ring->free_bds += drop;
1989 		while (drop--) {
1990 			cb_ptr = bcmgenet_put_txcb(priv, ring);
1991 			skb = cb_ptr->skb;
1992 			bcmgenet_free_tx_cb(kdev, cb_ptr);
1993 			if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
1994 				dev_consume_skb_any(skb);
1995 				skb = NULL;
1996 			}
1997 		}
1998 		if (skb)
1999 			dev_consume_skb_any(skb);
2000 		netdev_tx_reset_queue(netdev_get_tx_queue(dev, ring->index));
2001 		bcmgenet_tdma_ring_writel(priv, ring->index,
2002 					  ring->prod_index, TDMA_PROD_INDEX);
2003 		wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
2004 		bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
2005 					  TDMA_WRITE_PTR);
2006 	}
2007 	spin_unlock_bh(&ring->lock);
2008 
2009 	return released;
2010 }
2011 
2012 static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
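/* NAPI poll handler for Tx completions.  Unlike Rx, Tx reclaim is not
 * bounded by the budget: either everything outstanding is reclaimed and
 * the ring interrupt is re-armed, or the full budget is returned so
 * that NAPI polls again.
 */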
2013 {
2014 	struct bcmgenet_tx_ring *ring =
2015 		container_of(napi, struct bcmgenet_tx_ring, napi);
2016 	unsigned int work_done = 0;
2017 	struct netdev_queue *txq;
2018 
2019 	spin_lock(&ring->lock);
2020 	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
2021 	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
2022 		txq = netdev_get_tx_queue(ring->priv->dev, ring->index);
2023 		netif_tx_wake_queue(txq);
2024 	}
2025 	spin_unlock(&ring->lock);
2026 
2027 	if (work_done == 0) {
2028 		napi_complete(napi);
2029 		bcmgenet_tx_ring_int_enable(ring);
2030 
2031 		return 0;
2032 	}
2033 
2034 	return budget;
2035 }
2036 
2037 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
2038 {
2039 	struct bcmgenet_priv *priv = netdev_priv(dev);
2040 	int i = 0;
2041 
2042 	do {
2043 		bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
2044 	} while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
2045 }
2046 
2047 /* Reallocate the SKB to put enough headroom in front of it and insert
2048  * the transmit checksum offsets in the Transmit Status Block (TSB)
2049  */
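/* Worked example of the tx_csum_info encoding below: for TCP over IPv4
 * (14B Ethernet + 20B IP headers), skb_checksum_start_offset() is 98
 * once the 64B TSB has been pushed, so offset = 98 - 64 = 34 and, with
 * csum_offset = 16, the hardware starts checksumming at byte 34 and
 * writes the result at byte 50 (STATUS_TX_CSUM_LV marks these fields
 * valid).
 */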
2050 static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
2051 					struct sk_buff *skb,
2052 					struct bcmgenet_tx_ring *ring)
2053 {
2054 	struct bcmgenet_tx_stats64 *stats = &ring->stats64;
2055 	struct bcmgenet_priv *priv = netdev_priv(dev);
2056 	struct status_64 *status = NULL;
2057 	struct sk_buff *new_skb;
2058 	u16 offset;
2059 	u8 ip_proto;
2060 	__be16 ip_ver;
2061 	u32 tx_csum_info;
2062 
2063 	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
2064 		/* The 64B status block is enabled, so we must make sure the
2065 		 * skb has enough headroom for us to insert it.
2066 		 */
2067 		new_skb = skb_realloc_headroom(skb, sizeof(*status));
2068 		if (!new_skb) {
2069 			dev_kfree_skb_any(skb);
2070 			priv->mib.tx_realloc_tsb_failed++;
2071 			BCMGENET_STATS64_INC(stats, dropped);
2072 			return NULL;
2073 		}
2074 		dev_consume_skb_any(skb);
2075 		skb = new_skb;
2076 		priv->mib.tx_realloc_tsb++;
2077 	}
2078 
2079 	skb_push(skb, sizeof(*status));
2080 	status = (struct status_64 *)skb->data;
2081 
2082 	if (skb->ip_summed  == CHECKSUM_PARTIAL) {
2083 		ip_ver = skb->protocol;
2084 		switch (ip_ver) {
2085 		case htons(ETH_P_IP):
2086 			ip_proto = ip_hdr(skb)->protocol;
2087 			break;
2088 		case htons(ETH_P_IPV6):
2089 			ip_proto = ipv6_hdr(skb)->nexthdr;
2090 			break;
2091 		default:
2092 			/* don't use UDP flag */
2093 			ip_proto = 0;
2094 			break;
2095 		}
2096 
2097 		offset = skb_checksum_start_offset(skb) - sizeof(*status);
2098 		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
2099 				(offset + skb->csum_offset) |
2100 				STATUS_TX_CSUM_LV;
2101 
2102 		/* Set the special flag for UDP packets */
2103 		if (ip_proto == IPPROTO_UDP)
2104 			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
2105 
2106 		status->tx_csum_info = tx_csum_info;
2107 	}
2108 
2109 	return skb;
2110 }
2111 
2112 static void bcmgenet_hide_tsb(struct sk_buff *skb)
2113 {
2114 	__skb_pull(skb, sizeof(struct status_64));
2115 }
2116 
2117 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
2118 {
2119 	struct bcmgenet_priv *priv = netdev_priv(dev);
2120 	struct device *kdev = &priv->pdev->dev;
2121 	struct bcmgenet_tx_ring *ring = NULL;
2122 	struct enet_cb *tx_cb_ptr;
2123 	struct netdev_queue *txq;
2124 	int nr_frags, index;
2125 	dma_addr_t mapping;
2126 	unsigned int size;
2127 	skb_frag_t *frag;
2128 	u32 len_stat;
2129 	int ret;
2130 	int i;
2131 
2132 	index = skb_get_queue_mapping(skb);
2133 	/* Mapping strategy:
2134 	 * queue_mapping = 0, unclassified, packet transmitted through ring 0
2135 	 * queue_mapping = 1, goes to ring 1. (highest priority queue)
2136 	 * queue_mapping = 2, goes to ring 2.
2137 	 * queue_mapping = 3, goes to ring 3.
2138 	 * queue_mapping = 4, goes to ring 4.
2139 	 */
2140 	ring = &priv->tx_rings[index];
2141 	txq = netdev_get_tx_queue(dev, index);
2142 
2143 	nr_frags = skb_shinfo(skb)->nr_frags;
2144 
2145 	spin_lock(&ring->lock);
2146 	if (ring->free_bds <= (nr_frags + 1)) {
2147 		if (!netif_tx_queue_stopped(txq))
2148 			netif_tx_stop_queue(txq);
2149 		ret = NETDEV_TX_BUSY;
2150 		goto out;
2151 	}
2152 
2153 	/* Retain how many bytes will be sent on the wire, without the TSB
2154 	 * inserted by the transmit checksum offload
2155 	 */
2156 	GENET_CB(skb)->bytes_sent = skb->len;
2157 
2158 	/* add the Transmit Status Block */
2159 	skb = bcmgenet_add_tsb(dev, skb, ring);
2160 	if (!skb) {
2161 		ret = NETDEV_TX_OK;
2162 		goto out;
2163 	}
2164 
2165 	for (i = 0; i <= nr_frags; i++) {
2166 		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
2167 
2168 		BUG_ON(!tx_cb_ptr);
2169 
2170 		if (!i) {
2171 			/* Transmit single SKB or head of fragment list */
2172 			GENET_CB(skb)->first_cb = tx_cb_ptr;
2173 			size = skb_headlen(skb);
2174 			mapping = dma_map_single(kdev, skb->data, size,
2175 						 DMA_TO_DEVICE);
2176 		} else {
2177 			/* xmit fragment */
2178 			frag = &skb_shinfo(skb)->frags[i - 1];
2179 			size = skb_frag_size(frag);
2180 			mapping = skb_frag_dma_map(kdev, frag, 0, size,
2181 						   DMA_TO_DEVICE);
2182 		}
2183 
2184 		ret = dma_mapping_error(kdev, mapping);
2185 		if (ret) {
2186 			priv->mib.tx_dma_failed++;
2187 			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
2188 			ret = NETDEV_TX_OK;
2189 			goto out_unmap_frags;
2190 		}
2191 		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
2192 		dma_unmap_len_set(tx_cb_ptr, dma_len, size);
2193 
2194 		tx_cb_ptr->skb = skb;
2195 
2196 		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
2197 			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
2198 
2199 		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
2200 		 * will need to restore software padding of "runt" packets
2201 		 */
2202 		len_stat |= DMA_TX_APPEND_CRC;
2203 
2204 		if (!i) {
2205 			len_stat |= DMA_SOP;
2206 			if (skb->ip_summed == CHECKSUM_PARTIAL)
2207 				len_stat |= DMA_TX_DO_CSUM;
2208 		}
2209 		if (i == nr_frags)
2210 			len_stat |= DMA_EOP;
2211 
2212 		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
2213 	}
2214 
2215 	GENET_CB(skb)->last_cb = tx_cb_ptr;
2216 
2217 	bcmgenet_hide_tsb(skb);
2218 	skb_tx_timestamp(skb);
2219 
2220 	/* Decrement total BD count and advance our write pointer */
2221 	ring->free_bds -= nr_frags + 1;
2222 	ring->prod_index += nr_frags + 1;
2223 	ring->prod_index &= DMA_P_INDEX_MASK;
2224 
2225 	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
2226 
2227 	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
2228 		netif_tx_stop_queue(txq);
2229 
2230 	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
2231 		/* Packets are ready, update producer index */
2232 		bcmgenet_tdma_ring_writel(priv, ring->index,
2233 					  ring->prod_index, TDMA_PROD_INDEX);
2234 out:
2235 	spin_unlock(&ring->lock);
2236 
2237 	return ret;
2238 
2239 out_unmap_frags:
2240 	/* Back up for failed control block mapping */
2241 	bcmgenet_put_txcb(priv, ring);
2242 
2243 	/* Unmap successfully mapped control blocks */
2244 	while (i-- > 0) {
2245 		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
2246 		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
2247 	}
2248 
2249 	dev_kfree_skb(skb);
2250 	goto out;
2251 }
2252 
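/* Refill an Rx control block.  The replacement skb is allocated and
 * DMA-mapped before the old one is taken off the ring, so on allocation
 * or mapping failure the ring keeps its current buffer and only the
 * received packet is lost.
 */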
2253 static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
2254 					  struct enet_cb *cb)
2255 {
2256 	struct device *kdev = &priv->pdev->dev;
2257 	struct sk_buff *skb;
2258 	struct sk_buff *rx_skb;
2259 	dma_addr_t mapping;
2260 
2261 	/* Allocate a new Rx skb */
2262 	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
2263 				 GFP_ATOMIC | __GFP_NOWARN);
2264 	if (!skb) {
2265 		priv->mib.alloc_rx_buff_failed++;
2266 		netif_err(priv, rx_err, priv->dev,
2267 			  "%s: Rx skb allocation failed\n", __func__);
2268 		return NULL;
2269 	}
2270 
2271 	/* DMA-map the new Rx skb */
2272 	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
2273 				 DMA_FROM_DEVICE);
2274 	if (dma_mapping_error(kdev, mapping)) {
2275 		priv->mib.rx_dma_failed++;
2276 		dev_kfree_skb_any(skb);
2277 		netif_err(priv, rx_err, priv->dev,
2278 			  "%s: Rx skb DMA mapping failed\n", __func__);
2279 		return NULL;
2280 	}
2281 
2282 	/* Grab the current Rx skb from the ring and DMA-unmap it */
2283 	rx_skb = bcmgenet_free_rx_cb(kdev, cb);
2284 
2285 	/* Put the new Rx skb on the ring */
2286 	cb->skb = skb;
2287 	dma_unmap_addr_set(cb, dma_addr, mapping);
2288 	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
2289 	dmadesc_set_addr(priv, cb->bd_addr, mapping);
2290 
2291 	/* Return the current Rx skb to caller */
2292 	return rx_skb;
2293 }
2294 
2295 /* bcmgenet_desc_rx - descriptor-based Rx processing.
2296  * This can be called from the bottom half or from the NAPI polling method.
2297  */
2298 static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
2299 				     unsigned int budget)
2300 {
2301 	struct bcmgenet_rx_stats64 *stats = &ring->stats64;
2302 	struct bcmgenet_priv *priv = ring->priv;
2303 	struct net_device *dev = priv->dev;
2304 	struct enet_cb *cb;
2305 	struct sk_buff *skb;
2306 	u32 dma_length_status;
2307 	unsigned long dma_flag;
2308 	int len;
2309 	unsigned int rxpktprocessed = 0, rxpkttoprocess;
2310 	unsigned int bytes_processed = 0;
2311 	unsigned int p_index, mask;
2312 	unsigned int discards;
2313 
2314 	/* Clear status before servicing to reduce spurious interrupts */
2315 	mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
2316 	bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
2317 
2318 	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
2319 
2320 	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
2321 		   DMA_P_INDEX_DISCARD_CNT_MASK;
2322 	if (discards > ring->old_discards) {
2323 		discards = discards - ring->old_discards;
2324 		BCMGENET_STATS64_ADD(stats, missed, discards);
2325 		ring->old_discards += discards;
2326 
2327 		/* Clear HW register when we reach 75% of maximum 0xFFFF */
2328 		if (ring->old_discards >= 0xC000) {
2329 			ring->old_discards = 0;
2330 			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
2331 						  RDMA_PROD_INDEX);
2332 		}
2333 	}
2334 
2335 	p_index &= DMA_P_INDEX_MASK;
2336 	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
2337 
2338 	netif_dbg(priv, rx_status, dev,
2339 		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
2340 
2341 	while ((rxpktprocessed < rxpkttoprocess) &&
2342 	       (rxpktprocessed < budget)) {
2343 		struct status_64 *status;
2344 		__be16 rx_csum;
2345 
2346 		cb = &priv->rx_cbs[ring->read_ptr];
2347 		skb = bcmgenet_rx_refill(priv, cb);
2348 
2349 		if (unlikely(!skb)) {
2350 			BCMGENET_STATS64_INC(stats, dropped);
2351 			goto next;
2352 		}
2353 
2354 		status = (struct status_64 *)skb->data;
2355 		dma_length_status = status->length_status;
2356 		if (dev->features & NETIF_F_RXCSUM) {
2357 			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
2358 			if (rx_csum) {
2359 				skb->csum = (__force __wsum)ntohs(rx_csum);
2360 				skb->ip_summed = CHECKSUM_COMPLETE;
2361 			}
2362 		}
2363 
2364 		/* DMA flags and length are still valid no matter how
2365 		 * we got the Receive Status Vector (64B RSB or register)
2366 		 */
2367 		dma_flag = dma_length_status & 0xffff;
2368 		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
2369 
2370 		netif_dbg(priv, rx_status, dev,
2371 			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
2372 			  __func__, p_index, ring->c_index,
2373 			  ring->read_ptr, dma_length_status);
2374 
2375 		if (unlikely(len > RX_BUF_LENGTH)) {
2376 			netif_err(priv, rx_status, dev, "oversized packet\n");
2377 			BCMGENET_STATS64_INC(stats, length_errors);
2378 			dev_kfree_skb_any(skb);
2379 			goto next;
2380 		}
2381 
2382 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
2383 			netif_err(priv, rx_status, dev,
2384 				  "dropping fragmented packet!\n");
2385 			BCMGENET_STATS64_INC(stats, fragmented_errors);
2386 			dev_kfree_skb_any(skb);
2387 			goto next;
2388 		}
2389 
2390 		/* report errors */
2391 		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
2392 						DMA_RX_OV |
2393 						DMA_RX_NO |
2394 						DMA_RX_LG |
2395 						DMA_RX_RXER))) {
2396 			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
2397 				  (unsigned int)dma_flag);
2398 			u64_stats_update_begin(&stats->syncp);
2399 			if (dma_flag & DMA_RX_CRC_ERROR)
2400 				u64_stats_inc(&stats->crc_errors);
2401 			if (dma_flag & DMA_RX_OV)
2402 				u64_stats_inc(&stats->over_errors);
2403 			if (dma_flag & DMA_RX_NO)
2404 				u64_stats_inc(&stats->frame_errors);
2405 			if (dma_flag & DMA_RX_LG)
2406 				u64_stats_inc(&stats->length_errors);
2407 			if ((dma_flag & (DMA_RX_CRC_ERROR |
2408 						DMA_RX_OV |
2409 						DMA_RX_NO |
2410 						DMA_RX_LG |
2411 						DMA_RX_RXER)) == DMA_RX_RXER)
2412 				u64_stats_inc(&stats->errors);
2413 			u64_stats_update_end(&stats->syncp);
2414 			dev_kfree_skb_any(skb);
2415 			goto next;
2416 		} /* error packet */
2417 
2418 		skb_put(skb, len);
2419 
2420 		/* remove the RSB and the 2 bytes the hardware added for IP alignment */
2421 		skb_pull(skb, 66);
2422 		len -= 66;
2423 
2424 		if (priv->crc_fwd_en) {
2425 			skb_trim(skb, len - ETH_FCS_LEN);
2426 			len -= ETH_FCS_LEN;
2427 		}
2428 
2429 		bytes_processed += len;
2430 
2431 		/* Finish setting up the received SKB and send it to the kernel */
2432 		skb->protocol = eth_type_trans(skb, priv->dev);
2433 
2434 		u64_stats_update_begin(&stats->syncp);
2435 		u64_stats_inc(&stats->packets);
2436 		u64_stats_add(&stats->bytes, len);
2437 		if (dma_flag & DMA_RX_MULT)
2438 			u64_stats_inc(&stats->multicast);
2439 		else if (dma_flag & DMA_RX_BRDCAST)
2440 			u64_stats_inc(&stats->broadcast);
2441 		u64_stats_update_end(&stats->syncp);
2442 
2443 		/* Notify kernel */
2444 		napi_gro_receive(&ring->napi, skb);
2445 		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
2446 
2447 next:
2448 		rxpktprocessed++;
2449 		if (likely(ring->read_ptr < ring->end_ptr))
2450 			ring->read_ptr++;
2451 		else
2452 			ring->read_ptr = ring->cb_ptr;
2453 
2454 		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
2455 		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
2456 	}
2457 
2458 	ring->dim.bytes = bytes_processed;
2459 	ring->dim.packets = rxpktprocessed;
2460 
2461 	return rxpktprocessed;
2462 }
2463 
2464 /* Rx NAPI polling method */
2465 static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
2466 {
2467 	struct bcmgenet_rx_ring *ring = container_of(napi,
2468 			struct bcmgenet_rx_ring, napi);
2469 	struct dim_sample dim_sample = {};
2470 	unsigned int work_done;
2471 
2472 	work_done = bcmgenet_desc_rx(ring, budget);
2473 
2474 	if (work_done < budget && napi_complete_done(napi, work_done))
2475 		bcmgenet_rx_ring_int_enable(ring);
2476 
2477 	if (ring->dim.use_dim) {
2478 		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
2479 				  ring->dim.bytes, &dim_sample);
2480 		net_dim(&ring->dim.dim, &dim_sample);
2481 	}
2482 
2483 	return work_done;
2484 }
2485 
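/* DIM (dynamic interrupt moderation) worker: bcmgenet_rx_poll feeds
 * packet/byte samples to net_dim(), which schedules this work whenever
 * it settles on a new moderation profile.  The chosen usecs/pkts pair
 * is applied to the ring and a new measurement window is started.
 */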
2486 static void bcmgenet_dim_work(struct work_struct *work)
2487 {
2488 	struct dim *dim = container_of(work, struct dim, work);
2489 	struct bcmgenet_net_dim *ndim =
2490 			container_of(dim, struct bcmgenet_net_dim, dim);
2491 	struct bcmgenet_rx_ring *ring =
2492 			container_of(ndim, struct bcmgenet_rx_ring, dim);
2493 	struct dim_cq_moder cur_profile =
2494 			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
2495 
2496 	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
2497 	dim->state = DIM_START_MEASURE;
2498 }
2499 
2500 /* Assign skb to RX DMA descriptor. */
2501 static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
2502 				     struct bcmgenet_rx_ring *ring)
2503 {
2504 	struct enet_cb *cb;
2505 	struct sk_buff *skb;
2506 	int i;
2507 
2508 	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2509 
2510 	/* loop over each buffer needing assignment */
2511 	for (i = 0; i < ring->size; i++) {
2512 		cb = ring->cbs + i;
2513 		skb = bcmgenet_rx_refill(priv, cb);
2514 		if (skb)
2515 			dev_consume_skb_any(skb);
2516 		if (!cb->skb)
2517 			return -ENOMEM;
2518 	}
2519 
2520 	return 0;
2521 }
2522 
2523 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
2524 {
2525 	struct sk_buff *skb;
2526 	struct enet_cb *cb;
2527 	int i;
2528 
2529 	for (i = 0; i < priv->num_rx_bds; i++) {
2530 		cb = &priv->rx_cbs[i];
2531 
2532 		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
2533 		if (skb)
2534 			dev_consume_skb_any(skb);
2535 	}
2536 }
2537 
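/* Set or clear bits in UMAC_CMD under reg_lock.  If a software reset
 * is pending (CMD_SW_RESET set), the register must not be touched, so
 * the update is silently skipped.
 */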
2538 static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
2539 {
2540 	u32 reg;
2541 
2542 	spin_lock_bh(&priv->reg_lock);
2543 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2544 	if (reg & CMD_SW_RESET) {
2545 		spin_unlock_bh(&priv->reg_lock);
2546 		return;
2547 	}
2548 	if (enable)
2549 		reg |= mask;
2550 	else
2551 		reg &= ~mask;
2552 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2553 	spin_unlock_bh(&priv->reg_lock);
2554 
2555 	/* UniMAC stops on a packet boundary, wait for a full-size packet
2556 	 * to be processed
2557 	 */
2558 	if (enable == 0)
2559 		usleep_range(1000, 2000);
2560 }
2561 
2562 static void reset_umac(struct bcmgenet_priv *priv)
2563 {
2564 	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
2565 	bcmgenet_rbuf_ctrl_set(priv, 0);
2566 	udelay(10);
2567 
2568 	/* issue soft reset and disable MAC while updating its registers */
2569 	spin_lock_bh(&priv->reg_lock);
2570 	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
2571 	udelay(2);
2572 	spin_unlock_bh(&priv->reg_lock);
2573 }
2574 
2575 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
2576 {
2577 	/* Mask all interrupts. */
2578 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2579 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2580 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2581 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2582 }
2583 
2584 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
2585 {
2586 	u32 int0_enable = 0;
2587 
2588 	/* Monitor cable plug/unplug events for internal PHY, external PHY
2589 	 * and MoCA PHY
2590 	 */
2591 	if (priv->internal_phy) {
2592 		int0_enable |= UMAC_IRQ_LINK_EVENT;
2593 		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
2594 			int0_enable |= UMAC_IRQ_PHY_DET_R;
2595 	} else if (priv->ext_phy) {
2596 		int0_enable |= UMAC_IRQ_LINK_EVENT;
2597 	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2598 		if (bcmgenet_has_moca_link_det(priv))
2599 			int0_enable |= UMAC_IRQ_LINK_EVENT;
2600 	}
2601 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2602 }
2603 
2604 static void init_umac(struct bcmgenet_priv *priv)
2605 {
2606 	struct device *kdev = &priv->pdev->dev;
2607 	u32 reg;
2608 	u32 int0_enable = 0;
2609 
2610 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
2611 
2612 	reset_umac(priv);
2613 
2614 	/* clear Tx/Rx counters */
2615 	bcmgenet_umac_writel(priv,
2616 			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
2617 			     UMAC_MIB_CTRL);
2618 	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
2619 
2620 	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2621 
2622 	/* init tx registers, enable TSB */
2623 	reg = bcmgenet_tbuf_ctrl_get(priv);
2624 	reg |= TBUF_64B_EN;
2625 	bcmgenet_tbuf_ctrl_set(priv, reg);
2626 
2627 	/* init rx registers, enable ip header optimization and RSB */
2628 	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
2629 	reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
2630 	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
2631 
2632 	/* enable rx checksumming */
2633 	reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
2634 	reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
2635 	/* If UniMAC forwards CRC, we need to skip over it to get
2636 	 * a valid CHK bit to be set in the per-packet status word
2637 	 */
2638 	if (priv->crc_fwd_en)
2639 		reg |= RBUF_SKIP_FCS;
2640 	else
2641 		reg &= ~RBUF_SKIP_FCS;
2642 	bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
2643 
2644 	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
2645 		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
2646 
2647 	bcmgenet_intr_disable(priv);
2648 
2649 	/* Configure backpressure vectors for MoCA */
2650 	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2651 		reg = bcmgenet_bp_mc_get(priv);
2652 		reg |= BIT(priv->hw_params->bp_in_en_shift);
2653 
2654 		/* bp_mask: back pressure mask */
2655 		if (netif_is_multiqueue(priv->dev))
2656 			reg |= priv->hw_params->bp_in_mask;
2657 		else
2658 			reg &= ~priv->hw_params->bp_in_mask;
2659 		bcmgenet_bp_mc_set(priv, reg);
2660 	}
2661 
2662 	/* Enable MDIO interrupts on GENET v3+ */
2663 	if (bcmgenet_has_mdio_intr(priv))
2664 		int0_enable |= UMAC_IRQ_MDIO_EVENT;
2665 
2666 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2667 
2668 	dev_dbg(kdev, "done init umac\n");
2669 }
2670 
2671 static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
2672 			      void (*cb)(struct work_struct *work))
2673 {
2674 	struct bcmgenet_net_dim *dim = &ring->dim;
2675 
2676 	INIT_WORK(&dim->dim.work, cb);
2677 	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2678 	dim->event_ctr = 0;
2679 	dim->packets = 0;
2680 	dim->bytes = 0;
2681 }
2682 
2683 static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
2684 {
2685 	struct bcmgenet_net_dim *dim = &ring->dim;
2686 	struct dim_cq_moder moder;
2687 	u32 usecs, pkts;
2688 
2689 	usecs = ring->rx_coalesce_usecs;
2690 	pkts = ring->rx_max_coalesced_frames;
2691 
2692 	/* If DIM was enabled, re-apply default parameters */
2693 	if (dim->use_dim) {
2694 		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
2695 		usecs = moder.usec;
2696 		pkts = moder.pkts;
2697 	}
2698 
2699 	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
2700 }
2701 
2702 /* Initialize a Tx ring along with corresponding hardware registers */
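/* Note: DMA_START_ADDR, DMA_END_ADDR and the read/write pointers are
 * programmed in 32-bit-word units, hence the start_ptr * words_per_bd
 * scaling; e.g. with 3 words per descriptor, descriptor index 128 maps
 * to word address 384.
 */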
2703 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
2704 				  unsigned int index, unsigned int size,
2705 				  unsigned int start_ptr, unsigned int end_ptr)
2706 {
2707 	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
2708 	u32 words_per_bd = WORDS_PER_BD(priv);
2709 	u32 flow_period_val = 0;
2710 
2711 	spin_lock_init(&ring->lock);
2712 	ring->priv = priv;
2713 	ring->index = index;
2714 	ring->cbs = priv->tx_cbs + start_ptr;
2715 	ring->size = size;
2716 	ring->clean_ptr = start_ptr;
2717 	ring->c_index = 0;
2718 	ring->free_bds = size;
2719 	ring->write_ptr = start_ptr;
2720 	ring->cb_ptr = start_ptr;
2721 	ring->end_ptr = end_ptr - 1;
2722 	ring->prod_index = 0;
2723 
2724 	/* Set flow period for ring != 0 */
2725 	if (index)
2726 		flow_period_val = ENET_MAX_MTU_SIZE << 16;
2727 
2728 	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
2729 	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
2730 	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
2731 	/* Disable rate control for now */
2732 	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
2733 				  TDMA_FLOW_PERIOD);
2734 	bcmgenet_tdma_ring_writel(priv, index,
2735 				  ((size << DMA_RING_SIZE_SHIFT) |
2736 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2737 
2738 	/* Set start and end address, read and write pointers */
2739 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2740 				  DMA_START_ADDR);
2741 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2742 				  TDMA_READ_PTR);
2743 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2744 				  TDMA_WRITE_PTR);
2745 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2746 				  DMA_END_ADDR);
2747 
2748 	/* Initialize Tx NAPI */
2749 	netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
2750 }
2751 
2752 /* Initialize a RDMA ring */
2753 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
2754 				 unsigned int index, unsigned int size,
2755 				 unsigned int start_ptr, unsigned int end_ptr)
2756 {
2757 	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
2758 	u32 words_per_bd = WORDS_PER_BD(priv);
2759 	int ret;
2760 
2761 	ring->priv = priv;
2762 	ring->index = index;
2763 	ring->cbs = priv->rx_cbs + start_ptr;
2764 	ring->size = size;
2765 	ring->c_index = 0;
2766 	ring->read_ptr = start_ptr;
2767 	ring->cb_ptr = start_ptr;
2768 	ring->end_ptr = end_ptr - 1;
2769 
2770 	ret = bcmgenet_alloc_rx_buffers(priv, ring);
2771 	if (ret)
2772 		return ret;
2773 
2774 	bcmgenet_init_dim(ring, bcmgenet_dim_work);
2775 	bcmgenet_init_rx_coalesce(ring);
2776 
2777 	/* Initialize Rx NAPI */
2778 	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
2779 
2780 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
2781 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
2782 	bcmgenet_rdma_ring_writel(priv, index,
2783 				  ((size << DMA_RING_SIZE_SHIFT) |
2784 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2785 	bcmgenet_rdma_ring_writel(priv, index,
2786 				  (DMA_FC_THRESH_LO <<
2787 				   DMA_XOFF_THRESHOLD_SHIFT) |
2788 				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
2789 
2790 	/* Set start and end address, read and write pointers */
2791 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2792 				  DMA_START_ADDR);
2793 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2794 				  RDMA_READ_PTR);
2795 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2796 				  RDMA_WRITE_PTR);
2797 	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2798 				  DMA_END_ADDR);
2799 
2800 	return ret;
2801 }
2802 
2803 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2804 {
2805 	unsigned int i;
2806 	struct bcmgenet_tx_ring *ring;
2807 
2808 	for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
2809 		ring = &priv->tx_rings[i];
2810 		napi_enable(&ring->napi);
2811 		bcmgenet_tx_ring_int_enable(ring);
2812 	}
2813 }
2814 
2815 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2816 {
2817 	unsigned int i;
2818 	struct bcmgenet_tx_ring *ring;
2819 
2820 	for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
2821 		ring = &priv->tx_rings[i];
2822 		napi_disable(&ring->napi);
2823 	}
2824 }
2825 
2826 static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2827 {
2828 	unsigned int i;
2829 	struct bcmgenet_tx_ring *ring;
2830 
2831 	for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
2832 		ring = &priv->tx_rings[i];
2833 		netif_napi_del(&ring->napi);
2834 	}
2835 }
2836 
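/* Disable the Tx DMA engine and all Tx rings.  The mask holds one
 * DMA_RING_BUF_EN bit per ring plus DMA_EN; e.g. 4 priority queues plus
 * the default queue give a ring mask of 0x1f.  DMA_STATUS sets a bit
 * once the corresponding ring has actually stopped, so poll until every
 * masked bit reads back set.
 */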
2837 static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv)
2838 {
2839 	int timeout = 0;
2840 	u32 reg, mask;
2841 
2842 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2843 	mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
2844 	mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2845 	reg &= ~mask;
2846 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2847 
2848 	/* Check DMA status register to confirm DMA is disabled */
2849 	while (timeout++ < DMA_TIMEOUT_VAL) {
2850 		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2851 		if ((reg & mask) == mask)
2852 			return 0;
2853 
2854 		udelay(1);
2855 	}
2856 
2857 	return -ETIMEDOUT;
2858 }
2859 
2860 static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv)
2861 {
2862 	int timeout = 0;
2863 	u32 reg, mask;
2864 
2865 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2866 	mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
2867 	mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2868 	reg &= ~mask;
2869 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2870 
2871 	/* Check DMA status register to confirm DMA is disabled */
2872 	while (timeout++ < DMA_TIMEOUT_VAL) {
2873 		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2874 		if ((reg & mask) == mask)
2875 			return 0;
2876 
2877 		udelay(1);
2878 	}
2879 
2880 	return -ETIMEDOUT;
2881 }
2882 
2883 /* Initialize Tx queues
2884  *
2885  * Queues 1-4 are priority-based, each one has 32 descriptors,
2886  * with queue 1 being the highest priority queue.
2887  *
2888  * Queue 0 is the default Tx queue with
2889  * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2890  *
2891  * The transmit control block pool is then partitioned as follows:
2892  * - Tx queue 0 uses tx_cbs[0..127]
2893  * - Tx queue 1 uses tx_cbs[128..159]
2894  * - Tx queue 2 uses tx_cbs[160..191]
2895  * - Tx queue 3 uses tx_cbs[192..223]
2896  * - Tx queue 4 uses tx_cbs[224..255]
2897  */
2898 static void bcmgenet_init_tx_queues(struct net_device *dev)
2899 {
2900 	struct bcmgenet_priv *priv = netdev_priv(dev);
2901 	unsigned int start = 0, end = GENET_Q0_TX_BD_CNT;
2902 	u32 i, ring_mask, dma_priority[3] = {0, 0, 0};
2903 
2904 	/* Enable strict priority arbiter mode */
2905 	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2906 
2907 	/* Initialize Tx priority queues */
2908 	for (i = 0; i <= priv->hw_params->tx_queues; i++) {
2909 		bcmgenet_init_tx_ring(priv, i, end - start, start, end);
2910 		start = end;
2911 		end += priv->hw_params->tx_bds_per_q;
2912 		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2913 			(i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY)
2914 			<< DMA_PRIO_REG_SHIFT(i);
2915 	}
2916 
2917 	/* Set Tx queue priorities */
2918 	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2919 	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2920 	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2921 
2922 	/* Configure Tx queues as descriptor rings */
2923 	ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
2924 	bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG);
2925 
2926 	/* Enable Tx rings */
2927 	ring_mask <<= DMA_RING_BUF_EN_SHIFT;
2928 	bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL);
2929 }
2930 
2931 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2932 {
2933 	unsigned int i;
2934 	struct bcmgenet_rx_ring *ring;
2935 
2936 	for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
2937 		ring = &priv->rx_rings[i];
2938 		napi_enable(&ring->napi);
2939 		bcmgenet_rx_ring_int_enable(ring);
2940 	}
2941 }
2942 
2943 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2944 {
2945 	unsigned int i;
2946 	struct bcmgenet_rx_ring *ring;
2947 
2948 	for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
2949 		ring = &priv->rx_rings[i];
2950 		napi_disable(&ring->napi);
2951 		cancel_work_sync(&ring->dim.dim.work);
2952 	}
2953 }
2954 
2955 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2956 {
2957 	unsigned int i;
2958 	struct bcmgenet_rx_ring *ring;
2959 
2960 	for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
2961 		ring = &priv->rx_rings[i];
2962 		netif_napi_del(&ring->napi);
2963 	}
2964 }
2965 
2966 /* Initialize Rx queues
2967  *
2968  * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2969  * used to direct traffic to these queues.
2970  *
2971  * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors.
2972  */
2973 static int bcmgenet_init_rx_queues(struct net_device *dev)
2974 {
2975 	struct bcmgenet_priv *priv = netdev_priv(dev);
2976 	unsigned int start = 0, end = GENET_Q0_RX_BD_CNT;
2977 	u32 i, ring_mask;
2978 	int ret;
2979 
2980 	/* Initialize Rx priority queues */
2981 	for (i = 0; i <= priv->hw_params->rx_queues; i++) {
2982 		ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);
2983 		if (ret)
2984 			return ret;
2985 
2986 		start = end;
2987 		end += priv->hw_params->rx_bds_per_q;
2988 	}
2989 
2990 	/* Configure Rx queues as descriptor rings */
2991 	ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
2992 	bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG);
2993 
2994 	/* Enable Rx rings */
2995 	ring_mask <<= DMA_RING_BUF_EN_SHIFT;
2996 	bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL);
2997 
2998 	return 0;
2999 }
3000 
3001 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
3002 {
3003 	int ret = 0;
3004 
3005 	/* Disable TDMA to stop adding more frames to the Tx DMA */
3006 	if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) {
3007 		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
3008 		ret = -ETIMEDOUT;
3009 	}
3010 
3011 	/* Wait 10ms for packets to drain from both Tx and Rx DMA */
3012 	usleep_range(10000, 20000);
3013 
3014 	/* Disable RDMA */
3015 	if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) {
3016 		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
3017 		ret = -ETIMEDOUT;
3018 	}
3019 
3020 	return ret;
3021 }
3022 
3023 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
3024 {
3025 	struct netdev_queue *txq;
3026 	int i;
3027 
3028 	bcmgenet_fini_rx_napi(priv);
3029 	bcmgenet_fini_tx_napi(priv);
3030 
3031 	for (i = 0; i <= priv->hw_params->tx_queues; i++) {
3032 		txq = netdev_get_tx_queue(priv->dev, i);
3033 		netdev_tx_reset_queue(txq);
3034 	}
3035 
3036 	bcmgenet_free_rx_buffers(priv);
3037 	kfree(priv->rx_cbs);
3038 	kfree(priv->tx_cbs);
3039 }
3040 
3041 /* bcmgenet_init_dma: Initialize DMA rings and control registers */
3042 static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx)
3043 {
3044 	struct enet_cb *cb;
3045 	unsigned int i;
3046 	int ret;
3047 	u32 reg;
3048 
3049 	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
3050 
3051 	/* Disable TX DMA */
3052 	ret = bcmgenet_tdma_disable(priv);
3053 	if (ret) {
3054 		netdev_err(priv->dev, "failed to halt Tx DMA\n");
3055 		return ret;
3056 	}
3057 
3058 	/* Disable RX DMA */
3059 	ret = bcmgenet_rdma_disable(priv);
3060 	if (ret) {
3061 		netdev_err(priv->dev, "failed to halt Rx DMA\n");
3062 		return ret;
3063 	}
3064 
3065 	/* Flush TX queues */
3066 	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
3067 	udelay(10);
3068 	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
3069 
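	/* Optionally flush the Rx path: toggling bit 0 of RBUF_FLUSH_CTRL
	 * drains packets received before the reset so that stale data is
	 * not handed to the freshly initialized rings.
	 */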
3070 	if (flush_rx) {
3071 		reg = bcmgenet_rbuf_ctrl_get(priv);
3072 		bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
3073 		udelay(10);
3074 		bcmgenet_rbuf_ctrl_set(priv, reg);
3075 		udelay(10);
3076 	}
3077 
3078 	/* Initialize common Rx ring structures */
3079 	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
3080 	priv->num_rx_bds = TOTAL_DESC;
3081 	priv->rx_cbs = kzalloc_objs(struct enet_cb, priv->num_rx_bds);
3082 	if (!priv->rx_cbs)
3083 		return -ENOMEM;
3084 
3085 	for (i = 0; i < priv->num_rx_bds; i++) {
3086 		cb = priv->rx_cbs + i;
3087 		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
3088 	}
3089 
3090 	/* Initialize common TX ring structures */
3091 	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
3092 	priv->num_tx_bds = TOTAL_DESC;
3093 	priv->tx_cbs = kzalloc_objs(struct enet_cb, priv->num_tx_bds);
3094 	if (!priv->tx_cbs) {
3095 		kfree(priv->rx_cbs);
3096 		return -ENOMEM;
3097 	}
3098 
3099 	for (i = 0; i < priv->num_tx_bds; i++) {
3100 		cb = priv->tx_cbs + i;
3101 		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
3102 	}
3103 
3104 	/* Init rDMA */
3105 	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
3106 			     DMA_SCB_BURST_SIZE);
3107 
3108 	/* Initialize Rx queues */
3109 	ret = bcmgenet_init_rx_queues(priv->dev);
3110 	if (ret) {
3111 		netdev_err(priv->dev, "failed to initialize Rx queues\n");
3112 		bcmgenet_free_rx_buffers(priv);
3113 		kfree(priv->rx_cbs);
3114 		kfree(priv->tx_cbs);
3115 		return ret;
3116 	}
3117 
3118 	/* Init tDMA */
3119 	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
3120 			     DMA_SCB_BURST_SIZE);
3121 
3122 	/* Initialize Tx queues */
3123 	bcmgenet_init_tx_queues(priv->dev);
3124 
3125 	/* Enable RX/TX DMA */
3126 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3127 	reg |= DMA_EN;
3128 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3129 
3130 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3131 	reg |= DMA_EN;
3132 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3133 
3134 	return 0;
3135 }
3136 
3137 /* Interrupt bottom half */
3138 static void bcmgenet_irq_task(struct work_struct *work)
3139 {
3140 	unsigned int status;
3141 	struct bcmgenet_priv *priv = container_of(
3142 			work, struct bcmgenet_priv, bcmgenet_irq_work);
3143 
3144 	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
3145 
3146 	spin_lock_irq(&priv->lock);
3147 	status = priv->irq0_stat;
3148 	priv->irq0_stat = 0;
3149 	spin_unlock_irq(&priv->lock);
3150 
3151 	if (status & UMAC_IRQ_PHY_DET_R &&
3152 	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
3153 		phy_init_hw(priv->dev->phydev);
3154 		genphy_config_aneg(priv->dev->phydev);
3155 	}
3156 
3157 	/* Link UP/DOWN event */
3158 	if (status & UMAC_IRQ_LINK_EVENT)
3159 		phy_mac_interrupt(priv->dev->phydev);
3160 
3161 }
3162 
3163 /* bcmgenet_isr1: handle Rx and Tx queues */
3164 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
3165 {
3166 	struct bcmgenet_priv *priv = dev_id;
3167 	struct bcmgenet_rx_ring *rx_ring;
3168 	struct bcmgenet_tx_ring *tx_ring;
3169 	unsigned int index, status;
3170 
3171 	/* Read irq status */
3172 	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
3173 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3174 
3175 	/* clear interrupts */
3176 	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
3177 
3178 	netif_dbg(priv, intr, priv->dev,
3179 		  "%s: IRQ=0x%x\n", __func__, status);
3180 
3181 	/* Check Rx priority queue interrupts */
3182 	for (index = 0; index <= priv->hw_params->rx_queues; index++) {
3183 		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
3184 			continue;
3185 
3186 		rx_ring = &priv->rx_rings[index];
3187 		rx_ring->dim.event_ctr++;
3188 
3189 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
3190 			bcmgenet_rx_ring_int_disable(rx_ring);
3191 			__napi_schedule_irqoff(&rx_ring->napi);
3192 		}
3193 	}
3194 
3195 	/* Check Tx priority queue interrupts */
3196 	for (index = 0; index <= priv->hw_params->tx_queues; index++) {
3197 		if (!(status & BIT(index)))
3198 			continue;
3199 
3200 		tx_ring = &priv->tx_rings[index];
3201 
3202 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
3203 			bcmgenet_tx_ring_int_disable(tx_ring);
3204 			__napi_schedule_irqoff(&tx_ring->napi);
3205 		}
3206 	}
3207 
3208 	return IRQ_HANDLED;
3209 }
3210 
3211 /* bcmgenet_isr0: handle all other interrupts (link, PHY detect, MDIO) */
3212 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
3213 {
3214 	struct bcmgenet_priv *priv = dev_id;
3215 	unsigned int status;
3216 	unsigned long flags;
3217 
3218 	/* Read irq status */
3219 	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
3220 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3221 
3222 	/* clear interrupts */
3223 	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
3224 
3225 	netif_dbg(priv, intr, priv->dev,
3226 		  "IRQ=0x%x\n", status);
3227 
3228 	if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT)
3229 		wake_up(&priv->wq);
3230 
3231 	/* All other interrupts of interest are handled in the bottom half */
3232 	status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
3233 	if (status) {
3234 		/* Save irq status for bottom-half processing. */
3235 		spin_lock_irqsave(&priv->lock, flags);
3236 		priv->irq0_stat |= status;
3237 		spin_unlock_irqrestore(&priv->lock, flags);
3238 
3239 		schedule_work(&priv->bcmgenet_irq_work);
3240 	}
3241 
3242 	return IRQ_HANDLED;
3243 }
3244 
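/* Nothing to service here: the Wake-on-LAN interrupt only needs to
 * wake the system, and the wake event itself is handled on the resume
 * path.
 */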
3245 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
3246 {
3247 	/* Acknowledge the interrupt */
3248 	return IRQ_HANDLED;
3249 }
3250 
3251 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
3252 {
3253 	u32 reg;
3254 
3255 	reg = bcmgenet_rbuf_ctrl_get(priv);
3256 	reg |= BIT(1);
3257 	bcmgenet_rbuf_ctrl_set(priv, reg);
3258 	udelay(10);
3259 
3260 	reg &= ~BIT(1);
3261 	bcmgenet_rbuf_ctrl_set(priv, reg);
3262 	udelay(10);
3263 }
3264 
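/* The MAC address is split across two registers: UMAC_MAC0 holds the
 * first four octets big-endian and UMAC_MAC1 the last two; e.g.
 * 00:10:18:aa:bb:cc is written as MAC0 = 0x001018aa, MAC1 = 0xbbcc.
 */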
3265 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
3266 				 const unsigned char *addr)
3267 {
3268 	bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
3269 	bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
3270 }
3271 
3272 static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
3273 				 unsigned char *addr)
3274 {
3275 	u32 addr_tmp;
3276 
3277 	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
3278 	put_unaligned_be32(addr_tmp, &addr[0]);
3279 	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
3280 	put_unaligned_be16(addr_tmp, &addr[4]);
3281 }
3282 
3283 static void bcmgenet_netif_start(struct net_device *dev)
3284 {
3285 	struct bcmgenet_priv *priv = netdev_priv(dev);
3286 
3287 	/* Start the network engine */
3288 	netif_addr_lock_bh(dev);
3289 	bcmgenet_set_rx_mode(dev);
3290 	netif_addr_unlock_bh(dev);
3291 	bcmgenet_enable_rx_napi(priv);
3292 
3293 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
3294 
3295 	bcmgenet_enable_tx_napi(priv);
3296 
3297 	/* Monitor link interrupts now */
3298 	bcmgenet_link_intr_enable(priv);
3299 
3300 	phy_start(dev->phydev);
3301 }
3302 
3303 static int bcmgenet_open(struct net_device *dev)
3304 {
3305 	struct bcmgenet_priv *priv = netdev_priv(dev);
3306 	int ret;
3307 
3308 	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
3309 
3310 	/* Turn on the clock */
3311 	clk_prepare_enable(priv->clk);
3312 
3313 	/* If this is an internal GPHY, power it back on now, before UniMAC is
3314 	 * brought out of reset as absolutely no UniMAC activity is allowed
3315 	 */
3316 	if (priv->internal_phy)
3317 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3318 
3319 	/* take MAC out of reset */
3320 	bcmgenet_umac_reset(priv);
3321 
3322 	init_umac(priv);
3323 
3324 	/* Apply features again in case we changed them while interface was
3325 	 * down
3326 	 */
3327 	bcmgenet_set_features(dev, dev->features);
3328 
3329 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
3330 
3331 	/* HFB init */
3332 	bcmgenet_hfb_init(priv);
3333 
3334 	/* Reinitialize TDMA and RDMA and SW housekeeping */
3335 	ret = bcmgenet_init_dma(priv, true);
3336 	if (ret) {
3337 		netdev_err(dev, "failed to initialize DMA\n");
3338 		goto err_clk_disable;
3339 	}
3340 
3341 	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
3342 			  dev->name, priv);
3343 	if (ret < 0) {
3344 		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
3345 		goto err_fini_dma;
3346 	}
3347 
3348 	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
3349 			  dev->name, priv);
3350 	if (ret < 0) {
3351 		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
3352 		goto err_irq0;
3353 	}
3354 
3355 	ret = bcmgenet_mii_probe(dev);
3356 	if (ret) {
3357 		netdev_err(dev, "failed to connect to PHY\n");
3358 		goto err_irq1;
3359 	}
3360 
3361 	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
3362 
3363 	bcmgenet_netif_start(dev);
3364 
3365 	netif_tx_start_all_queues(dev);
3366 
3367 	return 0;
3368 
3369 err_irq1:
3370 	free_irq(priv->irq1, priv);
3371 err_irq0:
3372 	free_irq(priv->irq0, priv);
3373 err_fini_dma:
3374 	bcmgenet_dma_teardown(priv);
3375 	bcmgenet_fini_dma(priv);
3376 err_clk_disable:
3377 	if (priv->internal_phy)
3378 		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3379 	clk_disable_unprepare(priv->clk);
3380 	return ret;
3381 }
3382 
3383 static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
3384 {
3385 	struct bcmgenet_priv *priv = netdev_priv(dev);
3386 
3387 	netif_tx_disable(dev);
3388 
3389 	/* Disable MAC receive */
3390 	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
3391 	umac_enable_set(priv, CMD_RX_EN, false);
3392 
3393 	if (stop_phy)
3394 		phy_stop(dev->phydev);
3395 
3396 	bcmgenet_dma_teardown(priv);
3397 
3398 	/* Disable MAC transmit; Tx DMA must already be disabled at this point */
3399 	umac_enable_set(priv, CMD_TX_EN, false);
3400 
3401 	bcmgenet_disable_tx_napi(priv);
3402 	bcmgenet_disable_rx_napi(priv);
3403 	bcmgenet_intr_disable(priv);
3404 
3405 	/* Wait for pending work items to complete. Since interrupts are
3406 	 * disabled no new work will be scheduled.
3407 	 */
3408 	cancel_work_sync(&priv->bcmgenet_irq_work);
3409 
3410 	/* tx reclaim */
3411 	bcmgenet_tx_reclaim_all(dev);
3412 	bcmgenet_fini_dma(priv);
3413 }
3414 
3415 static int bcmgenet_close(struct net_device *dev)
3416 {
3417 	struct bcmgenet_priv *priv = netdev_priv(dev);
3418 	int ret = 0;
3419 
3420 	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
3421 
3422 	bcmgenet_netif_stop(dev, false);
3423 
3424 	/* Really kill the PHY state machine and disconnect from it */
3425 	phy_disconnect(dev->phydev);
3426 
3427 	free_irq(priv->irq0, priv);
3428 	free_irq(priv->irq1, priv);
3429 
3430 	if (priv->internal_phy)
3431 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3432 
3433 	clk_disable_unprepare(priv->clk);
3434 
3435 	return ret;
3436 }
3437 
3438 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
3439 {
3440 	struct bcmgenet_priv *priv = ring->priv;
3441 	u32 p_index, c_index, intsts, intmsk;
3442 	struct netdev_queue *txq;
3443 	unsigned int free_bds;
3444 	bool txq_stopped;
3445 
3446 	if (!netif_msg_tx_err(priv))
3447 		return;
3448 
3449 	txq = netdev_get_tx_queue(priv->dev, ring->index);
3450 
3451 	spin_lock(&ring->lock);
3452 	intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3453 	intmsk = 1 << ring->index;
3454 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
3455 	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
3456 	txq_stopped = netif_tx_queue_stopped(txq);
3457 	free_bds = ring->free_bds;
3458 	spin_unlock(&ring->lock);
3459 
3460 	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
3461 		  "TX queue status: %s, interrupts: %s\n"
3462 		  "(sw)free_bds: %d (sw)size: %d\n"
3463 		  "(sw)p_index: %d (hw)p_index: %d\n"
3464 		  "(sw)c_index: %d (hw)c_index: %d\n"
3465 		  "(sw)clean_p: %d (sw)write_p: %d\n"
3466 		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
3467 		  ring->index, ring->index,
3468 		  txq_stopped ? "stopped" : "active",
3469 		  intsts & intmsk ? "enabled" : "disabled",
3470 		  free_bds, ring->size,
3471 		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
3472 		  ring->c_index, c_index & DMA_C_INDEX_MASK,
3473 		  ring->clean_ptr, ring->write_ptr,
3474 		  ring->cb_ptr, ring->end_ptr);
3475 }
3476 
3477 static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
3478 {
3479 	struct bcmgenet_priv *priv = netdev_priv(dev);
3480 	struct bcmgenet_tx_ring *ring = &priv->tx_rings[txqueue];
3481 	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
3482 
3483 	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3484 
3485 	bcmgenet_dump_tx_queue(ring);
3486 
3487 	bcmgenet_tx_reclaim(dev, ring, true);
3488 
3489 	/* Re-enable the TX interrupt for this ring */
3490 	bcmgenet_intrl2_1_writel(priv, 1 << txqueue, INTRL2_CPU_MASK_CLEAR);
3491 
3492 	txq_trans_cond_update(txq);
3493 
3494 	BCMGENET_STATS64_INC((&ring->stats64), errors);
3495 
3496 	netif_tx_wake_queue(txq);
3497 }
3498 
3499 #define MAX_MDF_FILTER	17
3500 
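/* Each MDF filter occupies two consecutive UMAC_MDF_ADDR words: the
 * first holds the top two address octets in bits 15:0, the second the
 * remaining four; e.g. 00:10:18:aa:bb:cc is stored as the pair
 * 0x00000010, 0x18aabbcc.
 */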
3501 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3502 					 const unsigned char *addr,
3503 					 int *i)
3504 {
3505 	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3506 			     UMAC_MDF_ADDR + (*i * 4));
3507 	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3508 			     addr[4] << 8 | addr[5],
3509 			     UMAC_MDF_ADDR + ((*i + 1) * 4));
3510 	*i += 2;
3511 }
3512 
3513 static void bcmgenet_set_rx_mode(struct net_device *dev)
3514 {
3515 	struct bcmgenet_priv *priv = netdev_priv(dev);
3516 	struct netdev_hw_addr *ha;
3517 	int i, nfilter;
3518 	u32 reg;
3519 
3520 	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3521 
3522 	/* Number of filters needed */
3523 	nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
3524 
3525 	/*
3526 	 * Turn on promiscuous mode in three scenarios:
3527 	 * 1. IFF_PROMISC flag is set
3528 	 * 2. IFF_ALLMULTI flag is set
3529 	 * 3. The number of filters needed exceeds the number of filters
3530 	 *    supported by the hardware.
3531 	 */
3532 	spin_lock(&priv->reg_lock);
3533 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
3534 	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
3535 	    (nfilter > MAX_MDF_FILTER)) {
3536 		reg |= CMD_PROMISC;
3537 		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3538 		spin_unlock(&priv->reg_lock);
3539 		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
3540 		return;
3541 	} else {
3542 		reg &= ~CMD_PROMISC;
3543 		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3544 		spin_unlock(&priv->reg_lock);
3545 	}
3546 
3547 	/* update MDF filter */
3548 	i = 0;
3549 	/* Broadcast */
3550 	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
3551 	/* my own address.*/
3552 	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
3553 
3554 	/* Unicast */
3555 	netdev_for_each_uc_addr(ha, dev)
3556 		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3557 
3558 	/* Multicast */
3559 	netdev_for_each_mc_addr(ha, dev)
3560 		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3561 
3562 	/* Enable filters */
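	/* One enable bit per filter slot, allocated MSB-first:
	 * GENMASK(16, 17 - nfilter), e.g. four filters set bits 16..13.
	 */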
3563 	reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
3564 	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
3565 }
3566 
3567 /* Set the hardware MAC address. */
3568 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3569 {
3570 	struct sockaddr *addr = p;
3571 
3572 	/* Setting the MAC address at the hardware level is not possible
3573 	 * without disabling the UniMAC RX/TX enable bits.
3574 	 */
3575 	if (netif_running(dev))
3576 		return -EBUSY;
3577 
3578 	eth_hw_addr_set(dev, addr->sa_data);
3579 
3580 	return 0;
3581 }
3582 
3583 static void bcmgenet_get_stats64(struct net_device *dev,
3584 				 struct rtnl_link_stats64 *stats)
3585 {
3586 	struct bcmgenet_priv *priv = netdev_priv(dev);
3587 	struct bcmgenet_tx_stats64 *tx_stats;
3588 	struct bcmgenet_rx_stats64 *rx_stats;
3589 	u64 rx_length_errors, rx_over_errors;
3590 	u64 rx_missed, rx_fragmented_errors;
3591 	u64 rx_crc_errors, rx_frame_errors;
3592 	u64 tx_errors, tx_dropped;
3593 	u64 rx_errors, rx_dropped;
3594 	u64 tx_bytes, tx_packets;
3595 	u64 rx_bytes, rx_packets;
3596 	unsigned int start;
3597 	unsigned int q;
3598 	u64 multicast;
3599 
3600 	for (q = 0; q <= priv->hw_params->tx_queues; q++) {
3601 		tx_stats = &priv->tx_rings[q].stats64;
3602 		do {
3603 			start = u64_stats_fetch_begin(&tx_stats->syncp);
3604 			tx_bytes = u64_stats_read(&tx_stats->bytes);
3605 			tx_packets = u64_stats_read(&tx_stats->packets);
3606 			tx_errors = u64_stats_read(&tx_stats->errors);
3607 			tx_dropped = u64_stats_read(&tx_stats->dropped);
3608 		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
3609 
3610 		stats->tx_bytes += tx_bytes;
3611 		stats->tx_packets += tx_packets;
3612 		stats->tx_errors += tx_errors;
3613 		stats->tx_dropped += tx_dropped;
3614 	}
3615 
3616 	for (q = 0; q <= priv->hw_params->rx_queues; q++) {
3617 		rx_stats = &priv->rx_rings[q].stats64;
3618 		do {
3619 			start = u64_stats_fetch_begin(&rx_stats->syncp);
3620 			rx_bytes = u64_stats_read(&rx_stats->bytes);
3621 			rx_packets = u64_stats_read(&rx_stats->packets);
3622 			rx_errors = u64_stats_read(&rx_stats->errors);
3623 			rx_dropped = u64_stats_read(&rx_stats->dropped);
3624 			rx_missed = u64_stats_read(&rx_stats->missed);
3625 			rx_length_errors = u64_stats_read(&rx_stats->length_errors);
3626 			rx_over_errors = u64_stats_read(&rx_stats->over_errors);
3627 			rx_crc_errors = u64_stats_read(&rx_stats->crc_errors);
3628 			rx_frame_errors = u64_stats_read(&rx_stats->frame_errors);
3629 			rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors);
3630 			multicast = u64_stats_read(&rx_stats->multicast);
3631 		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
3632 
3633 		rx_errors += rx_length_errors;
3634 		rx_errors += rx_crc_errors;
3635 		rx_errors += rx_frame_errors;
3636 		rx_errors += rx_fragmented_errors;
3637 
3638 		stats->rx_bytes += rx_bytes;
3639 		stats->rx_packets += rx_packets;
3640 		stats->rx_errors += rx_errors;
3641 		stats->rx_dropped += rx_dropped;
3642 		stats->rx_missed_errors += rx_missed;
3643 		stats->rx_length_errors += rx_length_errors;
3644 		stats->rx_over_errors += rx_over_errors;
3645 		stats->rx_crc_errors += rx_crc_errors;
3646 		stats->rx_frame_errors += rx_frame_errors;
3647 		stats->multicast += multicast;
3648 	}
3649 }
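/* The loops above iterate one ring past hw_params->{tx,rx}_queues so the
 * default ring's counters are included. The u64_stats_fetch_begin/retry
 * pairs make the 64-bit counter reads tear-free on 32-bit machines (a
 * concurrent writer forces the reader to retry); on 64-bit builds they
 * compile away to plain loads.
 */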
3650 
3651 static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
3652 {
3653 	struct bcmgenet_priv *priv = netdev_priv(dev);
3654 
3655 	if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
3656 	    priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
3657 		return -EOPNOTSUPP;
3658 
3659 	if (new_carrier)
3660 		netif_carrier_on(dev);
3661 	else
3662 		netif_carrier_off(dev);
3663 
3664 	return 0;
3665 }
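/* Carrier overrides are only honoured for a MoCA pseudo fixed-link,
 * where the MAC cannot observe the link itself and a user space agent
 * (e.g. a MoCA management daemon) is expected to report link state
 * through ndo_change_carrier.
 */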
3666 
3667 static const struct net_device_ops bcmgenet_netdev_ops = {
3668 	.ndo_open		= bcmgenet_open,
3669 	.ndo_stop		= bcmgenet_close,
3670 	.ndo_start_xmit		= bcmgenet_xmit,
3671 	.ndo_tx_timeout		= bcmgenet_timeout,
3672 	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
3673 	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
3674 	.ndo_eth_ioctl		= phy_do_ioctl_running,
3675 	.ndo_set_features	= bcmgenet_set_features,
3676 	.ndo_get_stats64	= bcmgenet_get_stats64,
3677 	.ndo_change_carrier	= bcmgenet_change_carrier,
3678 };
3679 
3680 /* GENET hardware parameters/characteristics */
3681 static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = {
3682 	.tx_queues = 0,
3683 	.tx_bds_per_q = 0,
3684 	.rx_queues = 0,
3685 	.rx_bds_per_q = 0,
3686 	.bp_in_en_shift = 16,
3687 	.bp_in_mask = 0xffff,
3688 	.hfb_filter_cnt = 16,
3689 	.hfb_filter_size = 64,
3690 	.qtag_mask = 0x1F,
3691 	.hfb_offset = 0x1000,
3692 	.hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1,
3693 	.rdma_offset = 0x2000,
3694 	.tdma_offset = 0x3000,
3695 	.words_per_bd = 2,
3696 };
3697 
3698 static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = {
3699 	.tx_queues = 4,
3700 	.tx_bds_per_q = 32,
3701 	.rx_queues = 0,
3702 	.rx_bds_per_q = 0,
3703 	.bp_in_en_shift = 16,
3704 	.bp_in_mask = 0xffff,
3705 	.hfb_filter_cnt = 16,
3706 	.hfb_filter_size = 64,
3707 	.qtag_mask = 0x1F,
3708 	.tbuf_offset = 0x0600,
3709 	.hfb_offset = 0x1000,
3710 	.hfb_reg_offset = 0x2000,
3711 	.rdma_offset = 0x3000,
3712 	.tdma_offset = 0x4000,
3713 	.words_per_bd = 2,
3714 };
3715 
3716 static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = {
3717 	.tx_queues = 4,
3718 	.tx_bds_per_q = 32,
3719 	.rx_queues = 0,
3720 	.rx_bds_per_q = 0,
3721 	.bp_in_en_shift = 17,
3722 	.bp_in_mask = 0x1ffff,
3723 	.hfb_filter_cnt = 48,
3724 	.hfb_filter_size = 128,
3725 	.qtag_mask = 0x3F,
3726 	.tbuf_offset = 0x0600,
3727 	.hfb_offset = 0x8000,
3728 	.hfb_reg_offset = 0xfc00,
3729 	.rdma_offset = 0x10000,
3730 	.tdma_offset = 0x11000,
3731 	.words_per_bd = 2,
3732 };
3733 
3734 static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = {
3735 	.tx_queues = 4,
3736 	.tx_bds_per_q = 32,
3737 	.rx_queues = 0,
3738 	.rx_bds_per_q = 0,
3739 	.bp_in_en_shift = 17,
3740 	.bp_in_mask = 0x1ffff,
3741 	.hfb_filter_cnt = 48,
3742 	.hfb_filter_size = 128,
3743 	.qtag_mask = 0x3F,
3744 	.tbuf_offset = 0x0600,
3745 	.hfb_offset = 0x8000,
3746 	.hfb_reg_offset = 0xfc00,
3747 	.rdma_offset = 0x2000,
3748 	.tdma_offset = 0x4000,
3749 	.words_per_bd = 3,
3750 };
3751 
3752 /* Infer hardware parameters from the detected GENET version */
3753 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3754 {
3755 	const struct bcmgenet_hw_params *params;
3756 	u32 reg;
3757 	u8 major;
3758 	u16 gphy_rev;
3759 
3760 	/* default to latest values */
3761 	params = &bcmgenet_hw_params_v4;
3762 	bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3763 	genet_dma_ring_regs = genet_dma_ring_regs_v4;
3764 	if (GENET_IS_V3(priv)) {
3765 		params = &bcmgenet_hw_params_v3;
3766 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3767 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3768 	} else if (GENET_IS_V2(priv)) {
3769 		params = &bcmgenet_hw_params_v2;
3770 		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3771 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3772 	} else if (GENET_IS_V1(priv)) {
3773 		params = &bcmgenet_hw_params_v1;
3774 		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3775 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3776 	}
3777 	priv->hw_params = params;
3778 
3779 	/* Read GENET HW version */
3780 	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
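	/* The raw major nibble in SYS_REV_CTRL does not map 1:1 onto the
	 * GENET_Vx numbering; the remap below folds the hardware encodings
	 * (raw 6 or 7 -> version 5, raw 5 -> version 4, raw 0 -> version 1)
	 * into the version space this driver is configured against.
	 */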
3781 	major = (reg >> 24) & 0x0f;
3782 	if (major == 6 || major == 7)
3783 		major = 5;
3784 	else if (major == 5)
3785 		major = 4;
3786 	else if (major == 0)
3787 		major = 1;
3788 	if (major != priv->version) {
3789 		dev_err(&priv->pdev->dev,
3790 			"GENET version mismatch, got: %d, configured for: %d\n",
3791 			major, priv->version);
3792 	}
3793 
3794 	/* Print the GENET core version */
3795 	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3796 		 major, (reg >> 16) & 0x0f, reg & 0xffff);
3797 
3798 	/* Store the integrated PHY revision for the MDIO probing function
3799 	 * to pass this information to the PHY driver. The PHY driver expects
3800 	 * to find the PHY major revision in bits 15:8 while the GENET register
3801 	 * stores that information in bits 7:0, so we account for that here.
3802 	 *
3803 	 * On newer chips, starting with PHY revision G0, a new scheme is
3804 	 * deployed similar to the Starfighter 2 switch with GPHY major
3805 	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3806 	 * is reserved, as is the special value 0x01ff; we use a small
3807 	 * heuristic to check for the new GPHY revision and re-arrange things
3808 	 * so the GPHY driver is happy.
3809 	 */
3810 	gphy_rev = reg & 0xffff;
3811 
3812 	if (GENET_IS_V5(priv)) {
3813 		/* The EPHY revision should come from the MDIO registers of
3814 		 * the PHY, not from GENET.
3815 		 */
3816 		if (gphy_rev != 0) {
3817 			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
3818 				gphy_rev);
3819 		}
3820 	/* These values are reserved and require special treatment */
3821 	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3822 		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3823 		return;
3824 	/* This is the good old scheme, just GPHY major, no minor nor patch */
3825 	} else if ((gphy_rev & 0xf0) != 0) {
3826 		priv->gphy_rev = gphy_rev << 8;
3827 	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3828 	} else if ((gphy_rev & 0xff00) != 0) {
3829 		priv->gphy_rev = gphy_rev;
3830 	}
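	/* Two illustrative (hypothetical) readings for the heuristic above:
	 * an old-scheme value of 0x0060 carries its major in bits 7:4 and is
	 * shifted up to 0x6000 so the PHY driver sees the major in bits 15:8,
	 * while a new-scheme value such as 0x1000 (rev G0) already has that
	 * layout and is stored unchanged.
	 */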
3831 
3832 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3833 	if (!bcmgenet_has_40bits(priv))
3834 		pr_warn("GENET does not support 40-bit PA\n");
3835 #endif
3836 
3837 	pr_debug("Configuration for version: %d\n"
3838 		"TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3839 		"BP << en: %2d, BP msk: 0x%05x\n"
3840 		"HFB count: %2d, QTAG msk: 0x%05x\n"
3841 		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3842 		"RDMA: 0x%05x, TDMA: 0x%05x\n"
3843 		"Words/BD: %d\n",
3844 		priv->version,
3845 		params->tx_queues, params->tx_bds_per_q,
3846 		params->rx_queues, params->rx_bds_per_q,
3847 		params->bp_in_en_shift, params->bp_in_mask,
3848 		params->hfb_filter_cnt, params->qtag_mask,
3849 		params->tbuf_offset, params->hfb_offset,
3850 		params->hfb_reg_offset,
3851 		params->rdma_offset, params->tdma_offset,
3852 		params->words_per_bd);
3853 }
3854 
3855 struct bcmgenet_plat_data {
3856 	enum bcmgenet_version version;
3857 	u32 dma_max_burst_length;
3858 	u32 flags;
3859 };
3860 
3861 static const struct bcmgenet_plat_data v1_plat_data = {
3862 	.version = GENET_V1,
3863 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3864 };
3865 
3866 static const struct bcmgenet_plat_data v2_plat_data = {
3867 	.version = GENET_V2,
3868 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3869 	.flags = GENET_HAS_EXT,
3870 };
3871 
3872 static const struct bcmgenet_plat_data v3_plat_data = {
3873 	.version = GENET_V3,
3874 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3875 	.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3876 		 GENET_HAS_MOCA_LINK_DET,
3877 };
3878 
3879 static const struct bcmgenet_plat_data v4_plat_data = {
3880 	.version = GENET_V4,
3881 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3882 	.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3883 		 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3884 };
3885 
3886 static const struct bcmgenet_plat_data v5_plat_data = {
3887 	.version = GENET_V5,
3888 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3889 	.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3890 		 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3891 };
3892 
3893 static const struct bcmgenet_plat_data bcm2711_plat_data = {
3894 	.version = GENET_V5,
3895 	.dma_max_burst_length = 0x08,
3896 	.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3897 		 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3898 };
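/* The BCM2711 variant deliberately caps dma_max_burst_length at 0x08
 * rather than using the generic DMA_MAX_BURST_LENGTH; longer bursts are
 * presumably problematic on that SoC's interconnect, although the
 * constraint is not spelled out here.
 */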
3899 
3900 static const struct bcmgenet_plat_data bcm7712_plat_data = {
3901 	.version = GENET_V5,
3902 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3903 	.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3904 		 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET |
3905 		 GENET_HAS_EPHY_16NM,
3906 };
3907 
3908 static const struct of_device_id bcmgenet_match[] = {
3909 	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
3910 	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
3911 	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
3912 	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
3913 	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
3914 	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
3915 	{ .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
3916 	{ },
3917 };
3918 MODULE_DEVICE_TABLE(of, bcmgenet_match);
3919 
3920 static int bcmgenet_probe(struct platform_device *pdev)
3921 {
3922 	const struct bcmgenet_plat_data *pdata;
3923 	struct bcmgenet_priv *priv;
3924 	struct net_device *dev;
3925 	unsigned int i;
3926 	int err = -EIO;
3927 
3928 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3929 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3930 				 GENET_MAX_MQ_CNT + 1);
3931 	if (!dev) {
3932 		dev_err(&pdev->dev, "can't allocate net device\n");
3933 		return -ENOMEM;
3934 	}
3935 
3936 	priv = netdev_priv(dev);
3937 	priv->irq0 = platform_get_irq(pdev, 0);
3938 	if (priv->irq0 < 0) {
3939 		err = priv->irq0;
3940 		goto err;
3941 	}
3942 	priv->irq1 = platform_get_irq(pdev, 1);
3943 	if (priv->irq1 < 0) {
3944 		err = priv->irq1;
3945 		goto err;
3946 	}
3947 	priv->wol_irq = platform_get_irq_optional(pdev, 2);
3948 	if (priv->wol_irq == -EPROBE_DEFER) {
3949 		err = priv->wol_irq;
3950 		goto err;
3951 	}
3952 
3953 	priv->base = devm_platform_ioremap_resource(pdev, 0);
3954 	if (IS_ERR(priv->base)) {
3955 		err = PTR_ERR(priv->base);
3956 		goto err;
3957 	}
3958 
3959 	spin_lock_init(&priv->reg_lock);
3960 	spin_lock_init(&priv->lock);
3961 
3962 	/* Set default pause parameters */
3963 	priv->autoneg_pause = 1;
3964 	priv->tx_pause = 1;
3965 	priv->rx_pause = 1;
3966 
3967 	SET_NETDEV_DEV(dev, &pdev->dev);
3968 	dev_set_drvdata(&pdev->dev, dev);
3969 	dev->watchdog_timeo = 2 * HZ;
3970 	dev->ethtool_ops = &bcmgenet_ethtool_ops;
3971 	dev->netdev_ops = &bcmgenet_netdev_ops;
3972 
3973 	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
3974 
3975 	/* Set default features */
3976 	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
3977 			 NETIF_F_RXCSUM;
3978 	dev->hw_features |= dev->features;
3979 	dev->vlan_features |= dev->features;
3980 
3981 	netdev_sw_irq_coalesce_default_on(dev);
3982 
3983 	/* Request the WOL interrupt and advertise suspend if available */
3984 	priv->wol_irq_disabled = true;
3985 	if (priv->wol_irq > 0) {
3986 		err = devm_request_irq(&pdev->dev, priv->wol_irq,
3987 				       bcmgenet_wol_isr, 0, dev->name, priv);
3988 		if (!err)
3989 			device_set_wakeup_capable(&pdev->dev, 1);
3990 	}
3991 
3992 	/* Set the needed headroom to account for any features that may
3993 	 * be enabled or disabled at runtime
3994 	 */
3995 	dev->needed_headroom += 64;
3996 
3997 	priv->dev = dev;
3998 	priv->pdev = pdev;
3999 
4000 	pdata = device_get_match_data(&pdev->dev);
4001 	if (pdata) {
4002 		priv->version = pdata->version;
4003 		priv->dma_max_burst_length = pdata->dma_max_burst_length;
4004 		priv->flags = pdata->flags;
4005 	}
4006 
4007 	priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
4008 	if (IS_ERR(priv->clk)) {
4009 		dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
4010 		err = PTR_ERR(priv->clk);
4011 		goto err;
4012 	}
4013 
4014 	err = clk_prepare_enable(priv->clk);
4015 	if (err)
4016 		goto err;
4017 
4018 	bcmgenet_set_hw_params(priv);
4019 
4020 	err = -EIO;
4021 	if (bcmgenet_has_40bits(priv))
4022 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
4023 	if (err)
4024 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4025 	if (err)
4026 		goto err_clk_disable;
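	/* DMA mask selection: cores advertising GENET_HAS_40BITS first try a
	 * 40-bit mask; on failure, or on cores without the capability (err is
	 * primed to -EIO above), fall back to a 32-bit mask before bailing out.
	 */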
4027 
4028 	/* MII wait queue */
4029 	init_waitqueue_head(&priv->wq);
4030 	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
4031 	priv->rx_buf_len = RX_BUF_LENGTH;
4032 	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
4033 
4034 	priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
4035 	if (IS_ERR(priv->clk_wol)) {
4036 		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
4037 		err = PTR_ERR(priv->clk_wol);
4038 		goto err_clk_disable;
4039 	}
4040 
4041 	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
4042 	if (IS_ERR(priv->clk_eee)) {
4043 		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
4044 		err = PTR_ERR(priv->clk_eee);
4045 		goto err_clk_disable;
4046 	}
4047 
4048 	/* If this is an internal GPHY, power it on now, before UniMAC is
4049 	 * brought out of reset, as absolutely no UniMAC activity is allowed
4050 	 */
4051 	if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
4052 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
4053 
4054 	if (device_get_ethdev_address(&pdev->dev, dev))
4055 		if (has_acpi_companion(&pdev->dev)) {
4056 			u8 addr[ETH_ALEN];
4057 
4058 			bcmgenet_get_hw_addr(priv, addr);
4059 			eth_hw_addr_set(dev, addr);
4060 		}
4061 
4062 	if (!is_valid_ether_addr(dev->dev_addr)) {
4063 		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
4064 		eth_hw_addr_random(dev);
4065 	}
4066 
4067 	reset_umac(priv);
4068 
4069 	err = bcmgenet_mii_init(dev);
4070 	if (err)
4071 		goto err_clk_disable;
4072 
4073 	/* Set up the real number of queues: the hardware queues plus the default queue */
4074 	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
4075 	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
4076 
4077 	/* Set default coalescing parameters */
4078 	for (i = 0; i <= priv->hw_params->rx_queues; i++)
4079 		priv->rx_rings[i].rx_max_coalesced_frames = 1;
4080 
4081 	/* Initialize u64 stats seq counter for 32bit machines */
4082 	for (i = 0; i <= priv->hw_params->rx_queues; i++)
4083 		u64_stats_init(&priv->rx_rings[i].stats64.syncp);
4084 	for (i = 0; i <= priv->hw_params->tx_queues; i++)
4085 		u64_stats_init(&priv->tx_rings[i].stats64.syncp);
4086 
4087 	/* libphy will determine the link state */
4088 	netif_carrier_off(dev);
4089 
4090 	/* Turn off the main clock, WOL clock is handled separately */
4091 	/* Turn off the main clock; the WOL clock is handled separately */
4092 
4093 	err = register_netdev(dev);
4094 	if (err) {
4095 		bcmgenet_mii_exit(dev);
4096 		goto err;
4097 	}
4098 
4099 	return err;
4100 
4101 err_clk_disable:
4102 	clk_disable_unprepare(priv->clk);
4103 err:
4104 	free_netdev(dev);
4105 	return err;
4106 }
4107 
4108 static void bcmgenet_remove(struct platform_device *pdev)
4109 {
4110 	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
4111 
4112 	dev_set_drvdata(&pdev->dev, NULL);
4113 	unregister_netdev(priv->dev);
4114 	bcmgenet_mii_exit(priv->dev);
4115 	free_netdev(priv->dev);
4116 }
4117 
4118 static void bcmgenet_shutdown(struct platform_device *pdev)
4119 {
4120 	bcmgenet_remove(pdev);
4121 }
4122 
4123 #ifdef CONFIG_PM_SLEEP
4124 static int bcmgenet_resume_noirq(struct device *d)
4125 {
4126 	struct net_device *dev = dev_get_drvdata(d);
4127 	struct bcmgenet_priv *priv = netdev_priv(dev);
4128 	int ret;
4129 	u32 reg;
4130 
4131 	if (!netif_running(dev))
4132 		return 0;
4133 
4134 	/* Turn on the clock */
4135 	ret = clk_prepare_enable(priv->clk);
4136 	if (ret)
4137 		return ret;
4138 
4139 	if (device_may_wakeup(d) && priv->wolopts) {
4140 		/* Account for Wake-on-LAN events and clear those events
4141 		 * (Some devices need more time between enabling the clocks
4142 		 *  and the interrupt register reflecting the wake event, so
4143 		 *  read the register twice)
4144 		 */
4145 		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
4146 		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
4147 		if (reg & UMAC_IRQ_WAKE_EVENT)
4148 			pm_wakeup_event(&priv->pdev->dev, 0);
4149 
4150 		/* From WOL-enabled suspend, switch to regular clock */
4151 		if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC))
4152 			return 0;
4153 
4154 		/* Failed, so fall through to reset the MAC */
4155 	}
4156 
4157 	/* If this is an internal GPHY, power it back on now, before UniMAC is
4158 	 * brought out of reset, as absolutely no UniMAC activity is allowed
4159 	 */
4160 	if (priv->internal_phy)
4161 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
4162 
4163 	/* take MAC out of reset */
4164 	bcmgenet_umac_reset(priv);
4165 
4166 	return 0;
4167 }
4168 
4169 static int bcmgenet_resume(struct device *d)
4170 {
4171 	struct net_device *dev = dev_get_drvdata(d);
4172 	struct bcmgenet_priv *priv = netdev_priv(dev);
4173 	struct bcmgenet_rxnfc_rule *rule;
4174 	int ret;
4175 	u32 reg;
4176 
4177 	if (!netif_running(dev))
4178 		return 0;
4179 
4180 	if (device_may_wakeup(d) && priv->wolopts) {
4181 		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
4182 		if (reg & CMD_RX_EN) {
4183 			/* Successfully exited WoL, just resume data flows */
4184 			list_for_each_entry(rule, &priv->rxnfc_list, list)
4185 				if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
4186 					bcmgenet_hfb_enable_filter(priv,
4187 							rule->fs.location + 1);
4188 			bcmgenet_hfb_enable_filter(priv, 0);
4189 			bcmgenet_set_rx_mode(dev);
4190 			bcmgenet_enable_rx_napi(priv);
4191 
4192 			/* Reinitialize Tx flows */
4193 			bcmgenet_tdma_disable(priv);
4194 			bcmgenet_init_tx_queues(priv->dev);
4195 			reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
4196 			reg |= DMA_EN;
4197 			bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
4198 			bcmgenet_enable_tx_napi(priv);
4199 
4200 			bcmgenet_link_intr_enable(priv);
4201 			phy_start_machine(dev->phydev);
4202 
4203 			netif_device_attach(dev);
4204 			enable_irq(priv->irq1);
4205 			return 0;
4206 		}
4207 		/* MAC was reset so complete bcmgenet_netif_stop() */
4208 		umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false);
4209 		bcmgenet_rdma_disable(priv);
4210 		bcmgenet_intr_disable(priv);
4211 		bcmgenet_fini_dma(priv);
4212 		enable_irq(priv->irq1);
4213 	}
4214 
4215 	init_umac(priv);
4216 
4217 	phy_init_hw(dev->phydev);
4218 
4219 	/* Speed settings must be restored */
4220 	genphy_config_aneg(dev->phydev);
4221 	bcmgenet_mii_config(priv->dev, false);
4222 
4223 	/* Restore enabled features */
4224 	bcmgenet_set_features(dev, dev->features);
4225 
4226 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
4227 
4228 	/* Restore hardware filters */
4229 	bcmgenet_hfb_clear(priv);
4230 	list_for_each_entry(rule, &priv->rxnfc_list, list)
4231 		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
4232 			bcmgenet_hfb_create_rxnfc_filter(priv, rule);
4233 
4234 	/* Reinitialize TDMA and RDMA and SW housekeeping */
4235 	ret = bcmgenet_init_dma(priv, false);
4236 	if (ret) {
4237 		netdev_err(dev, "failed to initialize DMA\n");
4238 		goto out_clk_disable;
4239 	}
4240 
4241 	if (!device_may_wakeup(d))
4242 		phy_resume(dev->phydev);
4243 
4244 	bcmgenet_netif_start(dev);
4245 
4246 	netif_device_attach(dev);
4247 
4248 	return 0;
4249 
4250 out_clk_disable:
4251 	if (priv->internal_phy)
4252 		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
4253 	clk_disable_unprepare(priv->clk);
4254 	return ret;
4255 }
4256 
4257 static int bcmgenet_suspend(struct device *d)
4258 {
4259 	struct net_device *dev = dev_get_drvdata(d);
4260 	struct bcmgenet_priv *priv = netdev_priv(dev);
4261 	struct bcmgenet_rxnfc_rule *rule;
4262 	u32 reg, hfb_enable = 0;
4263 
4264 	if (!netif_running(dev))
4265 		return 0;
4266 
4267 	netif_device_detach(dev);
4268 
4269 	if (device_may_wakeup(d) && priv->wolopts) {
4270 		netif_tx_disable(dev);
4271 
4272 		/* Suspend non-wake Rx data flows */
4273 		if (priv->wolopts & WAKE_FILTER)
4274 			list_for_each_entry(rule, &priv->rxnfc_list, list)
4275 				if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
4276 				    rule->state == BCMGENET_RXNFC_STATE_ENABLED)
4277 					hfb_enable |= 1 << rule->fs.location;
4278 		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
4279 		if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
4280 			reg &= ~RBUF_HFB_FILTER_EN_MASK;
4281 			reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1);
4282 		} else {
4283 			bcmgenet_hfb_reg_writel(priv, hfb_enable << 1,
4284 						HFB_FLT_ENABLE_V3PLUS + 4);
4285 		}
4286 		if (!hfb_enable)
4287 			reg &= ~RBUF_HFB_EN;
4288 		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
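		/* Filter slot 0 is excluded from the wake mask built above
		 * (note the << 1 / + 1 offsets here and in resume, and that
		 * resume unconditionally re-enables filter 0), which suggests
		 * the driver reserves slot 0; only the user's RX_CLS_FLOW_WAKE
		 * rules, shifted up by one, stay armed across suspend.
		 */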
4289 
4290 		/* Clear any old filter matches so only new matches wake */
4291 		bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
4292 		bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
4293 
4294 		if (bcmgenet_tdma_disable(priv) == -ETIMEDOUT)
4295 			netdev_warn(priv->dev,
4296 				    "Timed out while disabling TX DMA\n");
4297 
4298 		bcmgenet_disable_tx_napi(priv);
4299 		bcmgenet_disable_rx_napi(priv);
4300 		disable_irq(priv->irq1);
4301 		bcmgenet_tx_reclaim_all(dev);
4302 		bcmgenet_fini_tx_napi(priv);
4303 	} else {
4304 		/* Teardown the interface */
4305 		bcmgenet_netif_stop(dev, true);
4306 	}
4307 
4308 	return 0;
4309 }
4310 
4311 static int bcmgenet_suspend_noirq(struct device *d)
4312 {
4313 	struct net_device *dev = dev_get_drvdata(d);
4314 	struct bcmgenet_priv *priv = netdev_priv(dev);
4315 	int ret = 0;
4316 
4317 	if (!netif_running(dev))
4318 		return 0;
4319 
4320 	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
4321 	if (device_may_wakeup(d) && priv->wolopts)
4322 		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
4323 	else if (priv->internal_phy)
4324 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
4325 
4326 	/* Let the framework handle resumption and leave the clocks on */
4327 	if (ret)
4328 		return ret;
4329 
4330 	/* Turn off the clocks */
4331 	clk_disable_unprepare(priv->clk);
4332 
4333 	return 0;
4334 }
4335 #else
4336 #define bcmgenet_suspend	NULL
4337 #define bcmgenet_suspend_noirq	NULL
4338 #define bcmgenet_resume		NULL
4339 #define bcmgenet_resume_noirq	NULL
4340 #endif /* CONFIG_PM_SLEEP */
4341 
4342 static const struct dev_pm_ops bcmgenet_pm_ops = {
4343 	.suspend	= bcmgenet_suspend,
4344 	.suspend_noirq	= bcmgenet_suspend_noirq,
4345 	.resume		= bcmgenet_resume,
4346 	.resume_noirq	= bcmgenet_resume_noirq,
4347 };
4348 
4349 static const struct acpi_device_id genet_acpi_match[] = {
4350 	{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
4351 	{ },
4352 };
4353 MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
4354 
4355 static struct platform_driver bcmgenet_driver = {
4356 	.probe	= bcmgenet_probe,
4357 	.remove = bcmgenet_remove,
4358 	.shutdown = bcmgenet_shutdown,
4359 	.driver	= {
4360 		.name	= "bcmgenet",
4361 		.of_match_table = bcmgenet_match,
4362 		.pm	= &bcmgenet_pm_ops,
4363 		.acpi_match_table = genet_acpi_match,
4364 	},
4365 };
4366 module_platform_driver(bcmgenet_driver);
4367 
4368 MODULE_AUTHOR("Broadcom Corporation");
4369 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
4370 MODULE_ALIAS("platform:bcmgenet");
4371 MODULE_LICENSE("GPL");
4372 MODULE_SOFTDEP("pre: mdio-bcm-unimac");
4373