xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision b04df400c30235fa347313c9e2a0695549bd2c8e)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "hwif.h"
55 
56 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
57 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
58 
59 /* Module parameters */
60 #define TX_TIMEO	5000
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, 0644);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
64 
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
68 
69 static int phyaddr = -1;
70 module_param(phyaddr, int, 0444);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
72 
73 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
74 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
75 
76 static int flow_ctrl = FLOW_OFF;
77 module_param(flow_ctrl, int, 0644);
78 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
79 
80 static int pause = PAUSE_TIME;
81 module_param(pause, int, 0644);
82 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
83 
84 #define TC_DEFAULT 64
85 static int tc = TC_DEFAULT;
86 module_param(tc, int, 0644);
87 MODULE_PARM_DESC(tc, "DMA threshold control value");
88 
89 #define	DEFAULT_BUFSIZE	1536
90 static int buf_sz = DEFAULT_BUFSIZE;
91 module_param(buf_sz, int, 0644);
92 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
93 
94 #define	STMMAC_RX_COPYBREAK	256
95 
96 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
97 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
98 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
99 
100 #define STMMAC_DEFAULT_LPI_TIMER	1000
101 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102 module_param(eee_timer, int, 0644);
103 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105 
106 /* By default the driver uses the ring mode to manage tx and rx descriptors,
107  * but the user can force the use of the chain mode instead of the ring.
108  */
109 static unsigned int chain_mode;
110 module_param(chain_mode, int, 0444);
111 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
112 
113 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
114 
115 #ifdef CONFIG_DEBUG_FS
116 static int stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
119 
120 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121 
122 /**
123  * stmmac_verify_args - verify the driver parameters.
124  * Description: it checks the driver parameters and sets a default in case of
125  * errors.
126  */
127 static void stmmac_verify_args(void)
128 {
129 	if (unlikely(watchdog < 0))
130 		watchdog = TX_TIMEO;
131 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132 		buf_sz = DEFAULT_BUFSIZE;
133 	if (unlikely(flow_ctrl > 1))
134 		flow_ctrl = FLOW_AUTO;
135 	else if (likely(flow_ctrl < 0))
136 		flow_ctrl = FLOW_OFF;
137 	if (unlikely((pause < 0) || (pause > 0xffff)))
138 		pause = PAUSE_TIME;
139 	if (eee_timer < 0)
140 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141 }
142 
143 /**
144  * stmmac_disable_all_queues - Disable all queues
145  * @priv: driver private structure
146  */
147 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148 {
149 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150 	u32 queue;
151 
152 	for (queue = 0; queue < rx_queues_cnt; queue++) {
153 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154 
155 		napi_disable(&rx_q->napi);
156 	}
157 }
158 
159 /**
160  * stmmac_enable_all_queues - Enable all queues
161  * @priv: driver private structure
162  */
163 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164 {
165 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166 	u32 queue;
167 
168 	for (queue = 0; queue < rx_queues_cnt; queue++) {
169 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170 
171 		napi_enable(&rx_q->napi);
172 	}
173 }
174 
175 /**
176  * stmmac_stop_all_queues - Stop all queues
177  * @priv: driver private structure
178  */
179 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180 {
181 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182 	u32 queue;
183 
184 	for (queue = 0; queue < tx_queues_cnt; queue++)
185 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186 }
187 
188 /**
189  * stmmac_start_all_queues - Start all queues
190  * @priv: driver private structure
191  */
192 static void stmmac_start_all_queues(struct stmmac_priv *priv)
193 {
194 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195 	u32 queue;
196 
197 	for (queue = 0; queue < tx_queues_cnt; queue++)
198 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199 }
200 
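/**
 * stmmac_service_event_schedule - schedule the deferred service task
 * @priv: driver private structure
 * Description: queue the service work unless the interface is going down
 * or the work has already been scheduled.
 */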
201 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202 {
203 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
204 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205 		queue_work(priv->wq, &priv->service_task);
206 }
207 
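/**
 * stmmac_global_err - handle a fatal/global error
 * @priv: driver private structure
 * Description: take the carrier down, flag that a reset is required and
 * let the service task perform the recovery.
 */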
208 static void stmmac_global_err(struct stmmac_priv *priv)
209 {
210 	netif_carrier_off(priv->dev);
211 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212 	stmmac_service_event_schedule(priv);
213 }
214 
215 /**
216  * stmmac_clk_csr_set - dynamically set the MDC clock
217  * @priv: driver private structure
218  * Description: this is to dynamically set the MDC clock according to the csr
219  * clock input.
220  * Note:
221  *	If a specific clk_csr value is passed from the platform
222  *	this means that the CSR Clock Range selection cannot be
223  *	changed at run-time and it is fixed (as reported in the driver
224  *	documentation). Otherwise, the driver will try to set the MDC
225  *	clock dynamically according to the actual clock input.
226  */
227 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228 {
229 	u32 clk_rate;
230 
231 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232 
233 	/* The platform-provided default clk_csr is assumed valid
234 	 * for all cases except the ones handled below.
235 	 * For values higher than the IEEE 802.3 specified frequency
236 	 * we cannot estimate the proper divider because the frequency
237 	 * of clk_csr_i is not known. So we do not change the default
238 	 * divider.
239 	 */
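	/* For example, with a 75 MHz csr clock the 60-100 MHz range below
	 * matches and STMMAC_CSR_60_100M is programmed as the CSR clock
	 * range used for MDC generation.
	 */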
240 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241 		if (clk_rate < CSR_F_35M)
242 			priv->clk_csr = STMMAC_CSR_20_35M;
243 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244 			priv->clk_csr = STMMAC_CSR_35_60M;
245 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246 			priv->clk_csr = STMMAC_CSR_60_100M;
247 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248 			priv->clk_csr = STMMAC_CSR_100_150M;
249 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250 			priv->clk_csr = STMMAC_CSR_150_250M;
251 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252 			priv->clk_csr = STMMAC_CSR_250_300M;
253 	}
254 
255 	if (priv->plat->has_sun8i) {
256 		if (clk_rate > 160000000)
257 			priv->clk_csr = 0x03;
258 		else if (clk_rate > 80000000)
259 			priv->clk_csr = 0x02;
260 		else if (clk_rate > 40000000)
261 			priv->clk_csr = 0x01;
262 		else
263 			priv->clk_csr = 0;
264 	}
265 }
266 
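/**
 * print_pkt - dump a packet buffer to the kernel log at debug level
 * @buf: buffer address
 * @len: number of bytes to dump
 */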
267 static void print_pkt(unsigned char *buf, int len)
268 {
269 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271 }
272 
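/**
 * stmmac_tx_avail - number of free descriptors in a TX ring
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: one slot is kept unused so that a full ring can be told
 * apart from an empty one (cur_tx == dirty_tx means empty). As an
 * illustration, if DMA_TX_SIZE were 512 with cur_tx = 10 and dirty_tx = 4,
 * then 512 - 10 + 4 - 1 = 505 descriptors would still be available.
 */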
273 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274 {
275 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276 	u32 avail;
277 
278 	if (tx_q->dirty_tx > tx_q->cur_tx)
279 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280 	else
281 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282 
283 	return avail;
284 }
285 
286 /**
287  * stmmac_rx_dirty - Get the number of dirty RX descriptors in the queue
288  * @priv: driver private structure
289  * @queue: RX queue index
290  */
291 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292 {
293 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294 	u32 dirty;
295 
296 	if (rx_q->dirty_rx <= rx_q->cur_rx)
297 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
298 	else
299 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300 
301 	return dirty;
302 }
303 
304 /**
305  * stmmac_hw_fix_mac_speed - callback for speed selection
306  * @priv: driver private structure
307  * Description: on some platforms (e.g. ST), some HW system configuration
308  * registers have to be set according to the link speed negotiated.
309  */
310 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311 {
312 	struct net_device *ndev = priv->dev;
313 	struct phy_device *phydev = ndev->phydev;
314 
315 	if (likely(priv->plat->fix_mac_speed))
316 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317 }
318 
319 /**
320  * stmmac_enable_eee_mode - check and enter in LPI mode
321  * @priv: driver private structure
322  * Description: this function checks whether all TX queues have completed
323  * their work and, if so, moves the MAC transmitter into LPI mode (EEE).
324  */
325 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326 {
327 	u32 tx_cnt = priv->plat->tx_queues_to_use;
328 	u32 queue;
329 
330 	/* check if all TX queues have the work finished */
331 	for (queue = 0; queue < tx_cnt; queue++) {
332 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333 
334 		if (tx_q->dirty_tx != tx_q->cur_tx)
335 			return; /* still unfinished work */
336 	}
337 
338 	/* Check and enter in LPI mode */
339 	if (!priv->tx_path_in_lpi_mode)
340 		stmmac_set_eee_mode(priv, priv->hw,
341 				priv->plat->en_tx_lpi_clockgating);
342 }
343 
344 /**
345  * stmmac_disable_eee_mode - disable and exit from LPI mode
346  * @priv: driver private structure
347  * Description: this function exits LPI mode and disables EEE. It is
348  * called from the xmit path.
349  */
350 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351 {
352 	stmmac_reset_eee_mode(priv, priv->hw);
353 	del_timer_sync(&priv->eee_ctrl_timer);
354 	priv->tx_path_in_lpi_mode = false;
355 }
356 
357 /**
358  * stmmac_eee_ctrl_timer - EEE TX SW timer.
359  * @t: timer_list entry containing the EEE control timer
360  * Description:
361  *  if there is no data transfer and we are not already in LPI state,
362  *  then the MAC transmitter can be moved to LPI state.
363  */
364 static void stmmac_eee_ctrl_timer(struct timer_list *t)
365 {
366 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367 
368 	stmmac_enable_eee_mode(priv);
369 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370 }
371 
372 /**
373  * stmmac_eee_init - init EEE
374  * @priv: driver private structure
375  * Description:
376  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
377  *  can also manage EEE, this function enables the LPI state and starts the
378  *  related timer.
379  */
380 bool stmmac_eee_init(struct stmmac_priv *priv)
381 {
382 	struct net_device *ndev = priv->dev;
383 	int interface = priv->plat->interface;
384 	unsigned long flags;
385 	bool ret = false;
386 
387 	if ((interface != PHY_INTERFACE_MODE_MII) &&
388 	    (interface != PHY_INTERFACE_MODE_GMII) &&
389 	    !phy_interface_mode_is_rgmii(interface))
390 		goto out;
391 
392 	/* When using the PCS we cannot access the PHY registers at this stage,
393 	 * so we do not support extra features like EEE.
394 	 */
395 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
396 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
397 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
398 		goto out;
399 
400 	/* MAC core supports the EEE feature. */
401 	if (priv->dma_cap.eee) {
402 		int tx_lpi_timer = priv->tx_lpi_timer;
403 
404 		/* Check if the PHY supports EEE */
405 		if (phy_init_eee(ndev->phydev, 1)) {
406 			/* Handle the case where EEE can no longer be supported
407 			 * at run time (for example because the link partner
408 			 * capabilities have changed).
409 			 * In that case the driver disables its own timers.
410 			 */
411 			spin_lock_irqsave(&priv->lock, flags);
412 			if (priv->eee_active) {
413 				netdev_dbg(priv->dev, "disable EEE\n");
414 				del_timer_sync(&priv->eee_ctrl_timer);
415 				stmmac_set_eee_timer(priv, priv->hw, 0,
416 						tx_lpi_timer);
417 			}
418 			priv->eee_active = 0;
419 			spin_unlock_irqrestore(&priv->lock, flags);
420 			goto out;
421 		}
422 		/* Activate the EEE and start timers */
423 		spin_lock_irqsave(&priv->lock, flags);
424 		if (!priv->eee_active) {
425 			priv->eee_active = 1;
426 			timer_setup(&priv->eee_ctrl_timer,
427 				    stmmac_eee_ctrl_timer, 0);
428 			mod_timer(&priv->eee_ctrl_timer,
429 				  STMMAC_LPI_T(eee_timer));
430 
431 			stmmac_set_eee_timer(priv, priv->hw,
432 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
433 		}
434 		/* Set HW EEE according to the speed */
435 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
436 
437 		ret = true;
438 		spin_unlock_irqrestore(&priv->lock, flags);
439 
440 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
441 	}
442 out:
443 	return ret;
444 }
445 
446 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
447  * @priv: driver private structure
448  * @p : descriptor pointer
449  * @skb : the socket buffer
450  * Description :
451  * This function reads the timestamp from the descriptor, performs some
452  * sanity checks and then passes it to the stack.
453  */
454 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
455 				   struct dma_desc *p, struct sk_buff *skb)
456 {
457 	struct skb_shared_hwtstamps shhwtstamp;
458 	u64 ns;
459 
460 	if (!priv->hwts_tx_en)
461 		return;
462 
463 	/* exit if skb doesn't support hw tstamp */
464 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
465 		return;
466 
467 	/* check tx tstamp status */
468 	if (stmmac_get_tx_timestamp_status(priv, p)) {
469 		/* get the valid tstamp */
470 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
471 
472 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
473 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
474 
475 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
476 		/* pass tstamp to stack */
477 		skb_tstamp_tx(skb, &shhwtstamp);
478 	}
479 
480 	return;
481 }
482 
483 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
484  * @priv: driver private structure
485  * @p : descriptor pointer
486  * @np : next descriptor pointer
487  * @skb : the socket buffer
488  * Description :
489  * This function reads the received packet's timestamp from the descriptor
490  * and passes it to the stack. It also performs some sanity checks.
491  */
492 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
493 				   struct dma_desc *np, struct sk_buff *skb)
494 {
495 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
496 	struct dma_desc *desc = p;
497 	u64 ns;
498 
499 	if (!priv->hwts_rx_en)
500 		return;
501 	/* For GMAC4, the valid timestamp is from CTX next desc. */
502 	if (priv->plat->has_gmac4)
503 		desc = np;
504 
505 	/* Check if timestamp is available */
506 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
507 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
508 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
509 		shhwtstamp = skb_hwtstamps(skb);
510 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
511 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
512 	} else  {
513 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
514 	}
515 }
516 
517 /**
518  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
519  *  @dev: device pointer.
520  *  @ifr: An IOCTL specific structure, that can contain a pointer to
521  *  a proprietary structure used to pass information to the driver.
522  *  Description:
523  *  This function configures the MAC to enable/disable both outgoing (TX)
524  *  and incoming (RX) packet time stamping based on user input.
525  *  Return Value:
526  *  0 on success and an appropriate negative error code on failure.
527  */
528 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
529 {
530 	struct stmmac_priv *priv = netdev_priv(dev);
531 	struct hwtstamp_config config;
532 	struct timespec64 now;
533 	u64 temp = 0;
534 	u32 ptp_v2 = 0;
535 	u32 tstamp_all = 0;
536 	u32 ptp_over_ipv4_udp = 0;
537 	u32 ptp_over_ipv6_udp = 0;
538 	u32 ptp_over_ethernet = 0;
539 	u32 snap_type_sel = 0;
540 	u32 ts_master_en = 0;
541 	u32 ts_event_en = 0;
542 	u32 value = 0;
543 	u32 sec_inc;
544 
545 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
546 		netdev_alert(priv->dev, "No support for HW time stamping\n");
547 		priv->hwts_tx_en = 0;
548 		priv->hwts_rx_en = 0;
549 
550 		return -EOPNOTSUPP;
551 	}
552 
553 	if (copy_from_user(&config, ifr->ifr_data,
554 			   sizeof(struct hwtstamp_config)))
555 		return -EFAULT;
556 
557 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
558 		   __func__, config.flags, config.tx_type, config.rx_filter);
559 
560 	/* reserved for future extensions */
561 	if (config.flags)
562 		return -EINVAL;
563 
564 	if (config.tx_type != HWTSTAMP_TX_OFF &&
565 	    config.tx_type != HWTSTAMP_TX_ON)
566 		return -ERANGE;
567 
568 	if (priv->adv_ts) {
569 		switch (config.rx_filter) {
570 		case HWTSTAMP_FILTER_NONE:
571 			/* time stamp no incoming packet at all */
572 			config.rx_filter = HWTSTAMP_FILTER_NONE;
573 			break;
574 
575 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
576 			/* PTP v1, UDP, any kind of event packet */
577 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
578 			/* take time stamp for all event messages */
579 			if (priv->plat->has_gmac4)
580 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
581 			else
582 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
583 
584 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586 			break;
587 
588 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
589 			/* PTP v1, UDP, Sync packet */
590 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
591 			/* take time stamp for SYNC messages only */
592 			ts_event_en = PTP_TCR_TSEVNTENA;
593 
594 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
595 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
596 			break;
597 
598 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
599 			/* PTP v1, UDP, Delay_req packet */
600 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
601 			/* take time stamp for Delay_Req messages only */
602 			ts_master_en = PTP_TCR_TSMSTRENA;
603 			ts_event_en = PTP_TCR_TSEVNTENA;
604 
605 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
606 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
607 			break;
608 
609 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
610 			/* PTP v2, UDP, any kind of event packet */
611 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
612 			ptp_v2 = PTP_TCR_TSVER2ENA;
613 			/* take time stamp for all event messages */
614 			if (priv->plat->has_gmac4)
615 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
616 			else
617 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
618 
619 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
620 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
621 			break;
622 
623 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
624 			/* PTP v2, UDP, Sync packet */
625 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
626 			ptp_v2 = PTP_TCR_TSVER2ENA;
627 			/* take time stamp for SYNC messages only */
628 			ts_event_en = PTP_TCR_TSEVNTENA;
629 
630 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
631 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
632 			break;
633 
634 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
635 			/* PTP v2, UDP, Delay_req packet */
636 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
637 			ptp_v2 = PTP_TCR_TSVER2ENA;
638 			/* take time stamp for Delay_Req messages only */
639 			ts_master_en = PTP_TCR_TSMSTRENA;
640 			ts_event_en = PTP_TCR_TSEVNTENA;
641 
642 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
643 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
644 			break;
645 
646 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
647 			/* PTP v2/802.AS1 any layer, any kind of event packet */
648 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
649 			ptp_v2 = PTP_TCR_TSVER2ENA;
650 			/* take time stamp for all event messages */
651 			if (priv->plat->has_gmac4)
652 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
653 			else
654 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
655 
656 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
657 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
658 			ptp_over_ethernet = PTP_TCR_TSIPENA;
659 			break;
660 
661 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
662 			/* PTP v2/802.AS1, any layer, Sync packet */
663 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
664 			ptp_v2 = PTP_TCR_TSVER2ENA;
665 			/* take time stamp for SYNC messages only */
666 			ts_event_en = PTP_TCR_TSEVNTENA;
667 
668 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
669 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
670 			ptp_over_ethernet = PTP_TCR_TSIPENA;
671 			break;
672 
673 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
674 			/* PTP v2/802.AS1, any layer, Delay_req packet */
675 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
676 			ptp_v2 = PTP_TCR_TSVER2ENA;
677 			/* take time stamp for Delay_Req messages only */
678 			ts_master_en = PTP_TCR_TSMSTRENA;
679 			ts_event_en = PTP_TCR_TSEVNTENA;
680 
681 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 			ptp_over_ethernet = PTP_TCR_TSIPENA;
684 			break;
685 
686 		case HWTSTAMP_FILTER_NTP_ALL:
687 		case HWTSTAMP_FILTER_ALL:
688 			/* time stamp any incoming packet */
689 			config.rx_filter = HWTSTAMP_FILTER_ALL;
690 			tstamp_all = PTP_TCR_TSENALL;
691 			break;
692 
693 		default:
694 			return -ERANGE;
695 		}
696 	} else {
697 		switch (config.rx_filter) {
698 		case HWTSTAMP_FILTER_NONE:
699 			config.rx_filter = HWTSTAMP_FILTER_NONE;
700 			break;
701 		default:
702 			/* PTP v1, UDP, any kind of event packet */
703 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
704 			break;
705 		}
706 	}
707 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
708 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
709 
710 	if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
711 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
712 	} else {
713 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
714 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
715 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
716 			 ts_master_en | snap_type_sel);
717 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
718 
719 		/* program Sub Second Increment reg */
720 		stmmac_config_sub_second_increment(priv,
721 				priv->ptpaddr, priv->plat->clk_ptp_rate,
722 				priv->plat->has_gmac4, &sec_inc);
723 		temp = div_u64(1000000000ULL, sec_inc);
724 
725 		/* calculate default added value:
726 		 * formula is :
727 		 * addend = (2^32)/freq_div_ratio;
728 		 * where, freq_div_ratio = 1e9ns/sec_inc
729 		 */
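		/* Purely illustrative numbers: if sec_inc were 40 (ns) and
		 * clk_ptp_rate were 50 MHz, then freq_div_ratio = 1e9 / 40 =
		 * 25000000 and addend = 2^32 * 25000000 / 50000000 = 2^31,
		 * i.e. the 32-bit accumulator advances by half of its range
		 * on every PTP reference clock cycle.
		 */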
730 		temp = (u64)(temp << 32);
731 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
732 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
733 
734 		/* initialize system time */
735 		ktime_get_real_ts64(&now);
736 
737 		/* lower 32 bits of tv_sec are safe until y2106 */
738 		stmmac_init_systime(priv, priv->ptpaddr,
739 				(u32)now.tv_sec, now.tv_nsec);
740 	}
741 
742 	return copy_to_user(ifr->ifr_data, &config,
743 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
744 }
745 
746 /**
747  * stmmac_init_ptp - init PTP
748  * @priv: driver private structure
749  * Description: this verifies whether the HW supports PTPv1 or PTPv2 by
750  * looking at the HW capability register.
751  * This function also registers the PTP driver.
752  */
753 static int stmmac_init_ptp(struct stmmac_priv *priv)
754 {
755 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
756 		return -EOPNOTSUPP;
757 
758 	priv->adv_ts = 0;
759 	/* Check if adv_ts can be enabled for dwmac 4.x core */
760 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
761 		priv->adv_ts = 1;
762 	/* Dwmac 3.x core with extend_desc can support adv_ts */
763 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
764 		priv->adv_ts = 1;
765 
766 	if (priv->dma_cap.time_stamp)
767 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
768 
769 	if (priv->adv_ts)
770 		netdev_info(priv->dev,
771 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
772 
773 	priv->hwts_tx_en = 0;
774 	priv->hwts_rx_en = 0;
775 
776 	stmmac_ptp_register(priv);
777 
778 	return 0;
779 }
780 
781 static void stmmac_release_ptp(struct stmmac_priv *priv)
782 {
783 	if (priv->plat->clk_ptp_ref)
784 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
785 	stmmac_ptp_unregister(priv);
786 }
787 
788 /**
789  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
790  *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
791  *  Description: It is used for configuring the flow control in all queues
792  */
793 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
794 {
795 	u32 tx_cnt = priv->plat->tx_queues_to_use;
796 
797 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
798 			priv->pause, tx_cnt);
799 }
800 
801 /**
802  * stmmac_adjust_link - adjusts the link parameters
803  * @dev: net device structure
804  * Description: this is the helper called by the physical abstraction layer
805  * drivers to communicate the phy link status. According to the speed and
806  * duplex, this driver can invoke registered glue-logic as well.
807  * It also invokes the EEE initialization because the EEE capability may
808  * change when switching between different (EEE capable) networks.
809  */
810 static void stmmac_adjust_link(struct net_device *dev)
811 {
812 	struct stmmac_priv *priv = netdev_priv(dev);
813 	struct phy_device *phydev = dev->phydev;
814 	unsigned long flags;
815 	bool new_state = false;
816 
817 	if (!phydev)
818 		return;
819 
820 	spin_lock_irqsave(&priv->lock, flags);
821 
822 	if (phydev->link) {
823 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
824 
825 		/* Now we make sure that we can be in full duplex mode.
826 		 * If not, we operate in half-duplex mode. */
827 		if (phydev->duplex != priv->oldduplex) {
828 			new_state = true;
829 			if (!phydev->duplex)
830 				ctrl &= ~priv->hw->link.duplex;
831 			else
832 				ctrl |= priv->hw->link.duplex;
833 			priv->oldduplex = phydev->duplex;
834 		}
835 		/* Flow Control operation */
836 		if (phydev->pause)
837 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
838 
839 		if (phydev->speed != priv->speed) {
840 			new_state = true;
841 			ctrl &= ~priv->hw->link.speed_mask;
842 			switch (phydev->speed) {
843 			case SPEED_1000:
844 				ctrl |= priv->hw->link.speed1000;
845 				break;
846 			case SPEED_100:
847 				ctrl |= priv->hw->link.speed100;
848 				break;
849 			case SPEED_10:
850 				ctrl |= priv->hw->link.speed10;
851 				break;
852 			default:
853 				netif_warn(priv, link, priv->dev,
854 					   "broken speed: %d\n", phydev->speed);
855 				phydev->speed = SPEED_UNKNOWN;
856 				break;
857 			}
858 			if (phydev->speed != SPEED_UNKNOWN)
859 				stmmac_hw_fix_mac_speed(priv);
860 			priv->speed = phydev->speed;
861 		}
862 
863 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
864 
865 		if (!priv->oldlink) {
866 			new_state = true;
867 			priv->oldlink = true;
868 		}
869 	} else if (priv->oldlink) {
870 		new_state = true;
871 		priv->oldlink = false;
872 		priv->speed = SPEED_UNKNOWN;
873 		priv->oldduplex = DUPLEX_UNKNOWN;
874 	}
875 
876 	if (new_state && netif_msg_link(priv))
877 		phy_print_status(phydev);
878 
879 	spin_unlock_irqrestore(&priv->lock, flags);
880 
881 	if (phydev->is_pseudo_fixed_link)
882 		/* Stop the PHY layer from calling the adjust_link hook in
883 		 * case a switch is attached to the stmmac driver.
884 		 */
885 		phydev->irq = PHY_IGNORE_INTERRUPT;
886 	else
887 		/* At this stage, init the EEE if supported.
888 		 * Never called in case of fixed_link.
889 		 */
890 		priv->eee_enabled = stmmac_eee_init(priv);
891 }
892 
893 /**
894  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
895  * @priv: driver private structure
896  * Description: this is to verify if the HW supports the PCS.
897  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
898  * configured for the TBI, RTBI, or SGMII PHY interface.
899  */
900 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
901 {
902 	int interface = priv->plat->interface;
903 
904 	if (priv->dma_cap.pcs) {
905 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
906 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
907 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
908 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
909 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
910 			priv->hw->pcs = STMMAC_PCS_RGMII;
911 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
912 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
913 			priv->hw->pcs = STMMAC_PCS_SGMII;
914 		}
915 	}
916 }
917 
918 /**
919  * stmmac_init_phy - PHY initialization
920  * @dev: net device structure
921  * Description: it initializes the driver's PHY state, and attaches the PHY
922  * to the mac driver.
923  *  Return value:
924  *  0 on success
925  */
926 static int stmmac_init_phy(struct net_device *dev)
927 {
928 	struct stmmac_priv *priv = netdev_priv(dev);
929 	struct phy_device *phydev;
930 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
931 	char bus_id[MII_BUS_ID_SIZE];
932 	int interface = priv->plat->interface;
933 	int max_speed = priv->plat->max_speed;

934 	priv->oldlink = false;
935 	priv->speed = SPEED_UNKNOWN;
936 	priv->oldduplex = DUPLEX_UNKNOWN;
937 
938 	if (priv->plat->phy_node) {
939 		phydev = of_phy_connect(dev, priv->plat->phy_node,
940 					&stmmac_adjust_link, 0, interface);
941 	} else {
942 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
943 			 priv->plat->bus_id);
944 
945 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
946 			 priv->plat->phy_addr);
947 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
948 			   phy_id_fmt);
949 
950 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
951 				     interface);
952 	}
953 
954 	if (IS_ERR_OR_NULL(phydev)) {
955 		netdev_err(priv->dev, "Could not attach to PHY\n");
956 		if (!phydev)
957 			return -ENODEV;
958 
959 		return PTR_ERR(phydev);
960 	}
961 
962 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
963 	if ((interface == PHY_INTERFACE_MODE_MII) ||
964 	    (interface == PHY_INTERFACE_MODE_RMII) ||
965 	    (max_speed < 1000 && max_speed > 0))
966 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
967 					 SUPPORTED_1000baseT_Full);
968 
969 	/*
970 	 * Broken HW is sometimes missing the pull-up resistor on the
971 	 * MDIO line, which results in reads to non-existent devices returning
972 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
973 	 * device as well.
974 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
975 	 */
976 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
977 		phy_disconnect(phydev);
978 		return -ENODEV;
979 	}
980 
981 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
982 	 * subsequent PHY polling, make sure we force a link transition if
983 	 * we have a UP/DOWN/UP transition
984 	 */
985 	if (phydev->is_pseudo_fixed_link)
986 		phydev->irq = PHY_POLL;
987 
988 	phy_attached_info(phydev);
989 	return 0;
990 }
991 
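/**
 * stmmac_display_rx_rings - dump the RX descriptor rings
 * @priv: driver private structure
 * Description: debug helper that prints the content of every RX ring.
 */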
992 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
993 {
994 	u32 rx_cnt = priv->plat->rx_queues_to_use;
995 	void *head_rx;
996 	u32 queue;
997 
998 	/* Display RX rings */
999 	for (queue = 0; queue < rx_cnt; queue++) {
1000 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1001 
1002 		pr_info("\tRX Queue %u rings\n", queue);
1003 
1004 		if (priv->extend_desc)
1005 			head_rx = (void *)rx_q->dma_erx;
1006 		else
1007 			head_rx = (void *)rx_q->dma_rx;
1008 
1009 		/* Display RX ring */
1010 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1011 	}
1012 }
1013 
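/**
 * stmmac_display_tx_rings - dump the TX descriptor rings
 * @priv: driver private structure
 * Description: debug helper that prints the content of every TX ring.
 */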
1014 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1015 {
1016 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1017 	void *head_tx;
1018 	u32 queue;
1019 
1020 	/* Display TX rings */
1021 	for (queue = 0; queue < tx_cnt; queue++) {
1022 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1023 
1024 		pr_info("\tTX Queue %u rings\n", queue);
1025 
1026 		if (priv->extend_desc)
1027 			head_tx = (void *)tx_q->dma_etx;
1028 		else
1029 			head_tx = (void *)tx_q->dma_tx;
1030 
1031 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1032 	}
1033 }
1034 
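/**
 * stmmac_display_rings - dump both the RX and TX descriptor rings
 * @priv: driver private structure
 */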
1035 static void stmmac_display_rings(struct stmmac_priv *priv)
1036 {
1037 	/* Display RX ring */
1038 	stmmac_display_rx_rings(priv);
1039 
1040 	/* Display TX ring */
1041 	stmmac_display_tx_rings(priv);
1042 }
1043 
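/**
 * stmmac_set_bfsize - pick the DMA buffer size for a given MTU
 * @mtu: maximum transfer unit of the interface
 * @bufsize: current buffer size
 * Description: return the smallest supported buffer size that can hold the
 * MTU. For example, an MTU of 1500 maps to DEFAULT_BUFSIZE (1536 bytes)
 * while an MTU of 3000 maps to BUF_SIZE_4KiB.
 */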
1044 static int stmmac_set_bfsize(int mtu, int bufsize)
1045 {
1046 	int ret = bufsize;
1047 
1048 	if (mtu >= BUF_SIZE_4KiB)
1049 		ret = BUF_SIZE_8KiB;
1050 	else if (mtu >= BUF_SIZE_2KiB)
1051 		ret = BUF_SIZE_4KiB;
1052 	else if (mtu > DEFAULT_BUFSIZE)
1053 		ret = BUF_SIZE_2KiB;
1054 	else
1055 		ret = DEFAULT_BUFSIZE;
1056 
1057 	return ret;
1058 }
1059 
1060 /**
1061  * stmmac_clear_rx_descriptors - clear RX descriptors
1062  * @priv: driver private structure
1063  * @queue: RX queue index
1064  * Description: this function is called to clear the RX descriptors
1065  * whether basic or extended descriptors are in use.
1066  */
1067 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1068 {
1069 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1070 	int i;
1071 
1072 	/* Clear the RX descriptors */
1073 	for (i = 0; i < DMA_RX_SIZE; i++)
1074 		if (priv->extend_desc)
1075 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1076 					priv->use_riwt, priv->mode,
1077 					(i == DMA_RX_SIZE - 1));
1078 		else
1079 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1080 					priv->use_riwt, priv->mode,
1081 					(i == DMA_RX_SIZE - 1));
1082 }
1083 
1084 /**
1085  * stmmac_clear_tx_descriptors - clear tx descriptors
1086  * @priv: driver private structure
1087  * @queue: TX queue index.
1088  * Description: this function is called to clear the TX descriptors
1089  * whether basic or extended descriptors are in use.
1090  */
1091 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1092 {
1093 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1094 	int i;
1095 
1096 	/* Clear the TX descriptors */
1097 	for (i = 0; i < DMA_TX_SIZE; i++)
1098 		if (priv->extend_desc)
1099 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1100 					priv->mode, (i == DMA_TX_SIZE - 1));
1101 		else
1102 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1103 					priv->mode, (i == DMA_TX_SIZE - 1));
1104 }
1105 
1106 /**
1107  * stmmac_clear_descriptors - clear descriptors
1108  * @priv: driver private structure
1109  * Description: this function is called to clear the TX and RX descriptors
1110  * whether basic or extended descriptors are in use.
1111  */
1112 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1113 {
1114 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1115 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1116 	u32 queue;
1117 
1118 	/* Clear the RX descriptors */
1119 	for (queue = 0; queue < rx_queue_cnt; queue++)
1120 		stmmac_clear_rx_descriptors(priv, queue);
1121 
1122 	/* Clear the TX descriptors */
1123 	for (queue = 0; queue < tx_queue_cnt; queue++)
1124 		stmmac_clear_tx_descriptors(priv, queue);
1125 }
1126 
1127 /**
1128  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1129  * @priv: driver private structure
1130  * @p: descriptor pointer
1131  * @i: descriptor index
1132  * @flags: gfp flag
1133  * @queue: RX queue index
1134  * Description: this function is called to allocate a receive buffer, perform
1135  * the DMA mapping and init the descriptor.
1136  */
1137 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1138 				  int i, gfp_t flags, u32 queue)
1139 {
1140 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1141 	struct sk_buff *skb;
1142 
1143 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1144 	if (!skb) {
1145 		netdev_err(priv->dev,
1146 			   "%s: Rx init fails; skb is NULL\n", __func__);
1147 		return -ENOMEM;
1148 	}
1149 	rx_q->rx_skbuff[i] = skb;
1150 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1151 						priv->dma_buf_sz,
1152 						DMA_FROM_DEVICE);
1153 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1154 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1155 		dev_kfree_skb_any(skb);
1156 		return -EINVAL;
1157 	}
1158 
1159 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1160 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1161 	else
1162 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1163 
1164 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1165 		stmmac_init_desc3(priv, p);
1166 
1167 	return 0;
1168 }
1169 
1170 /**
1171  * stmmac_free_rx_buffer - free RX dma buffers
1172  * @priv: private structure
1173  * @queue: RX queue index
1174  * @i: buffer index.
1175  */
1176 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1177 {
1178 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1179 
1180 	if (rx_q->rx_skbuff[i]) {
1181 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1182 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1183 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1184 	}
1185 	rx_q->rx_skbuff[i] = NULL;
1186 }
1187 
1188 /**
1189  * stmmac_free_tx_buffer - free TX dma buffers
1190  * @priv: private structure
1191  * @queue: TX queue index
1192  * @i: buffer index.
1193  */
1194 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1195 {
1196 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1197 
1198 	if (tx_q->tx_skbuff_dma[i].buf) {
1199 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1200 			dma_unmap_page(priv->device,
1201 				       tx_q->tx_skbuff_dma[i].buf,
1202 				       tx_q->tx_skbuff_dma[i].len,
1203 				       DMA_TO_DEVICE);
1204 		else
1205 			dma_unmap_single(priv->device,
1206 					 tx_q->tx_skbuff_dma[i].buf,
1207 					 tx_q->tx_skbuff_dma[i].len,
1208 					 DMA_TO_DEVICE);
1209 	}
1210 
1211 	if (tx_q->tx_skbuff[i]) {
1212 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1213 		tx_q->tx_skbuff[i] = NULL;
1214 		tx_q->tx_skbuff_dma[i].buf = 0;
1215 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1216 	}
1217 }
1218 
1219 /**
1220  * init_dma_rx_desc_rings - init the RX descriptor rings
1221  * @dev: net device structure
1222  * @flags: gfp flag.
1223  * Description: this function initializes the DMA RX descriptors
1224  * and allocates the socket buffers. It supports the chained and ring
1225  * modes.
1226  */
1227 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1228 {
1229 	struct stmmac_priv *priv = netdev_priv(dev);
1230 	u32 rx_count = priv->plat->rx_queues_to_use;
1231 	int ret = -ENOMEM;
1232 	int bfsize = 0;
1233 	int queue;
1234 	int i;
1235 
1236 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1237 	if (bfsize < 0)
1238 		bfsize = 0;
1239 
1240 	if (bfsize < BUF_SIZE_16KiB)
1241 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1242 
1243 	priv->dma_buf_sz = bfsize;
1244 
1245 	/* RX INITIALIZATION */
1246 	netif_dbg(priv, probe, priv->dev,
1247 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1248 
1249 	for (queue = 0; queue < rx_count; queue++) {
1250 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1251 
1252 		netif_dbg(priv, probe, priv->dev,
1253 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1254 			  (u32)rx_q->dma_rx_phy);
1255 
1256 		for (i = 0; i < DMA_RX_SIZE; i++) {
1257 			struct dma_desc *p;
1258 
1259 			if (priv->extend_desc)
1260 				p = &((rx_q->dma_erx + i)->basic);
1261 			else
1262 				p = rx_q->dma_rx + i;
1263 
1264 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1265 						     queue);
1266 			if (ret)
1267 				goto err_init_rx_buffers;
1268 
1269 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1270 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1271 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1272 		}
1273 
1274 		rx_q->cur_rx = 0;
1275 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1276 
1277 		stmmac_clear_rx_descriptors(priv, queue);
1278 
1279 		/* Setup the chained descriptor addresses */
1280 		if (priv->mode == STMMAC_CHAIN_MODE) {
1281 			if (priv->extend_desc)
1282 				stmmac_mode_init(priv, rx_q->dma_erx,
1283 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1284 			else
1285 				stmmac_mode_init(priv, rx_q->dma_rx,
1286 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1287 		}
1288 	}
1289 
1290 	buf_sz = bfsize;
1291 
1292 	return 0;
1293 
1294 err_init_rx_buffers:
1295 	while (queue >= 0) {
1296 		while (--i >= 0)
1297 			stmmac_free_rx_buffer(priv, queue, i);
1298 
1299 		if (queue == 0)
1300 			break;
1301 
1302 		i = DMA_RX_SIZE;
1303 		queue--;
1304 	}
1305 
1306 	return ret;
1307 }
1308 
1309 /**
1310  * init_dma_tx_desc_rings - init the TX descriptor rings
1311  * @dev: net device structure.
1312  * Description: this function initializes the DMA TX descriptors
1313  * and allocates the socket buffers. It supports the chained and ring
1314  * modes.
1315  */
1316 static int init_dma_tx_desc_rings(struct net_device *dev)
1317 {
1318 	struct stmmac_priv *priv = netdev_priv(dev);
1319 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1320 	u32 queue;
1321 	int i;
1322 
1323 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1324 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1325 
1326 		netif_dbg(priv, probe, priv->dev,
1327 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1328 			 (u32)tx_q->dma_tx_phy);
1329 
1330 		/* Setup the chained descriptor addresses */
1331 		if (priv->mode == STMMAC_CHAIN_MODE) {
1332 			if (priv->extend_desc)
1333 				stmmac_mode_init(priv, tx_q->dma_etx,
1334 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1335 			else
1336 				stmmac_mode_init(priv, tx_q->dma_tx,
1337 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1338 		}
1339 
1340 		for (i = 0; i < DMA_TX_SIZE; i++) {
1341 			struct dma_desc *p;
1342 			if (priv->extend_desc)
1343 				p = &((tx_q->dma_etx + i)->basic);
1344 			else
1345 				p = tx_q->dma_tx + i;
1346 
1347 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1348 				p->des0 = 0;
1349 				p->des1 = 0;
1350 				p->des2 = 0;
1351 				p->des3 = 0;
1352 			} else {
1353 				p->des2 = 0;
1354 			}
1355 
1356 			tx_q->tx_skbuff_dma[i].buf = 0;
1357 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1358 			tx_q->tx_skbuff_dma[i].len = 0;
1359 			tx_q->tx_skbuff_dma[i].last_segment = false;
1360 			tx_q->tx_skbuff[i] = NULL;
1361 		}
1362 
1363 		tx_q->dirty_tx = 0;
1364 		tx_q->cur_tx = 0;
1365 		tx_q->mss = 0;
1366 
1367 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1368 	}
1369 
1370 	return 0;
1371 }
1372 
1373 /**
1374  * init_dma_desc_rings - init the RX/TX descriptor rings
1375  * @dev: net device structure
1376  * @flags: gfp flag.
1377  * Description: this function initializes the DMA RX/TX descriptors
1378  * and allocates the socket buffers. It supports the chained and ring
1379  * modes.
1380  */
1381 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1382 {
1383 	struct stmmac_priv *priv = netdev_priv(dev);
1384 	int ret;
1385 
1386 	ret = init_dma_rx_desc_rings(dev, flags);
1387 	if (ret)
1388 		return ret;
1389 
1390 	ret = init_dma_tx_desc_rings(dev);
1391 
1392 	stmmac_clear_descriptors(priv);
1393 
1394 	if (netif_msg_hw(priv))
1395 		stmmac_display_rings(priv);
1396 
1397 	return ret;
1398 }
1399 
1400 /**
1401  * dma_free_rx_skbufs - free RX dma buffers
1402  * @priv: private structure
1403  * @queue: RX queue index
1404  */
1405 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1406 {
1407 	int i;
1408 
1409 	for (i = 0; i < DMA_RX_SIZE; i++)
1410 		stmmac_free_rx_buffer(priv, queue, i);
1411 }
1412 
1413 /**
1414  * dma_free_tx_skbufs - free TX dma buffers
1415  * @priv: private structure
1416  * @queue: TX queue index
1417  */
1418 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1419 {
1420 	int i;
1421 
1422 	for (i = 0; i < DMA_TX_SIZE; i++)
1423 		stmmac_free_tx_buffer(priv, queue, i);
1424 }
1425 
1426 /**
1427  * free_dma_rx_desc_resources - free RX dma desc resources
1428  * @priv: private structure
1429  */
1430 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1431 {
1432 	u32 rx_count = priv->plat->rx_queues_to_use;
1433 	u32 queue;
1434 
1435 	/* Free RX queue resources */
1436 	for (queue = 0; queue < rx_count; queue++) {
1437 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1438 
1439 		/* Release the DMA RX socket buffers */
1440 		dma_free_rx_skbufs(priv, queue);
1441 
1442 		/* Free DMA regions of consistent memory previously allocated */
1443 		if (!priv->extend_desc)
1444 			dma_free_coherent(priv->device,
1445 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1446 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1447 		else
1448 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1449 					  sizeof(struct dma_extended_desc),
1450 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1451 
1452 		kfree(rx_q->rx_skbuff_dma);
1453 		kfree(rx_q->rx_skbuff);
1454 	}
1455 }
1456 
1457 /**
1458  * free_dma_tx_desc_resources - free TX dma desc resources
1459  * @priv: private structure
1460  */
1461 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1462 {
1463 	u32 tx_count = priv->plat->tx_queues_to_use;
1464 	u32 queue;
1465 
1466 	/* Free TX queue resources */
1467 	for (queue = 0; queue < tx_count; queue++) {
1468 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1469 
1470 		/* Release the DMA TX socket buffers */
1471 		dma_free_tx_skbufs(priv, queue);
1472 
1473 		/* Free DMA regions of consistent memory previously allocated */
1474 		if (!priv->extend_desc)
1475 			dma_free_coherent(priv->device,
1476 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1477 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1478 		else
1479 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1480 					  sizeof(struct dma_extended_desc),
1481 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1482 
1483 		kfree(tx_q->tx_skbuff_dma);
1484 		kfree(tx_q->tx_skbuff);
1485 	}
1486 }
1487 
1488 /**
1489  * alloc_dma_rx_desc_resources - alloc RX resources.
1490  * @priv: private structure
1491  * Description: according to which descriptor can be used (extended or basic)
1492  * this function allocates the resources for the RX path. It also
1493  * pre-allocates the RX socket buffers in order to allow the zero-copy
1494  * mechanism.
1495  */
1496 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1497 {
1498 	u32 rx_count = priv->plat->rx_queues_to_use;
1499 	int ret = -ENOMEM;
1500 	u32 queue;
1501 
1502 	/* RX queues buffers and DMA */
1503 	for (queue = 0; queue < rx_count; queue++) {
1504 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1505 
1506 		rx_q->queue_index = queue;
1507 		rx_q->priv_data = priv;
1508 
1509 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1510 						    sizeof(dma_addr_t),
1511 						    GFP_KERNEL);
1512 		if (!rx_q->rx_skbuff_dma)
1513 			goto err_dma;
1514 
1515 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1516 						sizeof(struct sk_buff *),
1517 						GFP_KERNEL);
1518 		if (!rx_q->rx_skbuff)
1519 			goto err_dma;
1520 
1521 		if (priv->extend_desc) {
1522 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1523 							    DMA_RX_SIZE *
1524 							    sizeof(struct
1525 							    dma_extended_desc),
1526 							    &rx_q->dma_rx_phy,
1527 							    GFP_KERNEL);
1528 			if (!rx_q->dma_erx)
1529 				goto err_dma;
1530 
1531 		} else {
1532 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1533 							   DMA_RX_SIZE *
1534 							   sizeof(struct
1535 							   dma_desc),
1536 							   &rx_q->dma_rx_phy,
1537 							   GFP_KERNEL);
1538 			if (!rx_q->dma_rx)
1539 				goto err_dma;
1540 		}
1541 	}
1542 
1543 	return 0;
1544 
1545 err_dma:
1546 	free_dma_rx_desc_resources(priv);
1547 
1548 	return ret;
1549 }
1550 
1551 /**
1552  * alloc_dma_tx_desc_resources - alloc TX resources.
1553  * @priv: private structure
1554  * Description: according to which descriptor can be used (extended or basic)
1555  * this function allocates the resources for the TX path: the TX descriptor
1556  * rings and the arrays used to track the socket buffers queued for
1557  * transmission.
1558  */
1559 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1560 {
1561 	u32 tx_count = priv->plat->tx_queues_to_use;
1562 	int ret = -ENOMEM;
1563 	u32 queue;
1564 
1565 	/* TX queues buffers and DMA */
1566 	for (queue = 0; queue < tx_count; queue++) {
1567 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1568 
1569 		tx_q->queue_index = queue;
1570 		tx_q->priv_data = priv;
1571 
1572 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1573 						    sizeof(*tx_q->tx_skbuff_dma),
1574 						    GFP_KERNEL);
1575 		if (!tx_q->tx_skbuff_dma)
1576 			goto err_dma;
1577 
1578 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1579 						sizeof(struct sk_buff *),
1580 						GFP_KERNEL);
1581 		if (!tx_q->tx_skbuff)
1582 			goto err_dma;
1583 
1584 		if (priv->extend_desc) {
1585 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1586 							    DMA_TX_SIZE *
1587 							    sizeof(struct
1588 							    dma_extended_desc),
1589 							    &tx_q->dma_tx_phy,
1590 							    GFP_KERNEL);
1591 			if (!tx_q->dma_etx)
1592 				goto err_dma;
1593 		} else {
1594 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1595 							   DMA_TX_SIZE *
1596 							   sizeof(struct
1597 								  dma_desc),
1598 							   &tx_q->dma_tx_phy,
1599 							   GFP_KERNEL);
1600 			if (!tx_q->dma_tx)
1601 				goto err_dma;
1602 		}
1603 	}
1604 
1605 	return 0;
1606 
1607 err_dma:
1608 	free_dma_tx_desc_resources(priv);
1609 
1610 	return ret;
1611 }
1612 
1613 /**
1614  * alloc_dma_desc_resources - alloc TX/RX resources.
1615  * @priv: private structure
1616  * Description: according to which descriptor can be used (extended or basic)
1617  * this function allocates the resources for both TX and RX paths. In case of
1618  * reception, for example, it pre-allocates the RX socket buffers in order to
1619  * allow the zero-copy mechanism.
1620  */
1621 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1622 {
1623 	/* RX Allocation */
1624 	int ret = alloc_dma_rx_desc_resources(priv);
1625 
1626 	if (ret)
1627 		return ret;
1628 
1629 	ret = alloc_dma_tx_desc_resources(priv);
1630 
1631 	return ret;
1632 }
1633 
1634 /**
1635  * free_dma_desc_resources - free dma desc resources
1636  * @priv: private structure
1637  */
1638 static void free_dma_desc_resources(struct stmmac_priv *priv)
1639 {
1640 	/* Release the DMA RX socket buffers */
1641 	free_dma_rx_desc_resources(priv);
1642 
1643 	/* Release the DMA TX socket buffers */
1644 	free_dma_tx_desc_resources(priv);
1645 }
1646 
1647 /**
1648  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1649  *  @priv: driver private structure
1650  *  Description: It is used for enabling the rx queues in the MAC
1651  */
1652 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1653 {
1654 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1655 	int queue;
1656 	u8 mode;
1657 
1658 	for (queue = 0; queue < rx_queues_count; queue++) {
1659 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1660 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1661 	}
1662 }
1663 
1664 /**
1665  * stmmac_start_rx_dma - start RX DMA channel
1666  * @priv: driver private structure
1667  * @chan: RX channel index
1668  * Description:
1669  * This starts a RX DMA channel
1670  */
1671 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1672 {
1673 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1674 	stmmac_start_rx(priv, priv->ioaddr, chan);
1675 }
1676 
1677 /**
1678  * stmmac_start_tx_dma - start TX DMA channel
1679  * @priv: driver private structure
1680  * @chan: TX channel index
1681  * Description:
1682  * This starts a TX DMA channel
1683  */
1684 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1685 {
1686 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1687 	stmmac_start_tx(priv, priv->ioaddr, chan);
1688 }
1689 
1690 /**
1691  * stmmac_stop_rx_dma - stop RX DMA channel
1692  * @priv: driver private structure
1693  * @chan: RX channel index
1694  * Description:
1695  * This stops a RX DMA channel
1696  */
1697 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1698 {
1699 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1700 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1701 }
1702 
1703 /**
1704  * stmmac_stop_tx_dma - stop TX DMA channel
1705  * @priv: driver private structure
1706  * @chan: TX channel index
1707  * Description:
1708  * This stops a TX DMA channel
1709  */
1710 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1711 {
1712 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1713 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1714 }
1715 
1716 /**
1717  * stmmac_start_all_dma - start all RX and TX DMA channels
1718  * @priv: driver private structure
1719  * Description:
1720  * This starts all the RX and TX DMA channels
1721  */
1722 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1723 {
1724 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1725 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1726 	u32 chan = 0;
1727 
1728 	for (chan = 0; chan < rx_channels_count; chan++)
1729 		stmmac_start_rx_dma(priv, chan);
1730 
1731 	for (chan = 0; chan < tx_channels_count; chan++)
1732 		stmmac_start_tx_dma(priv, chan);
1733 }
1734 
1735 /**
1736  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1737  * @priv: driver private structure
1738  * Description:
1739  * This stops the RX and TX DMA channels
1740  */
1741 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1742 {
1743 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1744 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1745 	u32 chan = 0;
1746 
1747 	for (chan = 0; chan < rx_channels_count; chan++)
1748 		stmmac_stop_rx_dma(priv, chan);
1749 
1750 	for (chan = 0; chan < tx_channels_count; chan++)
1751 		stmmac_stop_tx_dma(priv, chan);
1752 }
1753 
1754 /**
1755  *  stmmac_dma_operation_mode - HW DMA operation mode
1756  *  @priv: driver private structure
1757  *  Description: it is used for configuring the DMA operation mode register in
1758  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1759  */
1760 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1761 {
1762 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1763 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1764 	int rxfifosz = priv->plat->rx_fifo_size;
1765 	int txfifosz = priv->plat->tx_fifo_size;
1766 	u32 txmode = 0;
1767 	u32 rxmode = 0;
1768 	u32 chan = 0;
1769 	u8 qmode = 0;
1770 
1771 	if (rxfifosz == 0)
1772 		rxfifosz = priv->dma_cap.rx_fifo_size;
1773 	if (txfifosz == 0)
1774 		txfifosz = priv->dma_cap.tx_fifo_size;
1775 
1776 	/* Adjust for real per queue fifo size */
1777 	rxfifosz /= rx_channels_count;
1778 	txfifosz /= tx_channels_count;
1779 
1780 	if (priv->plat->force_thresh_dma_mode) {
1781 		txmode = tc;
1782 		rxmode = tc;
1783 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1784 		/*
1785 		 * In case of GMAC, SF mode can be enabled
1786 		 * to perform the TX COE in HW. This depends on:
1787 		 * 1) TX COE being actually supported, and
1788 		 * 2) there being no bugged Jumbo frame support
1789 		 *    that requires not inserting the csum in the TDES.
1790 		 */
1791 		txmode = SF_DMA_MODE;
1792 		rxmode = SF_DMA_MODE;
1793 		priv->xstats.threshold = SF_DMA_MODE;
1794 	} else {
1795 		txmode = tc;
1796 		rxmode = SF_DMA_MODE;
1797 	}
1798 
1799 	/* configure all channels */
1800 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1801 		for (chan = 0; chan < rx_channels_count; chan++) {
1802 			qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1803 
1804 			stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1805 					rxfifosz, qmode);
1806 		}
1807 
1808 		for (chan = 0; chan < tx_channels_count; chan++) {
1809 			qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1810 
1811 			stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1812 					txfifosz, qmode);
1813 		}
1814 	} else {
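		/* Cores older than 4.00 program a single operation mode,
		 * not a per-channel one.
		 */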
1815 		stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
1816 	}
1817 }
1818 
1819 /**
1820  * stmmac_tx_clean - to manage the transmission completion
1821  * @priv: driver private structure
1822  * @queue: TX queue index
1823  * Description: it reclaims the transmit resources after transmission completes.
1824  */
1825 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1826 {
1827 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1828 	unsigned int bytes_compl = 0, pkts_compl = 0;
1829 	unsigned int entry;
1830 
1831 	netif_tx_lock(priv->dev);
1832 
1833 	priv->xstats.tx_clean++;
1834 
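	/* Reclaim descriptors from the last cleaned position (dirty_tx)
	 * up to the next one the xmit path will use (cur_tx).
	 */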
1835 	entry = tx_q->dirty_tx;
1836 	while (entry != tx_q->cur_tx) {
1837 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1838 		struct dma_desc *p;
1839 		int status;
1840 
1841 		if (priv->extend_desc)
1842 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1843 		else
1844 			p = tx_q->dma_tx + entry;
1845 
1846 		status = stmmac_tx_status(priv, &priv->dev->stats,
1847 				&priv->xstats, p, priv->ioaddr);
1848 		/* Check if the descriptor is owned by the DMA */
1849 		if (unlikely(status & tx_dma_own))
1850 			break;
1851 
1852 		/* Make sure descriptor fields are read after reading
1853 		 * the own bit.
1854 		 */
1855 		dma_rmb();
1856 
1857 		/* Just consider the last segment and ...*/
1858 		if (likely(!(status & tx_not_ls))) {
1859 			/* ... verify the status error condition */
1860 			if (unlikely(status & tx_err)) {
1861 				priv->dev->stats.tx_errors++;
1862 			} else {
1863 				priv->dev->stats.tx_packets++;
1864 				priv->xstats.tx_pkt_n++;
1865 			}
1866 			stmmac_get_tx_hwtstamp(priv, p, skb);
1867 		}
1868 
1869 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1870 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1871 				dma_unmap_page(priv->device,
1872 					       tx_q->tx_skbuff_dma[entry].buf,
1873 					       tx_q->tx_skbuff_dma[entry].len,
1874 					       DMA_TO_DEVICE);
1875 			else
1876 				dma_unmap_single(priv->device,
1877 						 tx_q->tx_skbuff_dma[entry].buf,
1878 						 tx_q->tx_skbuff_dma[entry].len,
1879 						 DMA_TO_DEVICE);
1880 			tx_q->tx_skbuff_dma[entry].buf = 0;
1881 			tx_q->tx_skbuff_dma[entry].len = 0;
1882 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1883 		}
1884 
1885 		stmmac_clean_desc3(priv, tx_q, p);
1886 
1887 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1888 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1889 
1890 		if (likely(skb != NULL)) {
1891 			pkts_compl++;
1892 			bytes_compl += skb->len;
1893 			dev_consume_skb_any(skb);
1894 			tx_q->tx_skbuff[entry] = NULL;
1895 		}
1896 
1897 		stmmac_release_tx_desc(priv, p, priv->mode);
1898 
1899 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1900 	}
1901 	tx_q->dirty_tx = entry;
1902 
1903 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1904 				  pkts_compl, bytes_compl);
1905 
1906 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1907 								queue))) &&
1908 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1909 
1910 		netif_dbg(priv, tx_done, priv->dev,
1911 			  "%s: restart transmit\n", __func__);
1912 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1913 	}
1914 
1915 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1916 		stmmac_enable_eee_mode(priv);
1917 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1918 	}
1919 	netif_tx_unlock(priv->dev);
1920 }
1921 
1922 /**
1923  * stmmac_tx_err - to manage the tx error
1924  * @priv: driver private structure
1925  * @chan: channel index
1926  * Description: it cleans the descriptors and restarts the transmission
1927  * in case of transmission errors.
1928  */
1929 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1930 {
1931 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1932 	int i;
1933 
1934 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1935 
1936 	stmmac_stop_tx_dma(priv, chan);
1937 	dma_free_tx_skbufs(priv, chan);
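	/* Reinitialize every descriptor in the ring so the channel
	 * restarts from a clean state.
	 */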
1938 	for (i = 0; i < DMA_TX_SIZE; i++)
1939 		if (priv->extend_desc)
1940 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1941 					priv->mode, (i == DMA_TX_SIZE - 1));
1942 		else
1943 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1944 					priv->mode, (i == DMA_TX_SIZE - 1));
1945 	tx_q->dirty_tx = 0;
1946 	tx_q->cur_tx = 0;
1947 	tx_q->mss = 0;
1948 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1949 	stmmac_start_tx_dma(priv, chan);
1950 
1951 	priv->dev->stats.tx_errors++;
1952 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1953 }
1954 
1955 /**
1956  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1957  *  @priv: driver private structure
1958  *  @txmode: TX operating mode
1959  *  @rxmode: RX operating mode
1960  *  @chan: channel index
1961  *  Description: it is used for configuring the DMA operation mode at
1962  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1963  *  mode.
1964  */
1965 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1966 					  u32 rxmode, u32 chan)
1967 {
1968 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1969 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1970 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1971 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1972 	int rxfifosz = priv->plat->rx_fifo_size;
1973 	int txfifosz = priv->plat->tx_fifo_size;
1974 
1975 	if (rxfifosz == 0)
1976 		rxfifosz = priv->dma_cap.rx_fifo_size;
1977 	if (txfifosz == 0)
1978 		txfifosz = priv->dma_cap.tx_fifo_size;
1979 
1980 	/* Adjust for real per queue fifo size */
1981 	rxfifosz /= rx_channels_count;
1982 	txfifosz /= tx_channels_count;
1983 
1984 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1985 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
1986 				rxqmode);
1987 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
1988 				txqmode);
1989 	} else {
1990 		stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
1991 	}
1992 }
1993 
1994 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1995 {
1996 	int ret = false;
1997 
1998 	/* Safety features are only available in cores >= 5.10 */
1999 	if (priv->synopsys_id < DWMAC_CORE_5_10)
2000 		return ret;
2001 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2002 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
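	/* Any non-zero status (other than -EINVAL) is escalated to a
	 * global error.
	 */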
2003 	if (ret && (ret != -EINVAL)) {
2004 		stmmac_global_err(priv);
2005 		return true;
2006 	}
2007 
2008 	return false;
2009 }
2010 
2011 /**
2012  * stmmac_dma_interrupt - DMA ISR
2013  * @priv: driver private structure
2014  * Description: this is the DMA ISR. It is called by the main ISR.
2015  * It calls the dwmac dma routine and schedules the poll method in case
2016  * some work can be done.
2017  */
2018 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2019 {
2020 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2021 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2022 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2023 				tx_channel_count : rx_channel_count;
2024 	u32 chan;
2025 	bool poll_scheduled = false;
2026 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2027 
2028 	/* Make sure we never check beyond our status buffer. */
2029 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2030 		channels_to_check = ARRAY_SIZE(status);
2031 
2032 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2033 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2034 	 * stmmac_channel struct.
2035 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2036 	 * all tx queues rather than just a single tx queue.
2037 	 */
2038 	for (chan = 0; chan < channels_to_check; chan++)
2039 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2040 				&priv->xstats, chan);
2041 
2042 	for (chan = 0; chan < rx_channel_count; chan++) {
2043 		if (likely(status[chan] & handle_rx)) {
2044 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2045 
2046 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2047 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2048 				__napi_schedule(&rx_q->napi);
2049 				poll_scheduled = true;
2050 			}
2051 		}
2052 	}
2053 
2054 	/* If we scheduled poll, we already know that tx queues will be checked.
2055 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2056 	 * completed transmission, if so, call stmmac_poll (once).
2057 	 */
2058 	if (!poll_scheduled) {
2059 		for (chan = 0; chan < tx_channel_count; chan++) {
2060 			if (status[chan] & handle_tx) {
2061 				/* It doesn't matter what rx queue we choose
2062 				 * here. We use 0 since it always exists.
2063 				 */
2064 				struct stmmac_rx_queue *rx_q =
2065 					&priv->rx_queue[0];
2066 
2067 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2068 					stmmac_disable_dma_irq(priv,
2069 							priv->ioaddr, chan);
2070 					__napi_schedule(&rx_q->napi);
2071 				}
2072 				break;
2073 			}
2074 		}
2075 	}
2076 
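	/* Handle abnormal TX interrupts: either bump the DMA threshold
	 * or restart the faulty channel.
	 */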
2077 	for (chan = 0; chan < tx_channel_count; chan++) {
2078 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2079 			/* Try to bump up the dma threshold on this failure */
2080 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2081 			    (tc <= 256)) {
2082 				tc += 64;
2083 				if (priv->plat->force_thresh_dma_mode)
2084 					stmmac_set_dma_operation_mode(priv,
2085 								      tc,
2086 								      tc,
2087 								      chan);
2088 				else
2089 					stmmac_set_dma_operation_mode(priv,
2090 								    tc,
2091 								    SF_DMA_MODE,
2092 								    chan);
2093 				priv->xstats.threshold = tc;
2094 			}
2095 		} else if (unlikely(status[chan] == tx_hard_error)) {
2096 			stmmac_tx_err(priv, chan);
2097 		}
2098 	}
2099 }
2100 
2101 /**
2102  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2103  * @priv: driver private structure
2104  * Description: this masks the MMC irq since the counters are managed in SW.
2105  */
2106 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2107 {
2108 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2109 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2110 
2111 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2112 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2113 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2114 	} else {
2115 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2116 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2117 	}
2118 
2119 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2120 
2121 	if (priv->dma_cap.rmon) {
2122 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2123 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2124 	} else
2125 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2126 }
2127 
2128 /**
2129  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2130  * @priv: driver private structure
2131  * Description:
2132  *  new GMAC chip generations have a new register to indicate the
2133  *  presence of the optional feature/functions.
2134  *  This can also be used to override the value passed through the
2135  *  platform and is necessary for old MAC10/100 and GMAC chips.
2136  */
2137 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2138 {
2139 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2140 }
2141 
2142 /**
2143  * stmmac_check_ether_addr - check if the MAC addr is valid
2144  * @priv: driver private structure
2145  * Description:
2146  * it verifies that the MAC address is valid; in case of failure it
2147  * generates a random MAC address.
2148  */
2149 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2150 {
2151 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2152 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2153 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2154 			eth_hw_addr_random(priv->dev);
2155 		netdev_info(priv->dev, "device MAC address %pM\n",
2156 			    priv->dev->dev_addr);
2157 	}
2158 }
2159 
2160 /**
2161  * stmmac_init_dma_engine - DMA init.
2162  * @priv: driver private structure
2163  * Description:
2164  * It inits the DMA invoking the specific MAC/GMAC callback.
2165  * Some DMA parameters can be passed from the platform;
2166  * in case these are not passed, a default is kept for the MAC or GMAC.
2167  */
2168 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2169 {
2170 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2171 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2172 	struct stmmac_rx_queue *rx_q;
2173 	struct stmmac_tx_queue *tx_q;
2174 	u32 dummy_dma_rx_phy = 0;
2175 	u32 dummy_dma_tx_phy = 0;
2176 	u32 chan = 0;
2177 	int atds = 0;
2178 	int ret = 0;
2179 
2180 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2181 		dev_err(priv->device, "Invalid DMA configuration\n");
2182 		return -EINVAL;
2183 	}
2184 
2185 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2186 		atds = 1;
2187 
2188 	ret = stmmac_reset(priv, priv->ioaddr);
2189 	if (ret) {
2190 		dev_err(priv->device, "Failed to reset the dma\n");
2191 		return ret;
2192 	}
2193 
2194 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2195 		/* DMA Configuration */
2196 		stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
2197 				dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2198 
2199 		/* DMA RX Channel Configuration */
2200 		for (chan = 0; chan < rx_channels_count; chan++) {
2201 			rx_q = &priv->rx_queue[chan];
2202 
2203 			stmmac_init_rx_chan(priv, priv->ioaddr,
2204 					priv->plat->dma_cfg, rx_q->dma_rx_phy,
2205 					chan);
2206 
2207 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2208 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2209 			stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2210 					rx_q->rx_tail_addr, chan);
2211 		}
2212 
2213 		/* DMA TX Channel Configuration */
2214 		for (chan = 0; chan < tx_channels_count; chan++) {
2215 			tx_q = &priv->tx_queue[chan];
2216 
2217 			stmmac_init_chan(priv, priv->ioaddr,
2218 					priv->plat->dma_cfg, chan);
2219 
2220 			stmmac_init_tx_chan(priv, priv->ioaddr,
2221 					priv->plat->dma_cfg, tx_q->dma_tx_phy,
2222 					chan);
2223 
2224 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2225 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2226 			stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2227 					tx_q->tx_tail_addr, chan);
2228 		}
2229 	} else {
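		/* Cores older than 4.00: single DMA configuration using
		 * the channel 0 rings.
		 */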
2230 		rx_q = &priv->rx_queue[chan];
2231 		tx_q = &priv->tx_queue[chan];
2232 		stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
2233 				tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2234 	}
2235 
2236 	if (priv->plat->axi)
2237 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2238 
2239 	return ret;
2240 }
2241 
2242 /**
2243  * stmmac_tx_timer - mitigation sw timer for tx.
2244  * @t: pointer to the timer_list structure
2245  * Description:
2246  * This is the timer handler to directly invoke the stmmac_tx_clean.
2247  */
2248 static void stmmac_tx_timer(struct timer_list *t)
2249 {
2250 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2251 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2252 	u32 queue;
2253 
2254 	/* let's scan all the tx queues */
2255 	for (queue = 0; queue < tx_queues_count; queue++)
2256 		stmmac_tx_clean(priv, queue);
2257 }
2258 
2259 /**
2260  * stmmac_init_tx_coalesce - init tx mitigation options.
2261  * @priv: driver private structure
2262  * Description:
2263  * This inits the transmit coalesce parameters: i.e. timer rate,
2264  * timer handler and default threshold used for enabling the
2265  * interrupt on completion bit.
2266  */
2267 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2268 {
2269 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2270 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2271 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2272 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2273 	add_timer(&priv->txtimer);
2274 }
2275 
2276 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2277 {
2278 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2279 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2280 	u32 chan;
2281 
2282 	/* set TX ring length */
2283 	for (chan = 0; chan < tx_channels_count; chan++)
2284 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2285 				(DMA_TX_SIZE - 1), chan);
2286 
2287 	/* set RX ring length */
2288 	for (chan = 0; chan < rx_channels_count; chan++)
2289 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2290 				(DMA_RX_SIZE - 1), chan);
2291 }
2292 
2293 /**
2294  *  stmmac_set_tx_queue_weight - Set TX queue weight
2295  *  @priv: driver private structure
2296  *  Description: It is used for setting the TX queue weights
2297  */
2298 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2299 {
2300 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2301 	u32 weight;
2302 	u32 queue;
2303 
2304 	for (queue = 0; queue < tx_queues_count; queue++) {
2305 		weight = priv->plat->tx_queues_cfg[queue].weight;
2306 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2307 	}
2308 }
2309 
2310 /**
2311  *  stmmac_configure_cbs - Configure CBS in TX queue
2312  *  @priv: driver private structure
2313  *  Description: It is used for configuring CBS in AVB TX queues
2314  */
2315 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2316 {
2317 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2318 	u32 mode_to_use;
2319 	u32 queue;
2320 
2321 	/* queue 0 is reserved for legacy traffic */
2322 	for (queue = 1; queue < tx_queues_count; queue++) {
2323 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2324 		if (mode_to_use == MTL_QUEUE_DCB)
2325 			continue;
2326 
2327 		stmmac_config_cbs(priv, priv->hw,
2328 				priv->plat->tx_queues_cfg[queue].send_slope,
2329 				priv->plat->tx_queues_cfg[queue].idle_slope,
2330 				priv->plat->tx_queues_cfg[queue].high_credit,
2331 				priv->plat->tx_queues_cfg[queue].low_credit,
2332 				queue);
2333 	}
2334 }
2335 
2336 /**
2337  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2338  *  @priv: driver private structure
2339  *  Description: It is used for mapping RX queues to RX dma channels
2340  */
2341 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2342 {
2343 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2344 	u32 queue;
2345 	u32 chan;
2346 
2347 	for (queue = 0; queue < rx_queues_count; queue++) {
2348 		chan = priv->plat->rx_queues_cfg[queue].chan;
2349 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2350 	}
2351 }
2352 
2353 /**
2354  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2355  *  @priv: driver private structure
2356  *  Description: It is used for configuring the RX Queue Priority
2357  */
2358 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2359 {
2360 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2361 	u32 queue;
2362 	u32 prio;
2363 
2364 	for (queue = 0; queue < rx_queues_count; queue++) {
2365 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2366 			continue;
2367 
2368 		prio = priv->plat->rx_queues_cfg[queue].prio;
2369 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2370 	}
2371 }
2372 
2373 /**
2374  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2375  *  @priv: driver private structure
2376  *  Description: It is used for configuring the TX Queue Priority
2377  */
2378 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2379 {
2380 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2381 	u32 queue;
2382 	u32 prio;
2383 
2384 	for (queue = 0; queue < tx_queues_count; queue++) {
2385 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2386 			continue;
2387 
2388 		prio = priv->plat->tx_queues_cfg[queue].prio;
2389 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2390 	}
2391 }
2392 
2393 /**
2394  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2395  *  @priv: driver private structure
2396  *  Description: It is used for configuring the RX queue routing
2397  */
2398 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2399 {
2400 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2401 	u32 queue;
2402 	u8 packet;
2403 
2404 	for (queue = 0; queue < rx_queues_count; queue++) {
2405 		/* no specific packet type routing specified for the queue */
2406 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2407 			continue;
2408 
2409 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2410 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2411 	}
2412 }
2413 
2414 /**
2415  *  stmmac_mtl_configuration - Configure MTL
2416  *  @priv: driver private structure
2417  *  Description: It is used for configuring the MTL
2418  */
2419 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2420 {
2421 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2422 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2423 
2424 	if (tx_queues_count > 1)
2425 		stmmac_set_tx_queue_weight(priv);
2426 
2427 	/* Configure MTL RX algorithms */
2428 	if (rx_queues_count > 1)
2429 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2430 				priv->plat->rx_sched_algorithm);
2431 
2432 	/* Configure MTL TX algorithms */
2433 	if (tx_queues_count > 1)
2434 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2435 				priv->plat->tx_sched_algorithm);
2436 
2437 	/* Configure CBS in AVB TX queues */
2438 	if (tx_queues_count > 1)
2439 		stmmac_configure_cbs(priv);
2440 
2441 	/* Map RX MTL to DMA channels */
2442 	stmmac_rx_queue_dma_chan_map(priv);
2443 
2444 	/* Enable MAC RX Queues */
2445 	stmmac_mac_enable_rx_queues(priv);
2446 
2447 	/* Set RX priorities */
2448 	if (rx_queues_count > 1)
2449 		stmmac_mac_config_rx_queues_prio(priv);
2450 
2451 	/* Set TX priorities */
2452 	if (tx_queues_count > 1)
2453 		stmmac_mac_config_tx_queues_prio(priv);
2454 
2455 	/* Set RX routing */
2456 	if (rx_queues_count > 1)
2457 		stmmac_mac_config_rx_queues_routing(priv);
2458 }
2459 
2460 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2461 {
2462 	if (priv->dma_cap.asp) {
2463 		netdev_info(priv->dev, "Enabling Safety Features\n");
2464 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2465 	} else {
2466 		netdev_info(priv->dev, "No Safety Features support found\n");
2467 	}
2468 }
2469 
2470 /**
2471  * stmmac_hw_setup - setup mac in a usable state.
2472  *  @dev : pointer to the device structure.
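 *  @init_ptp: if true, also initialize the PTP reference clock and PTP support.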
2473  *  Description:
2474  *  this is the main function to setup the HW in a usable state: the
2475  *  dma engine is reset, the core registers are configured (e.g. AXI,
2476  *  Checksum features, timers) and the DMA is ready to start receiving and
2477  *  transmitting.
2478  *  Return value:
2479  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2480  *  file on failure.
2481  */
2482 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2483 {
2484 	struct stmmac_priv *priv = netdev_priv(dev);
2485 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2486 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2487 	u32 chan;
2488 	int ret;
2489 
2490 	/* DMA initialization and SW reset */
2491 	ret = stmmac_init_dma_engine(priv);
2492 	if (ret < 0) {
2493 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2494 			   __func__);
2495 		return ret;
2496 	}
2497 
2498 	/* Copy the MAC addr into the HW  */
2499 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2500 
2501 	/* PS and related bits will be programmed according to the speed */
2502 	if (priv->hw->pcs) {
2503 		int speed = priv->plat->mac_port_sel_speed;
2504 
2505 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2506 		    (speed == SPEED_1000)) {
2507 			priv->hw->ps = speed;
2508 		} else {
2509 			dev_warn(priv->device, "invalid port speed\n");
2510 			priv->hw->ps = 0;
2511 		}
2512 	}
2513 
2514 	/* Initialize the MAC Core */
2515 	stmmac_core_init(priv, priv->hw, dev);
2516 
2517 	/* Initialize MTL */
2518 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2519 		stmmac_mtl_configuration(priv);
2520 
2521 	/* Initialize Safety Features */
2522 	if (priv->synopsys_id >= DWMAC_CORE_5_10)
2523 		stmmac_safety_feat_configuration(priv);
2524 
2525 	ret = stmmac_rx_ipc(priv, priv->hw);
2526 	if (!ret) {
2527 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2528 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2529 		priv->hw->rx_csum = 0;
2530 	}
2531 
2532 	/* Enable the MAC Rx/Tx */
2533 	stmmac_mac_set(priv, priv->ioaddr, true);
2534 
2535 	/* Set the HW DMA mode and the COE */
2536 	stmmac_dma_operation_mode(priv);
2537 
2538 	stmmac_mmc_setup(priv);
2539 
2540 	if (init_ptp) {
2541 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2542 		if (ret < 0)
2543 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2544 
2545 		ret = stmmac_init_ptp(priv);
2546 		if (ret == -EOPNOTSUPP)
2547 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2548 		else if (ret)
2549 			netdev_warn(priv->dev, "PTP init failed\n");
2550 	}
2551 
2552 #ifdef CONFIG_DEBUG_FS
2553 	ret = stmmac_init_fs(dev);
2554 	if (ret < 0)
2555 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2556 			    __func__);
2557 #endif
2558 	/* Start the ball rolling... */
2559 	stmmac_start_all_dma(priv);
2560 
2561 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2562 
2563 	if (priv->use_riwt) {
2564 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2565 		if (!ret)
2566 			priv->rx_riwt = MAX_DMA_RIWT;
2567 	}
2568 
2569 	if (priv->hw->pcs)
2570 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2571 
2572 	/* set TX and RX rings length */
2573 	stmmac_set_rings_length(priv);
2574 
2575 	/* Enable TSO */
2576 	if (priv->tso) {
2577 		for (chan = 0; chan < tx_cnt; chan++)
2578 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2579 	}
2580 
2581 	return 0;
2582 }
2583 
2584 static void stmmac_hw_teardown(struct net_device *dev)
2585 {
2586 	struct stmmac_priv *priv = netdev_priv(dev);
2587 
2588 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2589 }
2590 
2591 /**
2592  *  stmmac_open - open entry point of the driver
2593  *  @dev : pointer to the device structure.
2594  *  Description:
2595  *  This function is the open entry point of the driver.
2596  *  Return value:
2597  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2598  *  file on failure.
2599  */
2600 static int stmmac_open(struct net_device *dev)
2601 {
2602 	struct stmmac_priv *priv = netdev_priv(dev);
2603 	int ret;
2604 
2605 	stmmac_check_ether_addr(priv);
2606 
2607 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2608 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2609 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2610 		ret = stmmac_init_phy(dev);
2611 		if (ret) {
2612 			netdev_err(priv->dev,
2613 				   "%s: Cannot attach to PHY (error: %d)\n",
2614 				   __func__, ret);
2615 			return ret;
2616 		}
2617 	}
2618 
2619 	/* Extra statistics */
2620 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2621 	priv->xstats.threshold = tc;
2622 
2623 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2624 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2625 
2626 	ret = alloc_dma_desc_resources(priv);
2627 	if (ret < 0) {
2628 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2629 			   __func__);
2630 		goto dma_desc_error;
2631 	}
2632 
2633 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2634 	if (ret < 0) {
2635 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2636 			   __func__);
2637 		goto init_error;
2638 	}
2639 
2640 	ret = stmmac_hw_setup(dev, true);
2641 	if (ret < 0) {
2642 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2643 		goto init_error;
2644 	}
2645 
2646 	stmmac_init_tx_coalesce(priv);
2647 
2648 	if (dev->phydev)
2649 		phy_start(dev->phydev);
2650 
2651 	/* Request the IRQ lines */
2652 	ret = request_irq(dev->irq, stmmac_interrupt,
2653 			  IRQF_SHARED, dev->name, dev);
2654 	if (unlikely(ret < 0)) {
2655 		netdev_err(priv->dev,
2656 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2657 			   __func__, dev->irq, ret);
2658 		goto irq_error;
2659 	}
2660 
2661 	/* Request the Wake IRQ in case another line is used for WoL */
2662 	if (priv->wol_irq != dev->irq) {
2663 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2664 				  IRQF_SHARED, dev->name, dev);
2665 		if (unlikely(ret < 0)) {
2666 			netdev_err(priv->dev,
2667 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2668 				   __func__, priv->wol_irq, ret);
2669 			goto wolirq_error;
2670 		}
2671 	}
2672 
2673 	/* Request the LPI IRQ in case a dedicated line is used for it */
2674 	if (priv->lpi_irq > 0) {
2675 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2676 				  dev->name, dev);
2677 		if (unlikely(ret < 0)) {
2678 			netdev_err(priv->dev,
2679 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2680 				   __func__, priv->lpi_irq, ret);
2681 			goto lpiirq_error;
2682 		}
2683 	}
2684 
2685 	stmmac_enable_all_queues(priv);
2686 	stmmac_start_all_queues(priv);
2687 
2688 	return 0;
2689 
2690 lpiirq_error:
2691 	if (priv->wol_irq != dev->irq)
2692 		free_irq(priv->wol_irq, dev);
2693 wolirq_error:
2694 	free_irq(dev->irq, dev);
2695 irq_error:
2696 	if (dev->phydev)
2697 		phy_stop(dev->phydev);
2698 
2699 	del_timer_sync(&priv->txtimer);
2700 	stmmac_hw_teardown(dev);
2701 init_error:
2702 	free_dma_desc_resources(priv);
2703 dma_desc_error:
2704 	if (dev->phydev)
2705 		phy_disconnect(dev->phydev);
2706 
2707 	return ret;
2708 }
2709 
2710 /**
2711  *  stmmac_release - close entry point of the driver
2712  *  @dev : device pointer.
2713  *  Description:
2714  *  This is the stop entry point of the driver.
2715  */
2716 static int stmmac_release(struct net_device *dev)
2717 {
2718 	struct stmmac_priv *priv = netdev_priv(dev);
2719 
2720 	if (priv->eee_enabled)
2721 		del_timer_sync(&priv->eee_ctrl_timer);
2722 
2723 	/* Stop and disconnect the PHY */
2724 	if (dev->phydev) {
2725 		phy_stop(dev->phydev);
2726 		phy_disconnect(dev->phydev);
2727 	}
2728 
2729 	stmmac_stop_all_queues(priv);
2730 
2731 	stmmac_disable_all_queues(priv);
2732 
2733 	del_timer_sync(&priv->txtimer);
2734 
2735 	/* Free the IRQ lines */
2736 	free_irq(dev->irq, dev);
2737 	if (priv->wol_irq != dev->irq)
2738 		free_irq(priv->wol_irq, dev);
2739 	if (priv->lpi_irq > 0)
2740 		free_irq(priv->lpi_irq, dev);
2741 
2742 	/* Stop TX/RX DMA and clear the descriptors */
2743 	stmmac_stop_all_dma(priv);
2744 
2745 	/* Release and free the Rx/Tx resources */
2746 	free_dma_desc_resources(priv);
2747 
2748 	/* Disable the MAC Rx/Tx */
2749 	stmmac_mac_set(priv, priv->ioaddr, false);
2750 
2751 	netif_carrier_off(dev);
2752 
2753 #ifdef CONFIG_DEBUG_FS
2754 	stmmac_exit_fs(dev);
2755 #endif
2756 
2757 	stmmac_release_ptp(priv);
2758 
2759 	return 0;
2760 }
2761 
2762 /**
2763  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
2764  *  @priv: driver private structure
2765  *  @des: buffer start address
2766  *  @total_len: total length to fill in descriptors
2767  *  @last_segment: condition for the last descriptor
2768  *  @queue: TX queue index
2769  *  Description:
2770  *  This function fills the descriptors and requests new descriptors
2771  *  according to the buffer length to fill.
2772  */
2773 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2774 				 int total_len, bool last_segment, u32 queue)
2775 {
2776 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2777 	struct dma_desc *desc;
2778 	u32 buff_size;
2779 	int tmp_len;
2780 
2781 	tmp_len = total_len;
2782 
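	/* Split the payload into chunks of at most TSO_MAX_BUFF_SIZE
	 * bytes, using one descriptor per chunk.
	 */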
2783 	while (tmp_len > 0) {
2784 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2785 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2786 		desc = tx_q->dma_tx + tx_q->cur_tx;
2787 
2788 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2789 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2790 			    TSO_MAX_BUFF_SIZE : tmp_len;
2791 
2792 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2793 				0, 1,
2794 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2795 				0, 0);
2796 
2797 		tmp_len -= TSO_MAX_BUFF_SIZE;
2798 	}
2799 }
2800 
2801 /**
2802  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2803  *  @skb : the socket buffer
2804  *  @dev : device pointer
2805  *  Description: this is the transmit function that is called on TSO frames
2806  *  (support available on GMAC4 and newer chips).
2807  *  The diagram below shows the ring programming in case of TSO frames:
2808  *
2809  *  First Descriptor
2810  *   --------
2811  *   | DES0 |---> buffer1 = L2/L3/L4 header
2812  *   | DES1 |---> TCP Payload (can continue on next descr...)
2813  *   | DES2 |---> buffer 1 and 2 len
2814  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2815  *   --------
2816  *	|
2817  *     ...
2818  *	|
2819  *   --------
2820  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2821  *   | DES1 | --|
2822  *   | DES2 | --> buffer 1 and 2 len
2823  *   | DES3 |
2824  *   --------
2825  *
2826  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when the MSS changes.
2827  */
2828 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2829 {
2830 	struct dma_desc *desc, *first, *mss_desc = NULL;
2831 	struct stmmac_priv *priv = netdev_priv(dev);
2832 	int nfrags = skb_shinfo(skb)->nr_frags;
2833 	u32 queue = skb_get_queue_mapping(skb);
2834 	unsigned int first_entry, des;
2835 	struct stmmac_tx_queue *tx_q;
2836 	int tmp_pay_len = 0;
2837 	u32 pay_len, mss;
2838 	u8 proto_hdr_len;
2839 	int i;
2840 
2841 	tx_q = &priv->tx_queue[queue];
2842 
2843 	/* Compute header lengths */
2844 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2845 
2846 	/* Desc availability based on threshold should be safe enough */
2847 	if (unlikely(stmmac_tx_avail(priv, queue) <
2848 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2849 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2850 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2851 								queue));
2852 			/* This is a hard error, log it. */
2853 			netdev_err(priv->dev,
2854 				   "%s: Tx Ring full when queue awake\n",
2855 				   __func__);
2856 		}
2857 		return NETDEV_TX_BUSY;
2858 	}
2859 
2860 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2861 
2862 	mss = skb_shinfo(skb)->gso_size;
2863 
2864 	/* set new MSS value if needed */
2865 	if (mss != tx_q->mss) {
2866 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2867 		stmmac_set_mss(priv, mss_desc, mss);
2868 		tx_q->mss = mss;
2869 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2870 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2871 	}
2872 
2873 	if (netif_msg_tx_queued(priv)) {
2874 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2875 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2876 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2877 			skb->data_len);
2878 	}
2879 
2880 	first_entry = tx_q->cur_tx;
2881 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2882 
2883 	desc = tx_q->dma_tx + first_entry;
2884 	first = desc;
2885 
2886 	/* first descriptor: fill Headers on Buf1 */
2887 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2888 			     DMA_TO_DEVICE);
2889 	if (dma_mapping_error(priv->device, des))
2890 		goto dma_map_err;
2891 
2892 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2893 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2894 
2895 	first->des0 = cpu_to_le32(des);
2896 
2897 	/* Fill start of payload in buff2 of first descriptor */
2898 	if (pay_len)
2899 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2900 
2901 	/* If needed take extra descriptors to fill the remaining payload */
2902 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2903 
2904 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2905 
2906 	/* Prepare fragments */
2907 	for (i = 0; i < nfrags; i++) {
2908 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2909 
2910 		des = skb_frag_dma_map(priv->device, frag, 0,
2911 				       skb_frag_size(frag),
2912 				       DMA_TO_DEVICE);
2913 		if (dma_mapping_error(priv->device, des))
2914 			goto dma_map_err;
2915 
2916 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2917 				     (i == nfrags - 1), queue);
2918 
2919 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2920 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2921 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2922 	}
2923 
2924 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2925 
2926 	/* Only the last descriptor gets to point to the skb. */
2927 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2928 
2929 	/* We've used all descriptors we need for this skb, however,
2930 	 * advance cur_tx so that it references a fresh descriptor.
2931 	 * ndo_start_xmit will fill this descriptor the next time it's
2932 	 * called and stmmac_tx_clean may clean up to this descriptor.
2933 	 */
2934 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2935 
2936 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2937 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2938 			  __func__);
2939 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2940 	}
2941 
2942 	dev->stats.tx_bytes += skb->len;
2943 	priv->xstats.tx_tso_frames++;
2944 	priv->xstats.tx_tso_nfrags += nfrags;
2945 
2946 	/* Manage tx mitigation */
2947 	priv->tx_count_frames += nfrags + 1;
2948 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2949 		mod_timer(&priv->txtimer,
2950 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2951 	} else {
2952 		priv->tx_count_frames = 0;
2953 		stmmac_set_tx_ic(priv, desc);
2954 		priv->xstats.tx_set_ic_bit++;
2955 	}
2956 
2957 	skb_tx_timestamp(skb);
2958 
2959 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2960 		     priv->hwts_tx_en)) {
2961 		/* declare that device is doing timestamping */
2962 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2963 		stmmac_enable_tx_timestamp(priv, first);
2964 	}
2965 
2966 	/* Complete the first descriptor before granting the DMA */
2967 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2968 			proto_hdr_len,
2969 			pay_len,
2970 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2971 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2972 
2973 	/* If context desc is used to change MSS */
2974 	if (mss_desc) {
2975 		/* Make sure that the first descriptor has been completely
2976 		 * written, including its own bit. The MSS (context) descriptor
2977 		 * sits before the first descriptor in the ring, so its own
2978 		 * bit must be the last thing written.
2979 		 */
2980 		dma_wmb();
2981 		stmmac_set_tx_owner(priv, mss_desc);
2982 	}
2983 
2984 	/* The own bit must be the latest setting done when preparing the
2985 	 * descriptor, and then a barrier is needed to make sure that
2986 	 * all is coherent before granting the DMA engine.
2987 	 */
2988 	wmb();
2989 
2990 	if (netif_msg_pktdata(priv)) {
2991 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2992 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2993 			tx_q->cur_tx, first, nfrags);
2994 
2995 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2996 
2997 		pr_info(">>> frame to be transmitted: ");
2998 		print_pkt(skb->data, skb_headlen(skb));
2999 	}
3000 
3001 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3002 
3003 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3004 
3005 	return NETDEV_TX_OK;
3006 
3007 dma_map_err:
3008 	dev_err(priv->device, "Tx dma map failed\n");
3009 	dev_kfree_skb(skb);
3010 	priv->dev->stats.tx_dropped++;
3011 	return NETDEV_TX_OK;
3012 }
3013 
3014 /**
3015  *  stmmac_xmit - Tx entry point of the driver
3016  *  @skb : the socket buffer
3017  *  @dev : device pointer
3018  *  Description : this is the tx entry point of the driver.
3019  *  It programs the chain or the ring and supports oversized frames
3020  *  and SG feature.
3021  */
3022 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3023 {
3024 	struct stmmac_priv *priv = netdev_priv(dev);
3025 	unsigned int nopaged_len = skb_headlen(skb);
3026 	int i, csum_insertion = 0, is_jumbo = 0;
3027 	u32 queue = skb_get_queue_mapping(skb);
3028 	int nfrags = skb_shinfo(skb)->nr_frags;
3029 	int entry;
3030 	unsigned int first_entry;
3031 	struct dma_desc *desc, *first;
3032 	struct stmmac_tx_queue *tx_q;
3033 	unsigned int enh_desc;
3034 	unsigned int des;
3035 
3036 	tx_q = &priv->tx_queue[queue];
3037 
3038 	/* Manage oversized TCP frames for GMAC4 device */
3039 	if (skb_is_gso(skb) && priv->tso) {
3040 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3041 			return stmmac_tso_xmit(skb, dev);
3042 	}
3043 
3044 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3045 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3046 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3047 								queue));
3048 			/* This is a hard error, log it. */
3049 			netdev_err(priv->dev,
3050 				   "%s: Tx Ring full when queue awake\n",
3051 				   __func__);
3052 		}
3053 		return NETDEV_TX_BUSY;
3054 	}
3055 
3056 	if (priv->tx_path_in_lpi_mode)
3057 		stmmac_disable_eee_mode(priv);
3058 
3059 	entry = tx_q->cur_tx;
3060 	first_entry = entry;
3061 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3062 
3063 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3064 
3065 	if (likely(priv->extend_desc))
3066 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3067 	else
3068 		desc = tx_q->dma_tx + entry;
3069 
3070 	first = desc;
3071 
3072 	enh_desc = priv->plat->enh_desc;
3073 	/* To program the descriptors according to the size of the frame */
3074 	if (enh_desc)
3075 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3076 
3077 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3078 					 DWMAC_CORE_4_00)) {
3079 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3080 		if (unlikely(entry < 0))
3081 			goto dma_map_err;
3082 	}
3083 
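	/* Map and program one descriptor for each paged fragment of the skb */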
3084 	for (i = 0; i < nfrags; i++) {
3085 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3086 		int len = skb_frag_size(frag);
3087 		bool last_segment = (i == (nfrags - 1));
3088 
3089 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3090 		WARN_ON(tx_q->tx_skbuff[entry]);
3091 
3092 		if (likely(priv->extend_desc))
3093 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3094 		else
3095 			desc = tx_q->dma_tx + entry;
3096 
3097 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3098 				       DMA_TO_DEVICE);
3099 		if (dma_mapping_error(priv->device, des))
3100 			goto dma_map_err; /* should reuse desc w/o issues */
3101 
3102 		tx_q->tx_skbuff_dma[entry].buf = des;
3103 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3104 			desc->des0 = cpu_to_le32(des);
3105 		else
3106 			desc->des2 = cpu_to_le32(des);
3107 
3108 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3109 		tx_q->tx_skbuff_dma[entry].len = len;
3110 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3111 
3112 		/* Prepare the descriptor and set the own bit too */
3113 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3114 				priv->mode, 1, last_segment, skb->len);
3115 	}
3116 
3117 	/* Only the last descriptor gets to point to the skb. */
3118 	tx_q->tx_skbuff[entry] = skb;
3119 
3120 	/* We've used all descriptors we need for this skb, however,
3121 	 * advance cur_tx so that it references a fresh descriptor.
3122 	 * ndo_start_xmit will fill this descriptor the next time it's
3123 	 * called and stmmac_tx_clean may clean up to this descriptor.
3124 	 */
3125 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3126 	tx_q->cur_tx = entry;
3127 
3128 	if (netif_msg_pktdata(priv)) {
3129 		void *tx_head;
3130 
3131 		netdev_dbg(priv->dev,
3132 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3133 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3134 			   entry, first, nfrags);
3135 
3136 		if (priv->extend_desc)
3137 			tx_head = (void *)tx_q->dma_etx;
3138 		else
3139 			tx_head = (void *)tx_q->dma_tx;
3140 
3141 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3142 
3143 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3144 		print_pkt(skb->data, skb->len);
3145 	}
3146 
3147 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3148 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3149 			  __func__);
3150 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3151 	}
3152 
3153 	dev->stats.tx_bytes += skb->len;
3154 
3155 	/* According to the coalesce parameter the IC bit for the latest
3156 	 * segment is reset and the timer re-started to clean the tx status.
3157 	 * This approach takes care of the fragments: desc is the first
3158 	 * element in case of no SG.
3159 	 */
3160 	priv->tx_count_frames += nfrags + 1;
3161 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3162 		mod_timer(&priv->txtimer,
3163 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3164 	} else {
3165 		priv->tx_count_frames = 0;
3166 		stmmac_set_tx_ic(priv, desc);
3167 		priv->xstats.tx_set_ic_bit++;
3168 	}
3169 
3170 	skb_tx_timestamp(skb);
3171 
3172 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3173 	 * problems because all the descriptors are actually ready to be
3174 	 * passed to the DMA engine.
3175 	 */
3176 	if (likely(!is_jumbo)) {
3177 		bool last_segment = (nfrags == 0);
3178 
3179 		des = dma_map_single(priv->device, skb->data,
3180 				     nopaged_len, DMA_TO_DEVICE);
3181 		if (dma_mapping_error(priv->device, des))
3182 			goto dma_map_err;
3183 
3184 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3185 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3186 			first->des0 = cpu_to_le32(des);
3187 		else
3188 			first->des2 = cpu_to_le32(des);
3189 
3190 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3191 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3192 
3193 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3194 			     priv->hwts_tx_en)) {
3195 			/* declare that device is doing timestamping */
3196 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3197 			stmmac_enable_tx_timestamp(priv, first);
3198 		}
3199 
3200 		/* Prepare the first descriptor setting the OWN bit too */
3201 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3202 				csum_insertion, priv->mode, 1, last_segment,
3203 				skb->len);
3204 
3205 		/* The own bit must be the latest setting done when preparing the
3206 		 * descriptor, and then a barrier is needed to make sure that
3207 		 * all is coherent before granting the DMA engine.
3208 		 */
3209 		wmb();
3210 	}
3211 
3212 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3213 
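	/* Kick the DMA: older cores use the transmit poll demand,
	 * GMAC4 and newer update the TX tail pointer.
	 */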
3214 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3215 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
3216 	else
3217 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3218 				queue);
3219 
3220 	return NETDEV_TX_OK;
3221 
3222 dma_map_err:
3223 	netdev_err(priv->dev, "Tx DMA map failed\n");
3224 	dev_kfree_skb(skb);
3225 	priv->dev->stats.tx_dropped++;
3226 	return NETDEV_TX_OK;
3227 }
3228 
3229 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3230 {
3231 	struct ethhdr *ehdr;
3232 	u16 vlanid;
3233 
3234 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3235 	    NETIF_F_HW_VLAN_CTAG_RX &&
3236 	    !__vlan_get_tag(skb, &vlanid)) {
3237 		/* pop the vlan tag */
3238 		ehdr = (struct ethhdr *)skb->data;
3239 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3240 		skb_pull(skb, VLAN_HLEN);
3241 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3242 	}
3243 }
3244 
3245 
3246 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3247 {
3248 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3249 		return 0;
3250 
3251 	return 1;
3252 }
3253 
3254 /**
3255  * stmmac_rx_refill - refill used skb preallocated buffers
3256  * @priv: driver private structure
3257  * @queue: RX queue index
3258  * Description : this reallocates the skb buffers for the reception process
3259  * that is based on zero-copy.
3260  */
3261 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3262 {
3263 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3264 	int dirty = stmmac_rx_dirty(priv, queue);
3265 	unsigned int entry = rx_q->dirty_rx;
3266 
3267 	int bfsize = priv->dma_buf_sz;
3268 
3269 	while (dirty-- > 0) {
3270 		struct dma_desc *p;
3271 
3272 		if (priv->extend_desc)
3273 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3274 		else
3275 			p = rx_q->dma_rx + entry;
3276 
3277 		if (likely(!rx_q->rx_skbuff[entry])) {
3278 			struct sk_buff *skb;
3279 
3280 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3281 			if (unlikely(!skb)) {
3282 				/* so for a while no zero-copy! */
3283 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3284 				if (unlikely(net_ratelimit()))
3285 					dev_err(priv->device,
3286 						"fail to alloc skb entry %d\n",
3287 						entry);
3288 				break;
3289 			}
3290 
3291 			rx_q->rx_skbuff[entry] = skb;
3292 			rx_q->rx_skbuff_dma[entry] =
3293 			    dma_map_single(priv->device, skb->data, bfsize,
3294 					   DMA_FROM_DEVICE);
3295 			if (dma_mapping_error(priv->device,
3296 					      rx_q->rx_skbuff_dma[entry])) {
3297 				netdev_err(priv->dev, "Rx DMA map failed\n");
3298 				dev_kfree_skb(skb);
3299 				break;
3300 			}
3301 
3302 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3303 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3304 				p->des1 = 0;
3305 			} else {
3306 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3307 			}
3308 
3309 			stmmac_refill_desc3(priv, rx_q, p);
3310 
3311 			if (rx_q->rx_zeroc_thresh > 0)
3312 				rx_q->rx_zeroc_thresh--;
3313 
3314 			netif_dbg(priv, rx_status, priv->dev,
3315 				  "refill entry #%d\n", entry);
3316 		}
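		/* Ensure the new buffer address is written before the
		 * descriptor is handed back to the DMA.
		 */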
3317 		dma_wmb();
3318 
3319 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3320 			stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
3321 		else
3322 			stmmac_set_rx_owner(priv, p);
3323 
3324 		dma_wmb();
3325 
3326 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3327 	}
3328 	rx_q->dirty_rx = entry;
3329 }
3330 
3331 /**
3332  * stmmac_rx - manage the receive process
3333  * @priv: driver private structure
3334  * @limit: napi budget
3335  * @queue: RX queue index.
3336  * Description : this is the function called by the napi poll method.
3337  * It gets all the frames inside the ring.
3338  */
3339 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3340 {
3341 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3342 	unsigned int entry = rx_q->cur_rx;
3343 	int coe = priv->hw->rx_csum;
3344 	unsigned int next_entry;
3345 	unsigned int count = 0;
3346 
3347 	if (netif_msg_rx_status(priv)) {
3348 		void *rx_head;
3349 
3350 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3351 		if (priv->extend_desc)
3352 			rx_head = (void *)rx_q->dma_erx;
3353 		else
3354 			rx_head = (void *)rx_q->dma_rx;
3355 
3356 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3357 	}
3358 	while (count < limit) {
3359 		int status;
3360 		struct dma_desc *p;
3361 		struct dma_desc *np;
3362 
3363 		if (priv->extend_desc)
3364 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3365 		else
3366 			p = rx_q->dma_rx + entry;
3367 
3368 		/* read the status of the incoming frame */
3369 		status = stmmac_rx_status(priv, &priv->dev->stats,
3370 				&priv->xstats, p);
3371 		/* check if managed by the DMA otherwise go ahead */
3372 		if (unlikely(status & dma_own))
3373 			break;
3374 
3375 		count++;
3376 
3377 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3378 		next_entry = rx_q->cur_rx;
3379 
3380 		if (priv->extend_desc)
3381 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3382 		else
3383 			np = rx_q->dma_rx + next_entry;
3384 
3385 		prefetch(np);
3386 
3387 		if (priv->extend_desc)
3388 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3389 					&priv->xstats, rx_q->dma_erx + entry);
3390 		if (unlikely(status == discard_frame)) {
3391 			priv->dev->stats.rx_errors++;
3392 			if (priv->hwts_rx_en && !priv->extend_desc) {
3393 				/* DESC2 & DESC3 will be overwritten by device
3394 				 * with timestamp value, hence reinitialize
3395 				 * them in stmmac_rx_refill() function so that
3396 				 * device can reuse it.
3397 				 */
3398 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3399 				rx_q->rx_skbuff[entry] = NULL;
3400 				dma_unmap_single(priv->device,
3401 						 rx_q->rx_skbuff_dma[entry],
3402 						 priv->dma_buf_sz,
3403 						 DMA_FROM_DEVICE);
3404 			}
3405 		} else {
3406 			struct sk_buff *skb;
3407 			int frame_len;
3408 			unsigned int des;
3409 
3410 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3411 				des = le32_to_cpu(p->des0);
3412 			else
3413 				des = le32_to_cpu(p->des2);
3414 
3415 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3416 
3417 			/*  If frame length is greater than skb buffer size
3418 			 *  (preallocated during init) then the packet is
3419 			 *  ignored
3420 			 */
3421 			if (frame_len > priv->dma_buf_sz) {
3422 				netdev_err(priv->dev,
3423 					   "len %d larger than size (%d)\n",
3424 					   frame_len, priv->dma_buf_sz);
3425 				priv->dev->stats.rx_length_errors++;
3426 				break;
3427 			}
3428 
3429 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3430 			 * Type frames (LLC/LLC-SNAP)
3431 			 *
3432 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3433 			 * feature is always disabled and packets need to be
3434 			 * stripped manually.
3435 			 */
3436 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3437 			    unlikely(status != llc_snap))
3438 				frame_len -= ETH_FCS_LEN;
3439 
3440 			if (netif_msg_rx_status(priv)) {
3441 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3442 					   p, entry, des);
3443 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3444 					   frame_len, status);
3445 			}
3446 
3447 			/* Zero-copy is always used for all sizes in case of
3448 			 * GMAC4 because the used descriptors always need to
3449 			 * be refilled.
3450 			 */
3451 			if (unlikely(!priv->plat->has_gmac4 &&
3452 				     ((frame_len < priv->rx_copybreak) ||
3453 				     stmmac_rx_threshold_count(rx_q)))) {
3454 				skb = netdev_alloc_skb_ip_align(priv->dev,
3455 								frame_len);
3456 				if (unlikely(!skb)) {
3457 					if (net_ratelimit())
3458 						dev_warn(priv->device,
3459 							 "packet dropped\n");
3460 					priv->dev->stats.rx_dropped++;
3461 					break;
3462 				}
3463 
3464 				dma_sync_single_for_cpu(priv->device,
3465 							rx_q->rx_skbuff_dma
3466 							[entry], frame_len,
3467 							DMA_FROM_DEVICE);
3468 				skb_copy_to_linear_data(skb,
3469 							rx_q->
3470 							rx_skbuff[entry]->data,
3471 							frame_len);
3472 
3473 				skb_put(skb, frame_len);
3474 				dma_sync_single_for_device(priv->device,
3475 							   rx_q->rx_skbuff_dma
3476 							   [entry], frame_len,
3477 							   DMA_FROM_DEVICE);
3478 			} else {
3479 				skb = rx_q->rx_skbuff[entry];
3480 				if (unlikely(!skb)) {
3481 					netdev_err(priv->dev,
3482 						   "%s: Inconsistent Rx chain\n",
3483 						   priv->dev->name);
3484 					priv->dev->stats.rx_dropped++;
3485 					break;
3486 				}
3487 				prefetch(skb->data - NET_IP_ALIGN);
3488 				rx_q->rx_skbuff[entry] = NULL;
3489 				rx_q->rx_zeroc_thresh++;
3490 
3491 				skb_put(skb, frame_len);
3492 				dma_unmap_single(priv->device,
3493 						 rx_q->rx_skbuff_dma[entry],
3494 						 priv->dma_buf_sz,
3495 						 DMA_FROM_DEVICE);
3496 			}
3497 
3498 			if (netif_msg_pktdata(priv)) {
3499 				netdev_dbg(priv->dev, "frame received (%d bytes)\n",
3500 					   frame_len);
3501 				print_pkt(skb->data, frame_len);
3502 			}
3503 
3504 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3505 
3506 			stmmac_rx_vlan(priv->dev, skb);
3507 
3508 			skb->protocol = eth_type_trans(skb, priv->dev);
3509 
3510 			if (unlikely(!coe))
3511 				skb_checksum_none_assert(skb);
3512 			else
3513 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3514 
3515 			napi_gro_receive(&rx_q->napi, skb);
3516 
3517 			priv->dev->stats.rx_packets++;
3518 			priv->dev->stats.rx_bytes += frame_len;
3519 		}
3520 		entry = next_entry;
3521 	}
3522 
3523 	stmmac_rx_refill(priv, queue);
3524 
3525 	priv->xstats.rx_pkt_n += count;
3526 
3527 	return count;
3528 }
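
#if 0	/* Editor's note: a deliberately-uncompiled sketch restating the
	 * copybreak decision taken in stmmac_rx() above.  On pre-GMAC4
	 * cores, frames shorter than rx_copybreak are copied into a fresh
	 * skb so the DMA buffer can stay in the ring; GMAC4 always takes
	 * the zero-copy path.  The helper name is hypothetical and the real
	 * policy above also consults stmmac_rx_threshold_count().
	 */
static bool example_rx_should_copybreak(struct stmmac_priv *priv,
					int frame_len)
{
	return !priv->plat->has_gmac4 && frame_len < priv->rx_copybreak;
}
#endif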
3529 
3530 /**
3531  *  stmmac_poll - stmmac poll method (NAPI)
3532  *  @napi : pointer to the napi structure.
3533  *  @budget : maximum number of packets that this poll invocation is
3534  *	      allowed to process.
3535  *  Description :
3536  *  Process the incoming frames and reclaim completed TX resources.
3537  */
3538 static int stmmac_poll(struct napi_struct *napi, int budget)
3539 {
3540 	struct stmmac_rx_queue *rx_q =
3541 		container_of(napi, struct stmmac_rx_queue, napi);
3542 	struct stmmac_priv *priv = rx_q->priv_data;
3543 	u32 tx_count = priv->plat->tx_queues_to_use;
3544 	u32 chan = rx_q->queue_index;
3545 	int work_done = 0;
3546 	u32 queue;
3547 
3548 	priv->xstats.napi_poll++;
3549 
3550 	/* check all the queues */
3551 	for (queue = 0; queue < tx_count; queue++)
3552 		stmmac_tx_clean(priv, queue);
3553 
3554 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3555 	if (work_done < budget) {
3556 		napi_complete_done(napi, work_done);
3557 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3558 	}
3559 	return work_done;
3560 }
3561 
3562 /**
3563  *  stmmac_tx_timeout
3564  *  @dev : Pointer to net device structure
3565  *  Description: this function is called when a packet transmission fails to
3566  *   complete within a reasonable time. The driver will mark the error in the
3567  *   netdev structure and arrange for the device to be reset to a sane state
3568  *   in order to transmit a new packet.
3569  */
3570 static void stmmac_tx_timeout(struct net_device *dev)
3571 {
3572 	struct stmmac_priv *priv = netdev_priv(dev);
3573 
3574 	stmmac_global_err(priv);
3575 }
3576 
3577 /**
3578  *  stmmac_set_rx_mode - entry point for multicast addressing
3579  *  @dev : pointer to the device structure
3580  *  Description:
3581  *  This function is a driver entry point which gets called by the kernel
3582  *  whenever multicast addresses must be enabled/disabled.
3583  *  Return value:
3584  *  void.
3585  */
3586 static void stmmac_set_rx_mode(struct net_device *dev)
3587 {
3588 	struct stmmac_priv *priv = netdev_priv(dev);
3589 
3590 	stmmac_set_filter(priv, priv->hw, dev);
3591 }
3592 
3593 /**
3594  *  stmmac_change_mtu - entry point to change MTU size for the device.
3595  *  @dev : device pointer.
3596  *  @new_mtu : the new MTU size for the device.
3597  *  Description: the Maximum Transmission Unit (MTU) is used by the network
3598  *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
3599  *  (ETH_DATA_LEN). This value can be changed with ip link or ifconfig.
3600  *  Return value:
3601  *  0 on success and an appropriate negative error code (as defined in
3602  *  errno.h) on failure.
3603  */
3604 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3605 {
3606 	struct stmmac_priv *priv = netdev_priv(dev);
3607 
3608 	if (netif_running(dev)) {
3609 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3610 		return -EBUSY;
3611 	}
3612 
3613 	dev->mtu = new_mtu;
3614 
3615 	netdev_update_features(dev);
3616 
3617 	return 0;
3618 }
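
#if 0	/* Editor's note: an illustrative userspace sketch (kept out of the
	 * build) of the MTU change handled by stmmac_change_mtu() above.
	 * Since the handler returns -EBUSY while the interface is running,
	 * the device must be brought down first.  The helper name is
	 * hypothetical; nothing below is stmmac-specific.
	 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int example_set_mtu(const char *ifname, int mtu)
{
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_mtu = mtu;

	/* Fails with errno == EBUSY if the interface is still up. */
	ret = ioctl(fd, SIOCSIFMTU, &ifr);
	close(fd);
	return ret;
}
#endif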
3619 
3620 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3621 					     netdev_features_t features)
3622 {
3623 	struct stmmac_priv *priv = netdev_priv(dev);
3624 
3625 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3626 		features &= ~NETIF_F_RXCSUM;
3627 
3628 	if (!priv->plat->tx_coe)
3629 		features &= ~NETIF_F_CSUM_MASK;
3630 
3631 	/* Some GMAC devices have buggy Jumbo frame support and need
3632 	 * the Tx COE disabled for oversized frames (due to limited
3633 	 * buffer sizes). In this case we disable TX csum insertion
3634 	 * in the TDES and do not use Store-and-Forward (SF) mode.
3635 	 */
3636 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3637 		features &= ~NETIF_F_CSUM_MASK;
3638 
3639 	/* Enable or disable TSO as requested via ethtool */
3640 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3641 		if (features & NETIF_F_TSO)
3642 			priv->tso = true;
3643 		else
3644 			priv->tso = false;
3645 	}
3646 
3647 	return features;
3648 }
3649 
3650 static int stmmac_set_features(struct net_device *netdev,
3651 			       netdev_features_t features)
3652 {
3653 	struct stmmac_priv *priv = netdev_priv(netdev);
3654 
3655 	/* Keep the COE type when checksum offload is supported */
3656 	if (features & NETIF_F_RXCSUM)
3657 		priv->hw->rx_csum = priv->plat->rx_coe;
3658 	else
3659 		priv->hw->rx_csum = 0;
3660 	/* No check is needed here: rx_coe has already been set and will be
3661 	 * corrected if there is an issue.
3662 	 */
3663 	stmmac_rx_ipc(priv, priv->hw);
3664 
3665 	return 0;
3666 }
3667 
3668 /**
3669  *  stmmac_interrupt - main ISR
3670  *  @irq: interrupt number.
3671  *  @dev_id: to pass the net device pointer.
3672  *  Description: this is the main driver interrupt service routine.
3673  *  It can call:
3674  *  o DMA service routine (to manage incoming frame reception and transmission
3675  *    status)
3676  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3677  *    interrupts.
3678  */
3679 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3680 {
3681 	struct net_device *dev = (struct net_device *)dev_id;
3682 	struct stmmac_priv *priv;
3683 	u32 rx_cnt, tx_cnt, queues_count, queue;
3684 
3685 	if (unlikely(!dev)) {
3686 		pr_err("%s: invalid dev pointer\n", __func__);
3687 		return IRQ_NONE;
3688 	}
3689 
3690 	priv = netdev_priv(dev);
3691 	rx_cnt = priv->plat->rx_queues_to_use;
3692 	tx_cnt = priv->plat->tx_queues_to_use;
3693 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3694 
3695 	if (priv->irq_wake)
3696 		pm_wakeup_event(priv->device, 0);
3697 
3698 	/* Check if adapter is up */
3699 	if (test_bit(STMMAC_DOWN, &priv->state))
3700 		return IRQ_HANDLED;
3701 	/* Check if a fatal error happened */
3702 	if (stmmac_safety_feat_interrupt(priv))
3703 		return IRQ_HANDLED;
3704 
3705 	/* Handle the GMAC core's own interrupts */
3706 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3707 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3708 
3709 		if (unlikely(status)) {
3710 			/* For LPI we need to save the tx status */
3711 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3712 				priv->tx_path_in_lpi_mode = true;
3713 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3714 				priv->tx_path_in_lpi_mode = false;
3715 		}
3716 
3717 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3718 			for (queue = 0; queue < queues_count; queue++) {
3719 				struct stmmac_rx_queue *rx_q =
3720 				&priv->rx_queue[queue];
3721 
3722 				status |= stmmac_host_mtl_irq_status(priv,
3723 						priv->hw, queue);
3724 
3725 				if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3726 					stmmac_set_rx_tail_ptr(priv,
3727 							priv->ioaddr,
3728 							rx_q->rx_tail_addr,
3729 							queue);
3730 			}
3731 		}
3732 
3733 		/* PCS link status */
3734 		if (priv->hw->pcs) {
3735 			if (priv->xstats.pcs_link)
3736 				netif_carrier_on(dev);
3737 			else
3738 				netif_carrier_off(dev);
3739 		}
3740 	}
3741 
3742 	/* To handle DMA interrupts */
3743 	stmmac_dma_interrupt(priv);
3744 
3745 	return IRQ_HANDLED;
3746 }
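
#if 0	/* Editor's note: a sketch (kept out of the build) of how an ISR
	 * like stmmac_interrupt() is hooked up.  The driver's open path,
	 * earlier in this file, requests the MAC line along these lines;
	 * the helper name is hypothetical and is shown only to make the
	 * dev_id contract explicit: the net_device pointer passed here is
	 * what comes back as dev_id above.
	 */
static int example_request_mac_irq(struct net_device *dev)
{
	/* IRQF_SHARED: the MAC line may be shared with other devices */
	return request_irq(dev->irq, stmmac_interrupt, IRQF_SHARED,
			   dev->name, dev);
}
#endif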
3747 
3748 #ifdef CONFIG_NET_POLL_CONTROLLER
3749 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3750  * to allow network I/O with interrupts disabled.
3751  */
3752 static void stmmac_poll_controller(struct net_device *dev)
3753 {
3754 	disable_irq(dev->irq);
3755 	stmmac_interrupt(dev->irq, dev);
3756 	enable_irq(dev->irq);
3757 }
3758 #endif
3759 
3760 /**
3761  *  stmmac_ioctl - Entry point for the Ioctl
3762  *  @dev: Device pointer.
3763  *  @rq: An IOCTL-specific structure that can contain a pointer to
3764  *  a proprietary structure used to pass information to the driver.
3765  *  @cmd: IOCTL command
3766  *  Description:
3767  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3768  */
3769 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3770 {
3771 	int ret = -EOPNOTSUPP;
3772 
3773 	if (!netif_running(dev))
3774 		return -EINVAL;
3775 
3776 	switch (cmd) {
3777 	case SIOCGMIIPHY:
3778 	case SIOCGMIIREG:
3779 	case SIOCSMIIREG:
3780 		if (!dev->phydev)
3781 			return -EINVAL;
3782 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3783 		break;
3784 	case SIOCSHWTSTAMP:
3785 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3786 		break;
3787 	default:
3788 		break;
3789 	}
3790 
3791 	return ret;
3792 }
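
#if 0	/* Editor's note: an illustrative userspace sketch (kept out of the
	 * build) exercising the SIOCSHWTSTAMP branch of stmmac_ioctl()
	 * above, which lands in stmmac_hwtstamp_ioctl().  The interface
	 * name "eth0" and the filter choice are assumptions of the example.
	 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	return 0;
}
#endif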
3793 
3794 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3795 				    void *cb_priv)
3796 {
3797 	struct stmmac_priv *priv = cb_priv;
3798 	int ret = -EOPNOTSUPP;
3799 
3800 	stmmac_disable_all_queues(priv);
3801 
3802 	switch (type) {
3803 	case TC_SETUP_CLSU32:
3804 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3805 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3806 		break;
3807 	default:
3808 		break;
3809 	}
3810 
3811 	stmmac_enable_all_queues(priv);
3812 	return ret;
3813 }
3814 
3815 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3816 				 struct tc_block_offload *f)
3817 {
3818 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3819 		return -EOPNOTSUPP;
3820 
3821 	switch (f->command) {
3822 	case TC_BLOCK_BIND:
3823 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3824 				priv, priv);
3825 	case TC_BLOCK_UNBIND:
3826 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3827 		return 0;
3828 	default:
3829 		return -EOPNOTSUPP;
3830 	}
3831 }
3832 
3833 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3834 			   void *type_data)
3835 {
3836 	struct stmmac_priv *priv = netdev_priv(ndev);
3837 
3838 	switch (type) {
3839 	case TC_SETUP_BLOCK:
3840 		return stmmac_setup_tc_block(priv, type_data);
3841 	default:
3842 		return -EOPNOTSUPP;
3843 	}
3844 }
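
/* Editor's note: the CLSU32 offload routed through stmmac_setup_tc() above
 * is typically exercised from userspace along these lines (illustrative
 * only; whether a filter is accepted depends on
 * tc_cls_can_offload_and_chain0() and the backend in stmmac_tc.c):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match ip dst 192.168.0.1/32 action drop
 */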
3845 
3846 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3847 {
3848 	struct stmmac_priv *priv = netdev_priv(ndev);
3849 	int ret = 0;
3850 
3851 	ret = eth_mac_addr(ndev, addr);
3852 	if (ret)
3853 		return ret;
3854 
3855 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3856 
3857 	return ret;
3858 }
3859 
3860 #ifdef CONFIG_DEBUG_FS
3861 static struct dentry *stmmac_fs_dir;
3862 
3863 static void sysfs_display_ring(void *head, int size, int extend_desc,
3864 			       struct seq_file *seq)
3865 {
3866 	int i;
3867 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3868 	struct dma_desc *p = (struct dma_desc *)head;
3869 
3870 	for (i = 0; i < size; i++) {
3871 		if (extend_desc) {
3872 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3873 				   i, (unsigned int)virt_to_phys(ep),
3874 				   le32_to_cpu(ep->basic.des0),
3875 				   le32_to_cpu(ep->basic.des1),
3876 				   le32_to_cpu(ep->basic.des2),
3877 				   le32_to_cpu(ep->basic.des3));
3878 			ep++;
3879 		} else {
3880 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3881 				   i, (unsigned int)virt_to_phys(p),
3882 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3883 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3884 			p++;
3885 		}
3886 		seq_printf(seq, "\n");
3887 	}
3888 }
3889 
3890 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3891 {
3892 	struct net_device *dev = seq->private;
3893 	struct stmmac_priv *priv = netdev_priv(dev);
3894 	u32 rx_count = priv->plat->rx_queues_to_use;
3895 	u32 tx_count = priv->plat->tx_queues_to_use;
3896 	u32 queue;
3897 
3898 	for (queue = 0; queue < rx_count; queue++) {
3899 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3900 
3901 		seq_printf(seq, "RX Queue %d:\n", queue);
3902 
3903 		if (priv->extend_desc) {
3904 			seq_printf(seq, "Extended descriptor ring:\n");
3905 			sysfs_display_ring((void *)rx_q->dma_erx,
3906 					   DMA_RX_SIZE, 1, seq);
3907 		} else {
3908 			seq_printf(seq, "Descriptor ring:\n");
3909 			sysfs_display_ring((void *)rx_q->dma_rx,
3910 					   DMA_RX_SIZE, 0, seq);
3911 		}
3912 	}
3913 
3914 	for (queue = 0; queue < tx_count; queue++) {
3915 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3916 
3917 		seq_printf(seq, "TX Queue %d:\n", queue);
3918 
3919 		if (priv->extend_desc) {
3920 			seq_printf(seq, "Extended descriptor ring:\n");
3921 			sysfs_display_ring((void *)tx_q->dma_etx,
3922 					   DMA_TX_SIZE, 1, seq);
3923 		} else {
3924 			seq_printf(seq, "Descriptor ring:\n");
3925 			sysfs_display_ring((void *)tx_q->dma_tx,
3926 					   DMA_TX_SIZE, 0, seq);
3927 		}
3928 	}
3929 
3930 	return 0;
3931 }
3932 
3933 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3934 {
3935 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3936 }
3937 
3938 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3939 
3940 static const struct file_operations stmmac_rings_status_fops = {
3941 	.owner = THIS_MODULE,
3942 	.open = stmmac_sysfs_ring_open,
3943 	.read = seq_read,
3944 	.llseek = seq_lseek,
3945 	.release = single_release,
3946 };
3947 
3948 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3949 {
3950 	struct net_device *dev = seq->private;
3951 	struct stmmac_priv *priv = netdev_priv(dev);
3952 
3953 	if (!priv->hw_cap_support) {
3954 		seq_printf(seq, "DMA HW features not supported\n");
3955 		return 0;
3956 	}
3957 
3958 	seq_printf(seq, "==============================\n");
3959 	seq_printf(seq, "\tDMA HW features\n");
3960 	seq_printf(seq, "==============================\n");
3961 
3962 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3963 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3964 	seq_printf(seq, "\t1000 Mbps: %s\n",
3965 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3966 	seq_printf(seq, "\tHalf duplex: %s\n",
3967 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3968 	seq_printf(seq, "\tHash Filter: %s\n",
3969 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3970 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3971 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3972 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3973 		   (priv->dma_cap.pcs) ? "Y" : "N");
3974 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3975 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3976 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3977 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3978 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3979 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3980 	seq_printf(seq, "\tRMON module: %s\n",
3981 		   (priv->dma_cap.rmon) ? "Y" : "N");
3982 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3983 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3984 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3985 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3986 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3987 		   (priv->dma_cap.eee) ? "Y" : "N");
3988 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3989 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3990 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3991 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3992 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3993 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3994 	} else {
3995 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3996 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3997 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3998 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3999 	}
4000 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4001 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4002 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4003 		   priv->dma_cap.number_rx_channel);
4004 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4005 		   priv->dma_cap.number_tx_channel);
4006 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4007 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4008 
4009 	return 0;
4010 }
4011 
4012 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4013 {
4014 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4015 }
4016 
4017 static const struct file_operations stmmac_dma_cap_fops = {
4018 	.owner = THIS_MODULE,
4019 	.open = stmmac_sysfs_dma_cap_open,
4020 	.read = seq_read,
4021 	.llseek = seq_lseek,
4022 	.release = single_release,
4023 };
4024 
4025 static int stmmac_init_fs(struct net_device *dev)
4026 {
4027 	struct stmmac_priv *priv = netdev_priv(dev);
4028 
4029 	/* Create per netdev entries */
4030 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4031 
4032 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4033 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4034 
4035 		return -ENOMEM;
4036 	}
4037 
4038 	/* Entry to report DMA RX/TX rings */
4039 	priv->dbgfs_rings_status =
4040 		debugfs_create_file("descriptors_status", 0444,
4041 				    priv->dbgfs_dir, dev,
4042 				    &stmmac_rings_status_fops);
4043 
4044 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4045 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4046 		debugfs_remove_recursive(priv->dbgfs_dir);
4047 
4048 		return -ENOMEM;
4049 	}
4050 
4051 	/* Entry to report the DMA HW features */
4052 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4053 						  priv->dbgfs_dir,
4054 						  dev, &stmmac_dma_cap_fops);
4055 
4056 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4057 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4058 		debugfs_remove_recursive(priv->dbgfs_dir);
4059 
4060 		return -ENOMEM;
4061 	}
4062 
4063 	return 0;
4064 }
4065 
4066 static void stmmac_exit_fs(struct net_device *dev)
4067 {
4068 	struct stmmac_priv *priv = netdev_priv(dev);
4069 
4070 	debugfs_remove_recursive(priv->dbgfs_dir);
4071 }
4072 #endif /* CONFIG_DEBUG_FS */
4073 
4074 static const struct net_device_ops stmmac_netdev_ops = {
4075 	.ndo_open = stmmac_open,
4076 	.ndo_start_xmit = stmmac_xmit,
4077 	.ndo_stop = stmmac_release,
4078 	.ndo_change_mtu = stmmac_change_mtu,
4079 	.ndo_fix_features = stmmac_fix_features,
4080 	.ndo_set_features = stmmac_set_features,
4081 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4082 	.ndo_tx_timeout = stmmac_tx_timeout,
4083 	.ndo_do_ioctl = stmmac_ioctl,
4084 	.ndo_setup_tc = stmmac_setup_tc,
4085 #ifdef CONFIG_NET_POLL_CONTROLLER
4086 	.ndo_poll_controller = stmmac_poll_controller,
4087 #endif
4088 	.ndo_set_mac_address = stmmac_set_mac_address,
4089 };
4090 
4091 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4092 {
4093 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4094 		return;
4095 	if (test_bit(STMMAC_DOWN, &priv->state))
4096 		return;
4097 
4098 	netdev_err(priv->dev, "Reset adapter.\n");
4099 
4100 	rtnl_lock();
4101 	netif_trans_update(priv->dev);
4102 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4103 		usleep_range(1000, 2000);
4104 
4105 	set_bit(STMMAC_DOWN, &priv->state);
4106 	dev_close(priv->dev);
4107 	dev_open(priv->dev);
4108 	clear_bit(STMMAC_DOWN, &priv->state);
4109 	clear_bit(STMMAC_RESETING, &priv->state);
4110 	rtnl_unlock();
4111 }
4112 
4113 static void stmmac_service_task(struct work_struct *work)
4114 {
4115 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4116 			service_task);
4117 
4118 	stmmac_reset_subtask(priv);
4119 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4120 }
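
#if 0	/* Editor's note: a sketch (kept out of the build) of the scheduling
	 * side of the service task above.  The driver's own helper, defined
	 * earlier in this file, follows this pattern: STMMAC_SERVICE_SCHED
	 * guarantees at most one pending work item and STMMAC_DOWN
	 * suppresses scheduling while the interface is going down.  Treat
	 * this as an assumption-laden sketch, not a verbatim copy.
	 */
static void example_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}
#endif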
4121 
4122 /**
4123  *  stmmac_hw_init - Init the MAC device
4124  *  @priv: driver private structure
4125  * Description: this function configures the MAC device according to
4126  * platform parameters or the HW capability register. It prepares the
4127  * driver to use either ring or chain mode and to set up either enhanced
4128  * or normal descriptors.
4129  */
4130 static int stmmac_hw_init(struct stmmac_priv *priv)
4131 {
4132 	int ret;
4133 
4134 	/* dwmac-sun8i only work in chain mode */
4135 	if (priv->plat->has_sun8i)
4136 		chain_mode = 1;
4137 	priv->chain_mode = chain_mode;
4138 
4139 	/* Initialize HW Interface */
4140 	ret = stmmac_hwif_init(priv);
4141 	if (ret)
4142 		return ret;
4143 
4144 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4145 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4146 	if (priv->hw_cap_support) {
4147 		dev_info(priv->device, "DMA HW capability register supported\n");
4148 
4149 		/* We can override some GMAC/DMA configuration fields
4150 		 * (e.g. enh_desc, tx_coe) that are passed through the
4151 		 * platform data with the values from the HW capability
4152 		 * register, if supported.
4153 		 */
4154 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4155 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4156 		priv->hw->pmt = priv->plat->pmt;
4157 
4158 		/* TXCOE doesn't work in thresh DMA mode */
4159 		if (priv->plat->force_thresh_dma_mode)
4160 			priv->plat->tx_coe = 0;
4161 		else
4162 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4163 
4164 		/* On GMAC4, rx_coe comes from the HW capability register. */
4165 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4166 
4167 		if (priv->dma_cap.rx_coe_type2)
4168 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4169 		else if (priv->dma_cap.rx_coe_type1)
4170 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4171 
4172 	} else {
4173 		dev_info(priv->device, "No HW DMA feature register supported\n");
4174 	}
4175 
4176 	if (priv->plat->rx_coe) {
4177 		priv->hw->rx_csum = priv->plat->rx_coe;
4178 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4179 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4180 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4181 	}
4182 	if (priv->plat->tx_coe)
4183 		dev_info(priv->device, "TX Checksum insertion supported\n");
4184 
4185 	if (priv->plat->pmt) {
4186 		dev_info(priv->device, "Wake-on-LAN supported\n");
4187 		device_set_wakeup_capable(priv->device, 1);
4188 	}
4189 
4190 	if (priv->dma_cap.tsoen)
4191 		dev_info(priv->device, "TSO supported\n");
4192 
4193 	return 0;
4194 }
4195 
4196 /**
4197  * stmmac_dvr_probe
4198  * @device: device pointer
4199  * @plat_dat: platform data pointer
4200  * @res: stmmac resource pointer
4201  * Description: this is the main probe function used to
4202  * call the alloc_etherdev, allocate the priv structure.
4203  * Return:
4204  * returns 0 on success, otherwise errno.
4205  */
4206 int stmmac_dvr_probe(struct device *device,
4207 		     struct plat_stmmacenet_data *plat_dat,
4208 		     struct stmmac_resources *res)
4209 {
4210 	struct net_device *ndev = NULL;
4211 	struct stmmac_priv *priv;
4212 	int ret = 0;
4213 	u32 queue;
4214 
4215 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4216 				  MTL_MAX_TX_QUEUES,
4217 				  MTL_MAX_RX_QUEUES);
4218 	if (!ndev)
4219 		return -ENOMEM;
4220 
4221 	SET_NETDEV_DEV(ndev, device);
4222 
4223 	priv = netdev_priv(ndev);
4224 	priv->device = device;
4225 	priv->dev = ndev;
4226 
4227 	stmmac_set_ethtool_ops(ndev);
4228 	priv->pause = pause;
4229 	priv->plat = plat_dat;
4230 	priv->ioaddr = res->addr;
4231 	priv->dev->base_addr = (unsigned long)res->addr;
4232 
4233 	priv->dev->irq = res->irq;
4234 	priv->wol_irq = res->wol_irq;
4235 	priv->lpi_irq = res->lpi_irq;
4236 
4237 	if (res->mac)
4238 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4239 
4240 	dev_set_drvdata(device, priv->dev);
4241 
4242 	/* Verify driver arguments */
4243 	stmmac_verify_args();
4244 
4245 	/* Allocate workqueue */
4246 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4247 	if (!priv->wq) {
4248 		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
4249 		goto error_wq;
4250 	}
4251 
4252 	INIT_WORK(&priv->service_task, stmmac_service_task);
4253 
4254 	/* Override with kernel parameters if supplied XXX CRS XXX
4255 	 * this needs to have multiple instances
4256 	 */
4257 	if ((phyaddr >= 0) && (phyaddr <= 31))
4258 		priv->plat->phy_addr = phyaddr;
4259 
4260 	if (priv->plat->stmmac_rst) {
4261 		ret = reset_control_assert(priv->plat->stmmac_rst);
4262 		reset_control_deassert(priv->plat->stmmac_rst);
4263 		/* Some reset controllers provide only a reset callback
4264 		 * instead of an assert + deassert callback pair.
4265 		 */
4266 		if (ret == -ENOTSUPP)
4267 			reset_control_reset(priv->plat->stmmac_rst);
4268 	}
4269 
4270 	/* Init MAC and get the capabilities */
4271 	ret = stmmac_hw_init(priv);
4272 	if (ret)
4273 		goto error_hw_init;
4274 
4275 	/* Configure real RX and TX queues */
4276 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4277 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4278 
4279 	ndev->netdev_ops = &stmmac_netdev_ops;
4280 
4281 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4282 			    NETIF_F_RXCSUM;
4283 
4284 	ret = stmmac_tc_init(priv, priv);
4285 	if (!ret)
4286 		ndev->hw_features |= NETIF_F_HW_TC;
4288 
4289 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4290 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4291 		priv->tso = true;
4292 		dev_info(priv->device, "TSO feature enabled\n");
4293 	}
4294 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4295 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4296 #ifdef STMMAC_VLAN_TAG_USED
4297 	/* Both mac100 and gmac support receive VLAN tag detection */
4298 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4299 #endif
4300 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4301 
4302 	/* MTU range: 46 - hw-specific max */
4303 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4304 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4305 		ndev->max_mtu = JUMBO_LEN;
4306 	else
4307 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4308 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4309 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4310 	 */
4311 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4312 	    (priv->plat->maxmtu >= ndev->min_mtu))
4313 		ndev->max_mtu = priv->plat->maxmtu;
4314 	else if (priv->plat->maxmtu < ndev->min_mtu)
4315 		dev_warn(priv->device,
4316 			 "%s: warning: maxmtu having invalid value (%d)\n",
4317 			 __func__, priv->plat->maxmtu);
4318 
4319 	if (flow_ctrl)
4320 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4321 
4322 	/* The RX Watchdog is available in cores newer than 3.40.
4323 	 * In some cases, for example on buggy HW, this feature has to
4324 	 * be disabled; this can be done by passing the riwt_off field
4325 	 * from the platform data.
4326 	 */
4327 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4328 		priv->use_riwt = 1;
4329 		dev_info(priv->device,
4330 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4331 	}
4332 
4333 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4334 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4335 
4336 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4337 			       (8 * priv->plat->rx_queues_to_use));
4338 	}
4339 
4340 	spin_lock_init(&priv->lock);
4341 
4342 	/* If a specific clk_csr value is passed from the platform, this
4343 	 * means that the CSR Clock Range selection cannot be changed at
4344 	 * run-time and is fixed. Otherwise, the driver will try to set
4345 	 * the MDC clock dynamically according to the actual csr clock
4346 	 * input.
4347 	 */
4348 	if (!priv->plat->clk_csr)
4349 		stmmac_clk_csr_set(priv);
4350 	else
4351 		priv->clk_csr = priv->plat->clk_csr;
4352 
4353 	stmmac_check_pcs_mode(priv);
4354 
4355 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4356 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4357 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4358 		/* MDIO bus Registration */
4359 		ret = stmmac_mdio_register(ndev);
4360 		if (ret < 0) {
4361 			dev_err(priv->device,
4362 				"%s: MDIO bus (id: %d) registration failed",
4363 				__func__, priv->plat->bus_id);
4364 			goto error_mdio_register;
4365 		}
4366 	}
4367 
4368 	ret = register_netdev(ndev);
4369 	if (ret) {
4370 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4371 			__func__, ret);
4372 		goto error_netdev_register;
4373 	}
4374 
4375 	return ret;
4376 
4377 error_netdev_register:
4378 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4379 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4380 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4381 		stmmac_mdio_unregister(ndev);
4382 error_mdio_register:
4383 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4384 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4385 
4386 		netif_napi_del(&rx_q->napi);
4387 	}
4388 error_hw_init:
4389 	destroy_workqueue(priv->wq);
4390 error_wq:
4391 	free_netdev(ndev);
4392 
4393 	return ret;
4394 }
4395 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
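
#if 0	/* Editor's note: a sketch (kept out of the build) of how a platform
	 * glue driver typically feeds stmmac_dvr_probe().  The helpers
	 * stmmac_get_platform_resources() and stmmac_probe_config_dt() live
	 * in stmmac_platform.c; their exact signatures, like the probe name
	 * below, are assumptions of this sketch rather than a contract.
	 */
static int example_glue_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	int ret;

	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
#endif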
4396 
4397 /**
4398  * stmmac_dvr_remove
4399  * @dev: device pointer
4400  * Description: this function resets the TX/RX processes, disables the MAC
4401  * RX/TX, changes the link status and releases the DMA descriptor rings.
4402  */
4403 int stmmac_dvr_remove(struct device *dev)
4404 {
4405 	struct net_device *ndev = dev_get_drvdata(dev);
4406 	struct stmmac_priv *priv = netdev_priv(ndev);
4407 
4408 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4409 
4410 	stmmac_stop_all_dma(priv);
4411 
4412 	stmmac_mac_set(priv, priv->ioaddr, false);
4413 	netif_carrier_off(ndev);
4414 	unregister_netdev(ndev);
4415 	if (priv->plat->stmmac_rst)
4416 		reset_control_assert(priv->plat->stmmac_rst);
4417 	clk_disable_unprepare(priv->plat->pclk);
4418 	clk_disable_unprepare(priv->plat->stmmac_clk);
4419 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4420 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4421 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4422 		stmmac_mdio_unregister(ndev);
4423 	destroy_workqueue(priv->wq);
4424 	free_netdev(ndev);
4425 
4426 	return 0;
4427 }
4428 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4429 
4430 /**
4431  * stmmac_suspend - suspend callback
4432  * @dev: device pointer
4433  * Description: this function suspends the device. It is called by the
4434  * platform driver to stop the network queues, release the resources,
4435  * program the PMT register (for WoL) and clean up driver state.
4436  */
4437 int stmmac_suspend(struct device *dev)
4438 {
4439 	struct net_device *ndev = dev_get_drvdata(dev);
4440 	struct stmmac_priv *priv = netdev_priv(ndev);
4441 	unsigned long flags;
4442 
4443 	if (!ndev || !netif_running(ndev))
4444 		return 0;
4445 
4446 	if (ndev->phydev)
4447 		phy_stop(ndev->phydev);
4448 
4449 	spin_lock_irqsave(&priv->lock, flags);
4450 
4451 	netif_device_detach(ndev);
4452 	stmmac_stop_all_queues(priv);
4453 
4454 	stmmac_disable_all_queues(priv);
4455 
4456 	/* Stop TX/RX DMA */
4457 	stmmac_stop_all_dma(priv);
4458 
4459 	/* Enable Power down mode by programming the PMT regs */
4460 	if (device_may_wakeup(priv->device)) {
4461 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4462 		priv->irq_wake = 1;
4463 	} else {
4464 		stmmac_mac_set(priv, priv->ioaddr, false);
4465 		pinctrl_pm_select_sleep_state(priv->device);
4466 		/* Disable the clocks since PMT-based wakeup is off */
4467 		clk_disable(priv->plat->pclk);
4468 		clk_disable(priv->plat->stmmac_clk);
4469 	}
4470 	spin_unlock_irqrestore(&priv->lock, flags);
4471 
4472 	priv->oldlink = false;
4473 	priv->speed = SPEED_UNKNOWN;
4474 	priv->oldduplex = DUPLEX_UNKNOWN;
4475 	return 0;
4476 }
4477 EXPORT_SYMBOL_GPL(stmmac_suspend);
4478 
4479 /**
4480  * stmmac_reset_queues_param - reset queue parameters
4481  * @dev: device pointer
4482  */
4483 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4484 {
4485 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4486 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4487 	u32 queue;
4488 
4489 	for (queue = 0; queue < rx_cnt; queue++) {
4490 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4491 
4492 		rx_q->cur_rx = 0;
4493 		rx_q->dirty_rx = 0;
4494 	}
4495 
4496 	for (queue = 0; queue < tx_cnt; queue++) {
4497 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4498 
4499 		tx_q->cur_tx = 0;
4500 		tx_q->dirty_tx = 0;
4501 		tx_q->mss = 0;
4502 	}
4503 }
4504 
4505 /**
4506  * stmmac_resume - resume callback
4507  * @dev: device pointer
4508  * Description: on resume, this function is invoked to set up the DMA and
4509  * the core in a usable state.
4510  */
4511 int stmmac_resume(struct device *dev)
4512 {
4513 	struct net_device *ndev = dev_get_drvdata(dev);
4514 	struct stmmac_priv *priv = netdev_priv(ndev);
4515 	unsigned long flags;
4516 
4517 	if (!netif_running(ndev))
4518 		return 0;
4519 
4520 	/* The Power Down bit in the PMT register is cleared
4521 	 * automatically as soon as a magic packet or a Wake-up frame
4522 	 * is received. Anyway, it is better to clear this bit
4523 	 * manually because it can cause problems when resuming
4524 	 * from other devices (e.g. a serial console).
4525 	 */
4526 	if (device_may_wakeup(priv->device)) {
4527 		spin_lock_irqsave(&priv->lock, flags);
4528 		stmmac_pmt(priv, priv->hw, 0);
4529 		spin_unlock_irqrestore(&priv->lock, flags);
4530 		priv->irq_wake = 0;
4531 	} else {
4532 		pinctrl_pm_select_default_state(priv->device);
4533 		/* enable the clocks previously disabled */
4534 		clk_enable(priv->plat->stmmac_clk);
4535 		clk_enable(priv->plat->pclk);
4536 		/* reset the phy so that it's ready */
4537 		if (priv->mii)
4538 			stmmac_mdio_reset(priv->mii);
4539 	}
4540 
4541 	netif_device_attach(ndev);
4542 
4543 	spin_lock_irqsave(&priv->lock, flags);
4544 
4545 	stmmac_reset_queues_param(priv);
4546 
4547 	stmmac_clear_descriptors(priv);
4548 
4549 	stmmac_hw_setup(ndev, false);
4550 	stmmac_init_tx_coalesce(priv);
4551 	stmmac_set_rx_mode(ndev);
4552 
4553 	stmmac_enable_all_queues(priv);
4554 
4555 	stmmac_start_all_queues(priv);
4556 
4557 	spin_unlock_irqrestore(&priv->lock, flags);
4558 
4559 	if (ndev->phydev)
4560 		phy_start(ndev->phydev);
4561 
4562 	return 0;
4563 }
4564 EXPORT_SYMBOL_GPL(stmmac_resume);
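
#if 0	/* Editor's note: a sketch (kept out of the build) of how the
	 * suspend/resume callbacks above are wired into a bus driver.  The
	 * stmmac platform and PCI glue do this with SIMPLE_DEV_PM_OPS in
	 * their own files; the example_pm_ops name is an assumption here.
	 *
	 * A glue driver then points its struct device_driver at it, e.g.
	 *	.driver = { .name = "...", .pm = &example_pm_ops, },
	 */
static SIMPLE_DEV_PM_OPS(example_pm_ops, stmmac_suspend, stmmac_resume);
#endif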
4565 
4566 #ifndef MODULE
4567 static int __init stmmac_cmdline_opt(char *str)
4568 {
4569 	char *opt;
4570 
4571 	if (!str || !*str)
4572 		return -EINVAL;
4573 	while ((opt = strsep(&str, ",")) != NULL) {
4574 		if (!strncmp(opt, "debug:", 6)) {
4575 			if (kstrtoint(opt + 6, 0, &debug))
4576 				goto err;
4577 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4578 			if (kstrtoint(opt + 8, 0, &phyaddr))
4579 				goto err;
4580 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4581 			if (kstrtoint(opt + 7, 0, &buf_sz))
4582 				goto err;
4583 		} else if (!strncmp(opt, "tc:", 3)) {
4584 			if (kstrtoint(opt + 3, 0, &tc))
4585 				goto err;
4586 		} else if (!strncmp(opt, "watchdog:", 9)) {
4587 			if (kstrtoint(opt + 9, 0, &watchdog))
4588 				goto err;
4589 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4590 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4591 				goto err;
4592 		} else if (!strncmp(opt, "pause:", 6)) {
4593 			if (kstrtoint(opt + 6, 0, &pause))
4594 				goto err;
4595 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4596 			if (kstrtoint(opt + 10, 0, &eee_timer))
4597 				goto err;
4598 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4599 			if (kstrtoint(opt + 11, 0, &chain_mode))
4600 				goto err;
4601 		}
4602 	}
4603 	return 0;
4604 
4605 err:
4606 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4607 	return -EINVAL;
4608 }
4609 
4610 __setup("stmmaceth=", stmmac_cmdline_opt);
4611 #endif /* MODULE */
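
/* Editor's note: example of the boot-time syntax parsed by
 * stmmac_cmdline_opt() above when the driver is built in (values are
 * illustrative only):
 *
 *	stmmaceth=debug:16,buf_sz:4096,tc:256,watchdog:5000
 */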
4612 
4613 static int __init stmmac_init(void)
4614 {
4615 #ifdef CONFIG_DEBUG_FS
4616 	/* Create debugfs main directory if it doesn't exist yet */
4617 	if (!stmmac_fs_dir) {
4618 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4619 
4620 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4621 			pr_err("ERROR %s, debugfs create directory failed\n",
4622 			       STMMAC_RESOURCE_NAME);
4623 
4624 			return -ENOMEM;
4625 		}
4626 	}
4627 #endif
4628 
4629 	return 0;
4630 }
4631 
4632 static void __exit stmmac_exit(void)
4633 {
4634 #ifdef CONFIG_DEBUG_FS
4635 	debugfs_remove_recursive(stmmac_fs_dir);
4636 #endif
4637 }
4638 
4639 module_init(stmmac_init)
4640 module_exit(stmmac_exit)
4641 
4642 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4643 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4644 MODULE_LICENSE("GPL");
4645