xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision d96fc832bcb6269d96e33d506f33033d7ed08598)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 
54 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
55 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
56 
57 /* Module parameters */
58 #define TX_TIMEO	5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62 
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66 
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70 
71 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
73 
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77 
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81 
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86 
87 #define	DEFAULT_BUFSIZE	1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91 
92 #define	STMMAC_RX_COPYBREAK	256
93 
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97 
98 #define STMMAC_DEFAULT_LPI_TIMER	1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103 
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but allow user to force to use the chain instead of the ring
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110 
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112 
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117 
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119 
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and set a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127 	if (unlikely(watchdog < 0))
128 		watchdog = TX_TIMEO;
129 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 		buf_sz = DEFAULT_BUFSIZE;
131 	if (unlikely(flow_ctrl > 1))
132 		flow_ctrl = FLOW_AUTO;
133 	else if (likely(flow_ctrl < 0))
134 		flow_ctrl = FLOW_OFF;
135 	if (unlikely((pause < 0) || (pause > 0xffff)))
136 		pause = PAUSE_TIME;
137 	if (eee_timer < 0)
138 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140 
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 	u32 queue;
149 
150 	for (queue = 0; queue < rx_queues_cnt; queue++) {
151 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152 
153 		napi_disable(&rx_q->napi);
154 	}
155 }
156 
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 	u32 queue;
165 
166 	for (queue = 0; queue < rx_queues_cnt; queue++) {
167 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168 
169 		napi_enable(&rx_q->napi);
170 	}
171 }
172 
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 	u32 queue;
181 
182 	for (queue = 0; queue < tx_queues_cnt; queue++)
183 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185 
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 	u32 queue;
194 
195 	for (queue = 0; queue < tx_queues_cnt; queue++)
196 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198 
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *	If a specific clk_csr value is passed from the platform
206  *	this means that the CSR Clock Range selection cannot be
207  *	changed at run-time and it is fixed (as reported in the driver
208  *	documentation). Viceversa the driver will try to set the MDC
209  *	clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213 	u32 clk_rate;
214 
215 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216 
217 	/* Platform provided default clk_csr would be assumed valid
218 	 * for all other cases except for the below mentioned ones.
219 	 * For values higher than the IEEE 802.3 specified frequency
220 	 * we can not estimate the proper divider as it is not known
221 	 * the frequency of clk_csr_i. So we do not change the default
222 	 * divider.
223 	 */
224 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 		if (clk_rate < CSR_F_35M)
226 			priv->clk_csr = STMMAC_CSR_20_35M;
227 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 			priv->clk_csr = STMMAC_CSR_35_60M;
229 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 			priv->clk_csr = STMMAC_CSR_60_100M;
231 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 			priv->clk_csr = STMMAC_CSR_100_150M;
233 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 			priv->clk_csr = STMMAC_CSR_150_250M;
235 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 			priv->clk_csr = STMMAC_CSR_250_300M;
237 	}
238 
239 	if (priv->plat->has_sun8i) {
240 		if (clk_rate > 160000000)
241 			priv->clk_csr = 0x03;
242 		else if (clk_rate > 80000000)
243 			priv->clk_csr = 0x02;
244 		else if (clk_rate > 40000000)
245 			priv->clk_csr = 0x01;
246 		else
247 			priv->clk_csr = 0;
248 	}
249 }
250 
251 static void print_pkt(unsigned char *buf, int len)
252 {
253 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256 
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260 	u32 avail;
261 
262 	if (tx_q->dirty_tx > tx_q->cur_tx)
263 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264 	else
265 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266 
267 	return avail;
268 }
269 
270 /**
271  * stmmac_rx_dirty - Get RX queue dirty
272  * @priv: driver private structure
273  * @queue: RX queue index
274  */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278 	u32 dirty;
279 
280 	if (rx_q->dirty_rx <= rx_q->cur_rx)
281 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
282 	else
283 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284 
285 	return dirty;
286 }
287 
288 /**
289  * stmmac_hw_fix_mac_speed - callback for speed selection
290  * @priv: driver private structure
291  * Description: on some platforms (e.g. ST), some HW system configuration
292  * registers have to be set according to the link speed negotiated.
293  */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296 	struct net_device *ndev = priv->dev;
297 	struct phy_device *phydev = ndev->phydev;
298 
299 	if (likely(priv->plat->fix_mac_speed))
300 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302 
303 /**
304  * stmmac_enable_eee_mode - check and enter in LPI mode
305  * @priv: driver private structure
306  * Description: this function is to verify and enter in LPI mode in case of
307  * EEE.
308  */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311 	u32 tx_cnt = priv->plat->tx_queues_to_use;
312 	u32 queue;
313 
314 	/* check if all TX queues have the work finished */
315 	for (queue = 0; queue < tx_cnt; queue++) {
316 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317 
318 		if (tx_q->dirty_tx != tx_q->cur_tx)
319 			return; /* still unfinished work */
320 	}
321 
322 	/* Check and enter in LPI mode */
323 	if (!priv->tx_path_in_lpi_mode)
324 		priv->hw->mac->set_eee_mode(priv->hw,
325 					    priv->plat->en_tx_lpi_clockgating);
326 }
327 
328 /**
329  * stmmac_disable_eee_mode - disable and exit from LPI mode
330  * @priv: driver private structure
331  * Description: this function is to exit and disable EEE in case of
332  * LPI state is true. This is called by the xmit.
333  */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336 	priv->hw->mac->reset_eee_mode(priv->hw);
337 	del_timer_sync(&priv->eee_ctrl_timer);
338 	priv->tx_path_in_lpi_mode = false;
339 }
340 
341 /**
342  * stmmac_eee_ctrl_timer - EEE TX SW timer.
343  * @arg : data hook
344  * Description:
345  *  if there is no data transfer and if we are not in LPI state,
346  *  then MAC Transmitter can be moved to LPI state.
347  */
348 static void stmmac_eee_ctrl_timer(struct timer_list *t)
349 {
350 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
351 
352 	stmmac_enable_eee_mode(priv);
353 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355 
356 /**
357  * stmmac_eee_init - init EEE
358  * @priv: driver private structure
359  * Description:
360  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
361  *  can also manage EEE, this function enable the LPI state and start related
362  *  timer.
363  */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366 	struct net_device *ndev = priv->dev;
367 	int interface = priv->plat->interface;
368 	unsigned long flags;
369 	bool ret = false;
370 
371 	if ((interface != PHY_INTERFACE_MODE_MII) &&
372 	    (interface != PHY_INTERFACE_MODE_GMII) &&
373 	    !phy_interface_mode_is_rgmii(interface))
374 		goto out;
375 
376 	/* Using PCS we cannot dial with the phy registers at this stage
377 	 * so we do not support extra feature like EEE.
378 	 */
379 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
380 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
381 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
382 		goto out;
383 
384 	/* MAC core supports the EEE feature. */
385 	if (priv->dma_cap.eee) {
386 		int tx_lpi_timer = priv->tx_lpi_timer;
387 
388 		/* Check if the PHY supports EEE */
389 		if (phy_init_eee(ndev->phydev, 1)) {
390 			/* To manage at run-time if the EEE cannot be supported
391 			 * anymore (for example because the lp caps have been
392 			 * changed).
393 			 * In that case the driver disable own timers.
394 			 */
395 			spin_lock_irqsave(&priv->lock, flags);
396 			if (priv->eee_active) {
397 				netdev_dbg(priv->dev, "disable EEE\n");
398 				del_timer_sync(&priv->eee_ctrl_timer);
399 				priv->hw->mac->set_eee_timer(priv->hw, 0,
400 							     tx_lpi_timer);
401 			}
402 			priv->eee_active = 0;
403 			spin_unlock_irqrestore(&priv->lock, flags);
404 			goto out;
405 		}
406 		/* Activate the EEE and start timers */
407 		spin_lock_irqsave(&priv->lock, flags);
408 		if (!priv->eee_active) {
409 			priv->eee_active = 1;
410 			timer_setup(&priv->eee_ctrl_timer,
411 				    stmmac_eee_ctrl_timer, 0);
412 			mod_timer(&priv->eee_ctrl_timer,
413 				  STMMAC_LPI_T(eee_timer));
414 
415 			priv->hw->mac->set_eee_timer(priv->hw,
416 						     STMMAC_DEFAULT_LIT_LS,
417 						     tx_lpi_timer);
418 		}
419 		/* Set HW EEE according to the speed */
420 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
421 
422 		ret = true;
423 		spin_unlock_irqrestore(&priv->lock, flags);
424 
425 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
426 	}
427 out:
428 	return ret;
429 }
430 
431 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
432  * @priv: driver private structure
433  * @p : descriptor pointer
434  * @skb : the socket buffer
435  * Description :
436  * This function will read timestamp from the descriptor & pass it to stack.
437  * and also perform some sanity checks.
438  */
439 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
440 				   struct dma_desc *p, struct sk_buff *skb)
441 {
442 	struct skb_shared_hwtstamps shhwtstamp;
443 	u64 ns;
444 
445 	if (!priv->hwts_tx_en)
446 		return;
447 
448 	/* exit if skb doesn't support hw tstamp */
449 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
450 		return;
451 
452 	/* check tx tstamp status */
453 	if (priv->hw->desc->get_tx_timestamp_status(p)) {
454 		/* get the valid tstamp */
455 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
456 
457 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
458 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
459 
460 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
461 		/* pass tstamp to stack */
462 		skb_tstamp_tx(skb, &shhwtstamp);
463 	}
464 
465 	return;
466 }
467 
468 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
469  * @priv: driver private structure
470  * @p : descriptor pointer
471  * @np : next descriptor pointer
472  * @skb : the socket buffer
473  * Description :
474  * This function will read received packet's timestamp from the descriptor
475  * and pass it to stack. It also perform some sanity checks.
476  */
477 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
478 				   struct dma_desc *np, struct sk_buff *skb)
479 {
480 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
481 	struct dma_desc *desc = p;
482 	u64 ns;
483 
484 	if (!priv->hwts_rx_en)
485 		return;
486 	/* For GMAC4, the valid timestamp is from CTX next desc. */
487 	if (priv->plat->has_gmac4)
488 		desc = np;
489 
490 	/* Check if timestamp is available */
491 	if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
492 		ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
493 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
494 		shhwtstamp = skb_hwtstamps(skb);
495 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
496 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
497 	} else  {
498 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
499 	}
500 }
501 
502 /**
503  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
504  *  @dev: device pointer.
505  *  @ifr: An IOCTL specific structure, that can contain a pointer to
506  *  a proprietary structure used to pass information to the driver.
507  *  Description:
508  *  This function configures the MAC to enable/disable both outgoing(TX)
509  *  and incoming(RX) packets time stamping based on user input.
510  *  Return Value:
511  *  0 on success and an appropriate -ve integer on failure.
512  */
513 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
514 {
515 	struct stmmac_priv *priv = netdev_priv(dev);
516 	struct hwtstamp_config config;
517 	struct timespec64 now;
518 	u64 temp = 0;
519 	u32 ptp_v2 = 0;
520 	u32 tstamp_all = 0;
521 	u32 ptp_over_ipv4_udp = 0;
522 	u32 ptp_over_ipv6_udp = 0;
523 	u32 ptp_over_ethernet = 0;
524 	u32 snap_type_sel = 0;
525 	u32 ts_master_en = 0;
526 	u32 ts_event_en = 0;
527 	u32 value = 0;
528 	u32 sec_inc;
529 
530 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
531 		netdev_alert(priv->dev, "No support for HW time stamping\n");
532 		priv->hwts_tx_en = 0;
533 		priv->hwts_rx_en = 0;
534 
535 		return -EOPNOTSUPP;
536 	}
537 
538 	if (copy_from_user(&config, ifr->ifr_data,
539 			   sizeof(struct hwtstamp_config)))
540 		return -EFAULT;
541 
542 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
543 		   __func__, config.flags, config.tx_type, config.rx_filter);
544 
545 	/* reserved for future extensions */
546 	if (config.flags)
547 		return -EINVAL;
548 
549 	if (config.tx_type != HWTSTAMP_TX_OFF &&
550 	    config.tx_type != HWTSTAMP_TX_ON)
551 		return -ERANGE;
552 
553 	if (priv->adv_ts) {
554 		switch (config.rx_filter) {
555 		case HWTSTAMP_FILTER_NONE:
556 			/* time stamp no incoming packet at all */
557 			config.rx_filter = HWTSTAMP_FILTER_NONE;
558 			break;
559 
560 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
561 			/* PTP v1, UDP, any kind of event packet */
562 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
563 			/* take time stamp for all event messages */
564 			if (priv->plat->has_gmac4)
565 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
566 			else
567 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
568 
569 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
571 			break;
572 
573 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
574 			/* PTP v1, UDP, Sync packet */
575 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576 			/* take time stamp for SYNC messages only */
577 			ts_event_en = PTP_TCR_TSEVNTENA;
578 
579 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
581 			break;
582 
583 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
584 			/* PTP v1, UDP, Delay_req packet */
585 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586 			/* take time stamp for Delay_Req messages only */
587 			ts_master_en = PTP_TCR_TSMSTRENA;
588 			ts_event_en = PTP_TCR_TSEVNTENA;
589 
590 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592 			break;
593 
594 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
595 			/* PTP v2, UDP, any kind of event packet */
596 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597 			ptp_v2 = PTP_TCR_TSVER2ENA;
598 			/* take time stamp for all event messages */
599 			if (priv->plat->has_gmac4)
600 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
601 			else
602 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
603 
604 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 			break;
607 
608 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
609 			/* PTP v2, UDP, Sync packet */
610 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
611 			ptp_v2 = PTP_TCR_TSVER2ENA;
612 			/* take time stamp for SYNC messages only */
613 			ts_event_en = PTP_TCR_TSEVNTENA;
614 
615 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
616 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
617 			break;
618 
619 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
620 			/* PTP v2, UDP, Delay_req packet */
621 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
622 			ptp_v2 = PTP_TCR_TSVER2ENA;
623 			/* take time stamp for Delay_Req messages only */
624 			ts_master_en = PTP_TCR_TSMSTRENA;
625 			ts_event_en = PTP_TCR_TSEVNTENA;
626 
627 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
628 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
629 			break;
630 
631 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
632 			/* PTP v2/802.AS1 any layer, any kind of event packet */
633 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
634 			ptp_v2 = PTP_TCR_TSVER2ENA;
635 			/* take time stamp for all event messages */
636 			if (priv->plat->has_gmac4)
637 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
638 			else
639 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
640 
641 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 			ptp_over_ethernet = PTP_TCR_TSIPENA;
644 			break;
645 
646 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
647 			/* PTP v2/802.AS1, any layer, Sync packet */
648 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
649 			ptp_v2 = PTP_TCR_TSVER2ENA;
650 			/* take time stamp for SYNC messages only */
651 			ts_event_en = PTP_TCR_TSEVNTENA;
652 
653 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
654 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
655 			ptp_over_ethernet = PTP_TCR_TSIPENA;
656 			break;
657 
658 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
659 			/* PTP v2/802.AS1, any layer, Delay_req packet */
660 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
661 			ptp_v2 = PTP_TCR_TSVER2ENA;
662 			/* take time stamp for Delay_Req messages only */
663 			ts_master_en = PTP_TCR_TSMSTRENA;
664 			ts_event_en = PTP_TCR_TSEVNTENA;
665 
666 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668 			ptp_over_ethernet = PTP_TCR_TSIPENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_NTP_ALL:
672 		case HWTSTAMP_FILTER_ALL:
673 			/* time stamp any incoming packet */
674 			config.rx_filter = HWTSTAMP_FILTER_ALL;
675 			tstamp_all = PTP_TCR_TSENALL;
676 			break;
677 
678 		default:
679 			return -ERANGE;
680 		}
681 	} else {
682 		switch (config.rx_filter) {
683 		case HWTSTAMP_FILTER_NONE:
684 			config.rx_filter = HWTSTAMP_FILTER_NONE;
685 			break;
686 		default:
687 			/* PTP v1, UDP, any kind of event packet */
688 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
689 			break;
690 		}
691 	}
692 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
693 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
694 
695 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
696 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
697 	else {
698 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
699 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
700 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
701 			 ts_master_en | snap_type_sel);
702 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
703 
704 		/* program Sub Second Increment reg */
705 		sec_inc = priv->hw->ptp->config_sub_second_increment(
706 			priv->ptpaddr, priv->plat->clk_ptp_rate,
707 			priv->plat->has_gmac4);
708 		temp = div_u64(1000000000ULL, sec_inc);
709 
710 		/* calculate default added value:
711 		 * formula is :
712 		 * addend = (2^32)/freq_div_ratio;
713 		 * where, freq_div_ratio = 1e9ns/sec_inc
714 		 */
715 		temp = (u64)(temp << 32);
716 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
717 		priv->hw->ptp->config_addend(priv->ptpaddr,
718 					     priv->default_addend);
719 
720 		/* initialize system time */
721 		ktime_get_real_ts64(&now);
722 
723 		/* lower 32 bits of tv_sec are safe until y2106 */
724 		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
725 					    now.tv_nsec);
726 	}
727 
728 	return copy_to_user(ifr->ifr_data, &config,
729 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
730 }
731 
732 /**
733  * stmmac_init_ptp - init PTP
734  * @priv: driver private structure
735  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
736  * This is done by looking at the HW cap. register.
737  * This function also registers the ptp driver.
738  */
739 static int stmmac_init_ptp(struct stmmac_priv *priv)
740 {
741 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
742 		return -EOPNOTSUPP;
743 
744 	priv->adv_ts = 0;
745 	/* Check if adv_ts can be enabled for dwmac 4.x core */
746 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
747 		priv->adv_ts = 1;
748 	/* Dwmac 3.x core with extend_desc can support adv_ts */
749 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
750 		priv->adv_ts = 1;
751 
752 	if (priv->dma_cap.time_stamp)
753 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
754 
755 	if (priv->adv_ts)
756 		netdev_info(priv->dev,
757 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
758 
759 	priv->hw->ptp = &stmmac_ptp;
760 	priv->hwts_tx_en = 0;
761 	priv->hwts_rx_en = 0;
762 
763 	stmmac_ptp_register(priv);
764 
765 	return 0;
766 }
767 
768 static void stmmac_release_ptp(struct stmmac_priv *priv)
769 {
770 	if (priv->plat->clk_ptp_ref)
771 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
772 	stmmac_ptp_unregister(priv);
773 }
774 
775 /**
776  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
777  *  @priv: driver private structure
778  *  Description: It is used for configuring the flow control in all queues
779  */
780 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
781 {
782 	u32 tx_cnt = priv->plat->tx_queues_to_use;
783 
784 	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
785 				 priv->pause, tx_cnt);
786 }
787 
788 /**
789  * stmmac_adjust_link - adjusts the link parameters
790  * @dev: net device structure
791  * Description: this is the helper called by the physical abstraction layer
792  * drivers to communicate the phy link status. According the speed and duplex
793  * this driver can invoke registered glue-logic as well.
794  * It also invoke the eee initialization because it could happen when switch
795  * on different networks (that are eee capable).
796  */
797 static void stmmac_adjust_link(struct net_device *dev)
798 {
799 	struct stmmac_priv *priv = netdev_priv(dev);
800 	struct phy_device *phydev = dev->phydev;
801 	unsigned long flags;
802 	bool new_state = false;
803 
804 	if (!phydev)
805 		return;
806 
807 	spin_lock_irqsave(&priv->lock, flags);
808 
809 	if (phydev->link) {
810 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
811 
812 		/* Now we make sure that we can be in full duplex mode.
813 		 * If not, we operate in half-duplex mode. */
814 		if (phydev->duplex != priv->oldduplex) {
815 			new_state = true;
816 			if (!phydev->duplex)
817 				ctrl &= ~priv->hw->link.duplex;
818 			else
819 				ctrl |= priv->hw->link.duplex;
820 			priv->oldduplex = phydev->duplex;
821 		}
822 		/* Flow Control operation */
823 		if (phydev->pause)
824 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
825 
826 		if (phydev->speed != priv->speed) {
827 			new_state = true;
828 			ctrl &= ~priv->hw->link.speed_mask;
829 			switch (phydev->speed) {
830 			case SPEED_1000:
831 				ctrl |= priv->hw->link.speed1000;
832 				break;
833 			case SPEED_100:
834 				ctrl |= priv->hw->link.speed100;
835 				break;
836 			case SPEED_10:
837 				ctrl |= priv->hw->link.speed10;
838 				break;
839 			default:
840 				netif_warn(priv, link, priv->dev,
841 					   "broken speed: %d\n", phydev->speed);
842 				phydev->speed = SPEED_UNKNOWN;
843 				break;
844 			}
845 			if (phydev->speed != SPEED_UNKNOWN)
846 				stmmac_hw_fix_mac_speed(priv);
847 			priv->speed = phydev->speed;
848 		}
849 
850 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
851 
852 		if (!priv->oldlink) {
853 			new_state = true;
854 			priv->oldlink = true;
855 		}
856 	} else if (priv->oldlink) {
857 		new_state = true;
858 		priv->oldlink = false;
859 		priv->speed = SPEED_UNKNOWN;
860 		priv->oldduplex = DUPLEX_UNKNOWN;
861 	}
862 
863 	if (new_state && netif_msg_link(priv))
864 		phy_print_status(phydev);
865 
866 	spin_unlock_irqrestore(&priv->lock, flags);
867 
868 	if (phydev->is_pseudo_fixed_link)
869 		/* Stop PHY layer to call the hook to adjust the link in case
870 		 * of a switch is attached to the stmmac driver.
871 		 */
872 		phydev->irq = PHY_IGNORE_INTERRUPT;
873 	else
874 		/* At this stage, init the EEE if supported.
875 		 * Never called in case of fixed_link.
876 		 */
877 		priv->eee_enabled = stmmac_eee_init(priv);
878 }
879 
880 /**
881  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
882  * @priv: driver private structure
883  * Description: this is to verify if the HW supports the PCS.
884  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
885  * configured for the TBI, RTBI, or SGMII PHY interface.
886  */
887 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
888 {
889 	int interface = priv->plat->interface;
890 
891 	if (priv->dma_cap.pcs) {
892 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
893 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
894 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
895 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
896 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
897 			priv->hw->pcs = STMMAC_PCS_RGMII;
898 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
899 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
900 			priv->hw->pcs = STMMAC_PCS_SGMII;
901 		}
902 	}
903 }
904 
905 /**
906  * stmmac_init_phy - PHY initialization
907  * @dev: net device structure
908  * Description: it initializes the driver's PHY state, and attaches the PHY
909  * to the mac driver.
910  *  Return value:
911  *  0 on success
912  */
913 static int stmmac_init_phy(struct net_device *dev)
914 {
915 	struct stmmac_priv *priv = netdev_priv(dev);
916 	struct phy_device *phydev;
917 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
918 	char bus_id[MII_BUS_ID_SIZE];
919 	int interface = priv->plat->interface;
920 	int max_speed = priv->plat->max_speed;
921 	priv->oldlink = false;
922 	priv->speed = SPEED_UNKNOWN;
923 	priv->oldduplex = DUPLEX_UNKNOWN;
924 
925 	if (priv->plat->phy_node) {
926 		phydev = of_phy_connect(dev, priv->plat->phy_node,
927 					&stmmac_adjust_link, 0, interface);
928 	} else {
929 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
930 			 priv->plat->bus_id);
931 
932 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
933 			 priv->plat->phy_addr);
934 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
935 			   phy_id_fmt);
936 
937 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
938 				     interface);
939 	}
940 
941 	if (IS_ERR_OR_NULL(phydev)) {
942 		netdev_err(priv->dev, "Could not attach to PHY\n");
943 		if (!phydev)
944 			return -ENODEV;
945 
946 		return PTR_ERR(phydev);
947 	}
948 
949 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
950 	if ((interface == PHY_INTERFACE_MODE_MII) ||
951 	    (interface == PHY_INTERFACE_MODE_RMII) ||
952 		(max_speed < 1000 && max_speed > 0))
953 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
954 					 SUPPORTED_1000baseT_Full);
955 
956 	/*
957 	 * Broken HW is sometimes missing the pull-up resistor on the
958 	 * MDIO line, which results in reads to non-existent devices returning
959 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
960 	 * device as well.
961 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
962 	 */
963 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
964 		phy_disconnect(phydev);
965 		return -ENODEV;
966 	}
967 
968 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
969 	 * subsequent PHY polling, make sure we force a link transition if
970 	 * we have a UP/DOWN/UP transition
971 	 */
972 	if (phydev->is_pseudo_fixed_link)
973 		phydev->irq = PHY_POLL;
974 
975 	phy_attached_info(phydev);
976 	return 0;
977 }
978 
979 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
980 {
981 	u32 rx_cnt = priv->plat->rx_queues_to_use;
982 	void *head_rx;
983 	u32 queue;
984 
985 	/* Display RX rings */
986 	for (queue = 0; queue < rx_cnt; queue++) {
987 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
988 
989 		pr_info("\tRX Queue %u rings\n", queue);
990 
991 		if (priv->extend_desc)
992 			head_rx = (void *)rx_q->dma_erx;
993 		else
994 			head_rx = (void *)rx_q->dma_rx;
995 
996 		/* Display RX ring */
997 		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
998 	}
999 }
1000 
1001 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1002 {
1003 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1004 	void *head_tx;
1005 	u32 queue;
1006 
1007 	/* Display TX rings */
1008 	for (queue = 0; queue < tx_cnt; queue++) {
1009 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1010 
1011 		pr_info("\tTX Queue %d rings\n", queue);
1012 
1013 		if (priv->extend_desc)
1014 			head_tx = (void *)tx_q->dma_etx;
1015 		else
1016 			head_tx = (void *)tx_q->dma_tx;
1017 
1018 		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1019 	}
1020 }
1021 
1022 static void stmmac_display_rings(struct stmmac_priv *priv)
1023 {
1024 	/* Display RX ring */
1025 	stmmac_display_rx_rings(priv);
1026 
1027 	/* Display TX ring */
1028 	stmmac_display_tx_rings(priv);
1029 }
1030 
1031 static int stmmac_set_bfsize(int mtu, int bufsize)
1032 {
1033 	int ret = bufsize;
1034 
1035 	if (mtu >= BUF_SIZE_4KiB)
1036 		ret = BUF_SIZE_8KiB;
1037 	else if (mtu >= BUF_SIZE_2KiB)
1038 		ret = BUF_SIZE_4KiB;
1039 	else if (mtu > DEFAULT_BUFSIZE)
1040 		ret = BUF_SIZE_2KiB;
1041 	else
1042 		ret = DEFAULT_BUFSIZE;
1043 
1044 	return ret;
1045 }
1046 
1047 /**
1048  * stmmac_clear_rx_descriptors - clear RX descriptors
1049  * @priv: driver private structure
1050  * @queue: RX queue index
1051  * Description: this function is called to clear the RX descriptors
1052  * in case of both basic and extended descriptors are used.
1053  */
1054 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1055 {
1056 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1057 	int i;
1058 
1059 	/* Clear the RX descriptors */
1060 	for (i = 0; i < DMA_RX_SIZE; i++)
1061 		if (priv->extend_desc)
1062 			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1063 						     priv->use_riwt, priv->mode,
1064 						     (i == DMA_RX_SIZE - 1));
1065 		else
1066 			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1067 						     priv->use_riwt, priv->mode,
1068 						     (i == DMA_RX_SIZE - 1));
1069 }
1070 
1071 /**
1072  * stmmac_clear_tx_descriptors - clear tx descriptors
1073  * @priv: driver private structure
1074  * @queue: TX queue index.
1075  * Description: this function is called to clear the TX descriptors
1076  * in case of both basic and extended descriptors are used.
1077  */
1078 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1079 {
1080 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1081 	int i;
1082 
1083 	/* Clear the TX descriptors */
1084 	for (i = 0; i < DMA_TX_SIZE; i++)
1085 		if (priv->extend_desc)
1086 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1087 						     priv->mode,
1088 						     (i == DMA_TX_SIZE - 1));
1089 		else
1090 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1091 						     priv->mode,
1092 						     (i == DMA_TX_SIZE - 1));
1093 }
1094 
1095 /**
1096  * stmmac_clear_descriptors - clear descriptors
1097  * @priv: driver private structure
1098  * Description: this function is called to clear the TX and RX descriptors
1099  * in case of both basic and extended descriptors are used.
1100  */
1101 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1102 {
1103 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1104 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1105 	u32 queue;
1106 
1107 	/* Clear the RX descriptors */
1108 	for (queue = 0; queue < rx_queue_cnt; queue++)
1109 		stmmac_clear_rx_descriptors(priv, queue);
1110 
1111 	/* Clear the TX descriptors */
1112 	for (queue = 0; queue < tx_queue_cnt; queue++)
1113 		stmmac_clear_tx_descriptors(priv, queue);
1114 }
1115 
1116 /**
1117  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1118  * @priv: driver private structure
1119  * @p: descriptor pointer
1120  * @i: descriptor index
1121  * @flags: gfp flag
1122  * @queue: RX queue index
1123  * Description: this function is called to allocate a receive buffer, perform
1124  * the DMA mapping and init the descriptor.
1125  */
1126 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1127 				  int i, gfp_t flags, u32 queue)
1128 {
1129 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1130 	struct sk_buff *skb;
1131 
1132 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1133 	if (!skb) {
1134 		netdev_err(priv->dev,
1135 			   "%s: Rx init fails; skb is NULL\n", __func__);
1136 		return -ENOMEM;
1137 	}
1138 	rx_q->rx_skbuff[i] = skb;
1139 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1140 						priv->dma_buf_sz,
1141 						DMA_FROM_DEVICE);
1142 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1143 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1144 		dev_kfree_skb_any(skb);
1145 		return -EINVAL;
1146 	}
1147 
1148 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1149 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1150 	else
1151 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1152 
1153 	if ((priv->hw->mode->init_desc3) &&
1154 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
1155 		priv->hw->mode->init_desc3(p);
1156 
1157 	return 0;
1158 }
1159 
1160 /**
1161  * stmmac_free_rx_buffer - free RX dma buffers
1162  * @priv: private structure
1163  * @queue: RX queue index
1164  * @i: buffer index.
1165  */
1166 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1167 {
1168 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1169 
1170 	if (rx_q->rx_skbuff[i]) {
1171 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1172 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1173 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1174 	}
1175 	rx_q->rx_skbuff[i] = NULL;
1176 }
1177 
1178 /**
1179  * stmmac_free_tx_buffer - free RX dma buffers
1180  * @priv: private structure
1181  * @queue: RX queue index
1182  * @i: buffer index.
1183  */
1184 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1185 {
1186 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1187 
1188 	if (tx_q->tx_skbuff_dma[i].buf) {
1189 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1190 			dma_unmap_page(priv->device,
1191 				       tx_q->tx_skbuff_dma[i].buf,
1192 				       tx_q->tx_skbuff_dma[i].len,
1193 				       DMA_TO_DEVICE);
1194 		else
1195 			dma_unmap_single(priv->device,
1196 					 tx_q->tx_skbuff_dma[i].buf,
1197 					 tx_q->tx_skbuff_dma[i].len,
1198 					 DMA_TO_DEVICE);
1199 	}
1200 
1201 	if (tx_q->tx_skbuff[i]) {
1202 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1203 		tx_q->tx_skbuff[i] = NULL;
1204 		tx_q->tx_skbuff_dma[i].buf = 0;
1205 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1206 	}
1207 }
1208 
1209 /**
1210  * init_dma_rx_desc_rings - init the RX descriptor rings
1211  * @dev: net device structure
1212  * @flags: gfp flag.
1213  * Description: this function initializes the DMA RX descriptors
1214  * and allocates the socket buffers. It supports the chained and ring
1215  * modes.
1216  */
1217 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1218 {
1219 	struct stmmac_priv *priv = netdev_priv(dev);
1220 	u32 rx_count = priv->plat->rx_queues_to_use;
1221 	unsigned int bfsize = 0;
1222 	int ret = -ENOMEM;
1223 	int queue;
1224 	int i;
1225 
1226 	if (priv->hw->mode->set_16kib_bfsize)
1227 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1228 
1229 	if (bfsize < BUF_SIZE_16KiB)
1230 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1231 
1232 	priv->dma_buf_sz = bfsize;
1233 
1234 	/* RX INITIALIZATION */
1235 	netif_dbg(priv, probe, priv->dev,
1236 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1237 
1238 	for (queue = 0; queue < rx_count; queue++) {
1239 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1240 
1241 		netif_dbg(priv, probe, priv->dev,
1242 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1243 			  (u32)rx_q->dma_rx_phy);
1244 
1245 		for (i = 0; i < DMA_RX_SIZE; i++) {
1246 			struct dma_desc *p;
1247 
1248 			if (priv->extend_desc)
1249 				p = &((rx_q->dma_erx + i)->basic);
1250 			else
1251 				p = rx_q->dma_rx + i;
1252 
1253 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1254 						     queue);
1255 			if (ret)
1256 				goto err_init_rx_buffers;
1257 
1258 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1259 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1260 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1261 		}
1262 
1263 		rx_q->cur_rx = 0;
1264 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1265 
1266 		stmmac_clear_rx_descriptors(priv, queue);
1267 
1268 		/* Setup the chained descriptor addresses */
1269 		if (priv->mode == STMMAC_CHAIN_MODE) {
1270 			if (priv->extend_desc)
1271 				priv->hw->mode->init(rx_q->dma_erx,
1272 						     rx_q->dma_rx_phy,
1273 						     DMA_RX_SIZE, 1);
1274 			else
1275 				priv->hw->mode->init(rx_q->dma_rx,
1276 						     rx_q->dma_rx_phy,
1277 						     DMA_RX_SIZE, 0);
1278 		}
1279 	}
1280 
1281 	buf_sz = bfsize;
1282 
1283 	return 0;
1284 
1285 err_init_rx_buffers:
1286 	while (queue >= 0) {
1287 		while (--i >= 0)
1288 			stmmac_free_rx_buffer(priv, queue, i);
1289 
1290 		if (queue == 0)
1291 			break;
1292 
1293 		i = DMA_RX_SIZE;
1294 		queue--;
1295 	}
1296 
1297 	return ret;
1298 }
1299 
1300 /**
1301  * init_dma_tx_desc_rings - init the TX descriptor rings
1302  * @dev: net device structure.
1303  * Description: this function initializes the DMA TX descriptors
1304  * and allocates the socket buffers. It supports the chained and ring
1305  * modes.
1306  */
1307 static int init_dma_tx_desc_rings(struct net_device *dev)
1308 {
1309 	struct stmmac_priv *priv = netdev_priv(dev);
1310 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1311 	u32 queue;
1312 	int i;
1313 
1314 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1315 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1316 
1317 		netif_dbg(priv, probe, priv->dev,
1318 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1319 			 (u32)tx_q->dma_tx_phy);
1320 
1321 		/* Setup the chained descriptor addresses */
1322 		if (priv->mode == STMMAC_CHAIN_MODE) {
1323 			if (priv->extend_desc)
1324 				priv->hw->mode->init(tx_q->dma_etx,
1325 						     tx_q->dma_tx_phy,
1326 						     DMA_TX_SIZE, 1);
1327 			else
1328 				priv->hw->mode->init(tx_q->dma_tx,
1329 						     tx_q->dma_tx_phy,
1330 						     DMA_TX_SIZE, 0);
1331 		}
1332 
1333 		for (i = 0; i < DMA_TX_SIZE; i++) {
1334 			struct dma_desc *p;
1335 			if (priv->extend_desc)
1336 				p = &((tx_q->dma_etx + i)->basic);
1337 			else
1338 				p = tx_q->dma_tx + i;
1339 
1340 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1341 				p->des0 = 0;
1342 				p->des1 = 0;
1343 				p->des2 = 0;
1344 				p->des3 = 0;
1345 			} else {
1346 				p->des2 = 0;
1347 			}
1348 
1349 			tx_q->tx_skbuff_dma[i].buf = 0;
1350 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1351 			tx_q->tx_skbuff_dma[i].len = 0;
1352 			tx_q->tx_skbuff_dma[i].last_segment = false;
1353 			tx_q->tx_skbuff[i] = NULL;
1354 		}
1355 
1356 		tx_q->dirty_tx = 0;
1357 		tx_q->cur_tx = 0;
1358 		tx_q->mss = 0;
1359 
1360 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1361 	}
1362 
1363 	return 0;
1364 }
1365 
1366 /**
1367  * init_dma_desc_rings - init the RX/TX descriptor rings
1368  * @dev: net device structure
1369  * @flags: gfp flag.
1370  * Description: this function initializes the DMA RX/TX descriptors
1371  * and allocates the socket buffers. It supports the chained and ring
1372  * modes.
1373  */
1374 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1375 {
1376 	struct stmmac_priv *priv = netdev_priv(dev);
1377 	int ret;
1378 
1379 	ret = init_dma_rx_desc_rings(dev, flags);
1380 	if (ret)
1381 		return ret;
1382 
1383 	ret = init_dma_tx_desc_rings(dev);
1384 
1385 	stmmac_clear_descriptors(priv);
1386 
1387 	if (netif_msg_hw(priv))
1388 		stmmac_display_rings(priv);
1389 
1390 	return ret;
1391 }
1392 
1393 /**
1394  * dma_free_rx_skbufs - free RX dma buffers
1395  * @priv: private structure
1396  * @queue: RX queue index
1397  */
1398 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1399 {
1400 	int i;
1401 
1402 	for (i = 0; i < DMA_RX_SIZE; i++)
1403 		stmmac_free_rx_buffer(priv, queue, i);
1404 }
1405 
1406 /**
1407  * dma_free_tx_skbufs - free TX dma buffers
1408  * @priv: private structure
1409  * @queue: TX queue index
1410  */
1411 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1412 {
1413 	int i;
1414 
1415 	for (i = 0; i < DMA_TX_SIZE; i++)
1416 		stmmac_free_tx_buffer(priv, queue, i);
1417 }
1418 
1419 /**
1420  * free_dma_rx_desc_resources - free RX dma desc resources
1421  * @priv: private structure
1422  */
1423 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1424 {
1425 	u32 rx_count = priv->plat->rx_queues_to_use;
1426 	u32 queue;
1427 
1428 	/* Free RX queue resources */
1429 	for (queue = 0; queue < rx_count; queue++) {
1430 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1431 
1432 		/* Release the DMA RX socket buffers */
1433 		dma_free_rx_skbufs(priv, queue);
1434 
1435 		/* Free DMA regions of consistent memory previously allocated */
1436 		if (!priv->extend_desc)
1437 			dma_free_coherent(priv->device,
1438 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1439 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1440 		else
1441 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1442 					  sizeof(struct dma_extended_desc),
1443 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1444 
1445 		kfree(rx_q->rx_skbuff_dma);
1446 		kfree(rx_q->rx_skbuff);
1447 	}
1448 }
1449 
1450 /**
1451  * free_dma_tx_desc_resources - free TX dma desc resources
1452  * @priv: private structure
1453  */
1454 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1455 {
1456 	u32 tx_count = priv->plat->tx_queues_to_use;
1457 	u32 queue;
1458 
1459 	/* Free TX queue resources */
1460 	for (queue = 0; queue < tx_count; queue++) {
1461 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1462 
1463 		/* Release the DMA TX socket buffers */
1464 		dma_free_tx_skbufs(priv, queue);
1465 
1466 		/* Free DMA regions of consistent memory previously allocated */
1467 		if (!priv->extend_desc)
1468 			dma_free_coherent(priv->device,
1469 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1470 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1471 		else
1472 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1473 					  sizeof(struct dma_extended_desc),
1474 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1475 
1476 		kfree(tx_q->tx_skbuff_dma);
1477 		kfree(tx_q->tx_skbuff);
1478 	}
1479 }
1480 
1481 /**
1482  * alloc_dma_rx_desc_resources - alloc RX resources.
1483  * @priv: private structure
1484  * Description: according to which descriptor can be used (extend or basic)
1485  * this function allocates the resources for TX and RX paths. In case of
1486  * reception, for example, it pre-allocated the RX socket buffer in order to
1487  * allow zero-copy mechanism.
1488  */
1489 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1490 {
1491 	u32 rx_count = priv->plat->rx_queues_to_use;
1492 	int ret = -ENOMEM;
1493 	u32 queue;
1494 
1495 	/* RX queues buffers and DMA */
1496 	for (queue = 0; queue < rx_count; queue++) {
1497 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1498 
1499 		rx_q->queue_index = queue;
1500 		rx_q->priv_data = priv;
1501 
1502 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1503 						    sizeof(dma_addr_t),
1504 						    GFP_KERNEL);
1505 		if (!rx_q->rx_skbuff_dma)
1506 			goto err_dma;
1507 
1508 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1509 						sizeof(struct sk_buff *),
1510 						GFP_KERNEL);
1511 		if (!rx_q->rx_skbuff)
1512 			goto err_dma;
1513 
1514 		if (priv->extend_desc) {
1515 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1516 							    DMA_RX_SIZE *
1517 							    sizeof(struct
1518 							    dma_extended_desc),
1519 							    &rx_q->dma_rx_phy,
1520 							    GFP_KERNEL);
1521 			if (!rx_q->dma_erx)
1522 				goto err_dma;
1523 
1524 		} else {
1525 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1526 							   DMA_RX_SIZE *
1527 							   sizeof(struct
1528 							   dma_desc),
1529 							   &rx_q->dma_rx_phy,
1530 							   GFP_KERNEL);
1531 			if (!rx_q->dma_rx)
1532 				goto err_dma;
1533 		}
1534 	}
1535 
1536 	return 0;
1537 
1538 err_dma:
1539 	free_dma_rx_desc_resources(priv);
1540 
1541 	return ret;
1542 }
1543 
1544 /**
1545  * alloc_dma_tx_desc_resources - alloc TX resources.
1546  * @priv: private structure
1547  * Description: according to which descriptor can be used (extend or basic)
1548  * this function allocates the resources for TX and RX paths. In case of
1549  * reception, for example, it pre-allocated the RX socket buffer in order to
1550  * allow zero-copy mechanism.
1551  */
1552 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1553 {
1554 	u32 tx_count = priv->plat->tx_queues_to_use;
1555 	int ret = -ENOMEM;
1556 	u32 queue;
1557 
1558 	/* TX queues buffers and DMA */
1559 	for (queue = 0; queue < tx_count; queue++) {
1560 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1561 
1562 		tx_q->queue_index = queue;
1563 		tx_q->priv_data = priv;
1564 
1565 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1566 						    sizeof(*tx_q->tx_skbuff_dma),
1567 						    GFP_KERNEL);
1568 		if (!tx_q->tx_skbuff_dma)
1569 			goto err_dma;
1570 
1571 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1572 						sizeof(struct sk_buff *),
1573 						GFP_KERNEL);
1574 		if (!tx_q->tx_skbuff)
1575 			goto err_dma;
1576 
1577 		if (priv->extend_desc) {
1578 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1579 							    DMA_TX_SIZE *
1580 							    sizeof(struct
1581 							    dma_extended_desc),
1582 							    &tx_q->dma_tx_phy,
1583 							    GFP_KERNEL);
1584 			if (!tx_q->dma_etx)
1585 				goto err_dma;
1586 		} else {
1587 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1588 							   DMA_TX_SIZE *
1589 							   sizeof(struct
1590 								  dma_desc),
1591 							   &tx_q->dma_tx_phy,
1592 							   GFP_KERNEL);
1593 			if (!tx_q->dma_tx)
1594 				goto err_dma;
1595 		}
1596 	}
1597 
1598 	return 0;
1599 
1600 err_dma:
1601 	free_dma_tx_desc_resources(priv);
1602 
1603 	return ret;
1604 }
1605 
1606 /**
1607  * alloc_dma_desc_resources - alloc TX/RX resources.
1608  * @priv: private structure
1609  * Description: according to which descriptor can be used (extend or basic)
1610  * this function allocates the resources for TX and RX paths. In case of
1611  * reception, for example, it pre-allocated the RX socket buffer in order to
1612  * allow zero-copy mechanism.
1613  */
1614 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1615 {
1616 	/* RX Allocation */
1617 	int ret = alloc_dma_rx_desc_resources(priv);
1618 
1619 	if (ret)
1620 		return ret;
1621 
1622 	ret = alloc_dma_tx_desc_resources(priv);
1623 
1624 	return ret;
1625 }
1626 
1627 /**
1628  * free_dma_desc_resources - free dma desc resources
1629  * @priv: private structure
1630  */
1631 static void free_dma_desc_resources(struct stmmac_priv *priv)
1632 {
1633 	/* Release the DMA RX socket buffers */
1634 	free_dma_rx_desc_resources(priv);
1635 
1636 	/* Release the DMA TX socket buffers */
1637 	free_dma_tx_desc_resources(priv);
1638 }
1639 
1640 /**
1641  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1642  *  @priv: driver private structure
1643  *  Description: It is used for enabling the rx queues in the MAC
1644  */
1645 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1646 {
1647 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1648 	int queue;
1649 	u8 mode;
1650 
1651 	for (queue = 0; queue < rx_queues_count; queue++) {
1652 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1653 		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1654 	}
1655 }
1656 
1657 /**
1658  * stmmac_start_rx_dma - start RX DMA channel
1659  * @priv: driver private structure
1660  * @chan: RX channel index
1661  * Description:
1662  * This starts a RX DMA channel
1663  */
1664 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1665 {
1666 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1667 	priv->hw->dma->start_rx(priv->ioaddr, chan);
1668 }
1669 
1670 /**
1671  * stmmac_start_tx_dma - start TX DMA channel
1672  * @priv: driver private structure
1673  * @chan: TX channel index
1674  * Description:
1675  * This starts a TX DMA channel
1676  */
1677 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1678 {
1679 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1680 	priv->hw->dma->start_tx(priv->ioaddr, chan);
1681 }
1682 
1683 /**
1684  * stmmac_stop_rx_dma - stop RX DMA channel
1685  * @priv: driver private structure
1686  * @chan: RX channel index
1687  * Description:
1688  * This stops a RX DMA channel
1689  */
1690 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1691 {
1692 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1693 	priv->hw->dma->stop_rx(priv->ioaddr, chan);
1694 }
1695 
1696 /**
1697  * stmmac_stop_tx_dma - stop TX DMA channel
1698  * @priv: driver private structure
1699  * @chan: TX channel index
1700  * Description:
1701  * This stops a TX DMA channel
1702  */
1703 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1704 {
1705 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1706 	priv->hw->dma->stop_tx(priv->ioaddr, chan);
1707 }
1708 
1709 /**
1710  * stmmac_start_all_dma - start all RX and TX DMA channels
1711  * @priv: driver private structure
1712  * Description:
1713  * This starts all the RX and TX DMA channels
1714  */
1715 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1716 {
1717 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1718 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1719 	u32 chan = 0;
1720 
1721 	for (chan = 0; chan < rx_channels_count; chan++)
1722 		stmmac_start_rx_dma(priv, chan);
1723 
1724 	for (chan = 0; chan < tx_channels_count; chan++)
1725 		stmmac_start_tx_dma(priv, chan);
1726 }
1727 
1728 /**
1729  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1730  * @priv: driver private structure
1731  * Description:
1732  * This stops the RX and TX DMA channels
1733  */
1734 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1735 {
1736 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1737 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1738 	u32 chan = 0;
1739 
1740 	for (chan = 0; chan < rx_channels_count; chan++)
1741 		stmmac_stop_rx_dma(priv, chan);
1742 
1743 	for (chan = 0; chan < tx_channels_count; chan++)
1744 		stmmac_stop_tx_dma(priv, chan);
1745 }
1746 
1747 /**
1748  *  stmmac_dma_operation_mode - HW DMA operation mode
1749  *  @priv: driver private structure
1750  *  Description: it is used for configuring the DMA operation mode register in
1751  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1752  */
1753 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1754 {
1755 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1756 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1757 	int rxfifosz = priv->plat->rx_fifo_size;
1758 	int txfifosz = priv->plat->tx_fifo_size;
1759 	u32 txmode = 0;
1760 	u32 rxmode = 0;
1761 	u32 chan = 0;
1762 	u8 qmode = 0;
1763 
1764 	if (rxfifosz == 0)
1765 		rxfifosz = priv->dma_cap.rx_fifo_size;
1766 	if (txfifosz == 0)
1767 		txfifosz = priv->dma_cap.tx_fifo_size;
1768 
1769 	/* Adjust for real per queue fifo size */
1770 	rxfifosz /= rx_channels_count;
1771 	txfifosz /= tx_channels_count;
1772 
1773 	if (priv->plat->force_thresh_dma_mode) {
1774 		txmode = tc;
1775 		rxmode = tc;
1776 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1777 		/*
1778 		 * In case of GMAC, SF mode can be enabled
1779 		 * to perform the TX COE in HW. This depends on:
1780 		 * 1) TX COE if actually supported
1781 		 * 2) There is no bugged Jumbo frame support
1782 		 *    that needs to not insert csum in the TDES.
1783 		 */
1784 		txmode = SF_DMA_MODE;
1785 		rxmode = SF_DMA_MODE;
1786 		priv->xstats.threshold = SF_DMA_MODE;
1787 	} else {
1788 		txmode = tc;
1789 		rxmode = SF_DMA_MODE;
1790 	}
1791 
1792 	/* configure all channels */
1793 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1794 		for (chan = 0; chan < rx_channels_count; chan++) {
1795 			qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1796 
1797 			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1798 						   rxfifosz, qmode);
1799 		}
1800 
1801 		for (chan = 0; chan < tx_channels_count; chan++) {
1802 			qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1803 
1804 			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1805 						   txfifosz, qmode);
1806 		}
1807 	} else {
1808 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1809 					rxfifosz);
1810 	}
1811 }
1812 
1813 /**
1814  * stmmac_tx_clean - to manage the transmission completion
1815  * @priv: driver private structure
1816  * @queue: TX queue index
1817  * Description: it reclaims the transmit resources after transmission completes.
1818  */
1819 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1820 {
1821 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1822 	unsigned int bytes_compl = 0, pkts_compl = 0;
1823 	unsigned int entry;
1824 
1825 	netif_tx_lock(priv->dev);
1826 
1827 	priv->xstats.tx_clean++;
1828 
1829 	entry = tx_q->dirty_tx;
1830 	while (entry != tx_q->cur_tx) {
1831 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1832 		struct dma_desc *p;
1833 		int status;
1834 
1835 		if (priv->extend_desc)
1836 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1837 		else
1838 			p = tx_q->dma_tx + entry;
1839 
1840 		status = priv->hw->desc->tx_status(&priv->dev->stats,
1841 						      &priv->xstats, p,
1842 						      priv->ioaddr);
1843 		/* Check if the descriptor is owned by the DMA */
1844 		if (unlikely(status & tx_dma_own))
1845 			break;
1846 
1847 		/* Just consider the last segment and ...*/
1848 		if (likely(!(status & tx_not_ls))) {
1849 			/* ... verify the status error condition */
1850 			if (unlikely(status & tx_err)) {
1851 				priv->dev->stats.tx_errors++;
1852 			} else {
1853 				priv->dev->stats.tx_packets++;
1854 				priv->xstats.tx_pkt_n++;
1855 			}
1856 			stmmac_get_tx_hwtstamp(priv, p, skb);
1857 		}
1858 
1859 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1860 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1861 				dma_unmap_page(priv->device,
1862 					       tx_q->tx_skbuff_dma[entry].buf,
1863 					       tx_q->tx_skbuff_dma[entry].len,
1864 					       DMA_TO_DEVICE);
1865 			else
1866 				dma_unmap_single(priv->device,
1867 						 tx_q->tx_skbuff_dma[entry].buf,
1868 						 tx_q->tx_skbuff_dma[entry].len,
1869 						 DMA_TO_DEVICE);
1870 			tx_q->tx_skbuff_dma[entry].buf = 0;
1871 			tx_q->tx_skbuff_dma[entry].len = 0;
1872 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1873 		}
1874 
1875 		if (priv->hw->mode->clean_desc3)
1876 			priv->hw->mode->clean_desc3(tx_q, p);
1877 
1878 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1879 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1880 
1881 		if (likely(skb != NULL)) {
1882 			pkts_compl++;
1883 			bytes_compl += skb->len;
1884 			dev_consume_skb_any(skb);
1885 			tx_q->tx_skbuff[entry] = NULL;
1886 		}
1887 
1888 		priv->hw->desc->release_tx_desc(p, priv->mode);
1889 
1890 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1891 	}
1892 	tx_q->dirty_tx = entry;
1893 
1894 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1895 				  pkts_compl, bytes_compl);
1896 
1897 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1898 								queue))) &&
1899 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1900 
1901 		netif_dbg(priv, tx_done, priv->dev,
1902 			  "%s: restart transmit\n", __func__);
1903 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1904 	}
1905 
1906 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1907 		stmmac_enable_eee_mode(priv);
1908 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1909 	}
1910 	netif_tx_unlock(priv->dev);
1911 }
1912 
1913 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1914 {
1915 	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1916 }
1917 
1918 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1919 {
1920 	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1921 }
1922 
1923 /**
1924  * stmmac_tx_err - to manage the tx error
1925  * @priv: driver private structure
1926  * @chan: channel index
1927  * Description: it cleans the descriptors and restarts the transmission
1928  * in case of transmission errors.
1929  */
1930 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1931 {
1932 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1933 	int i;
1934 
1935 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1936 
1937 	stmmac_stop_tx_dma(priv, chan);
1938 	dma_free_tx_skbufs(priv, chan);
1939 	for (i = 0; i < DMA_TX_SIZE; i++)
1940 		if (priv->extend_desc)
1941 			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1942 						     priv->mode,
1943 						     (i == DMA_TX_SIZE - 1));
1944 		else
1945 			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1946 						     priv->mode,
1947 						     (i == DMA_TX_SIZE - 1));
1948 	tx_q->dirty_tx = 0;
1949 	tx_q->cur_tx = 0;
1950 	tx_q->mss = 0;
1951 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1952 	stmmac_start_tx_dma(priv, chan);
1953 
1954 	priv->dev->stats.tx_errors++;
1955 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1956 }
1957 
1958 /**
1959  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1960  *  @priv: driver private structure
1961  *  @txmode: TX operating mode
1962  *  @rxmode: RX operating mode
1963  *  @chan: channel index
1964  *  Description: it is used for configuring of the DMA operation mode in
1965  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1966  *  mode.
1967  */
1968 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1969 					  u32 rxmode, u32 chan)
1970 {
1971 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1972 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1973 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1974 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1975 	int rxfifosz = priv->plat->rx_fifo_size;
1976 	int txfifosz = priv->plat->tx_fifo_size;
1977 
1978 	if (rxfifosz == 0)
1979 		rxfifosz = priv->dma_cap.rx_fifo_size;
1980 	if (txfifosz == 0)
1981 		txfifosz = priv->dma_cap.tx_fifo_size;
1982 
1983 	/* Adjust for real per queue fifo size */
1984 	rxfifosz /= rx_channels_count;
1985 	txfifosz /= tx_channels_count;
1986 
1987 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1988 		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1989 					   rxfifosz, rxqmode);
1990 		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1991 					   txfifosz, txqmode);
1992 	} else {
1993 		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1994 					rxfifosz);
1995 	}
1996 }
1997 
1998 /**
1999  * stmmac_dma_interrupt - DMA ISR
2000  * @priv: driver private structure
2001  * Description: this is the DMA ISR. It is called by the main ISR.
2002  * It calls the dwmac dma routine and schedule poll method in case of some
2003  * work can be done.
2004  */
2005 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2006 {
2007 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2008 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2009 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2010 				tx_channel_count : rx_channel_count;
2011 	u32 chan;
2012 	bool poll_scheduled = false;
2013 	int status[channels_to_check];
2014 
2015 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2016 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2017 	 * stmmac_channel struct.
2018 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2019 	 * all tx queues rather than just a single tx queue.
2020 	 */
2021 	for (chan = 0; chan < channels_to_check; chan++)
2022 		status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
2023 							    &priv->xstats,
2024 							    chan);
2025 
2026 	for (chan = 0; chan < rx_channel_count; chan++) {
2027 		if (likely(status[chan] & handle_rx)) {
2028 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2029 
2030 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2031 				stmmac_disable_dma_irq(priv, chan);
2032 				__napi_schedule(&rx_q->napi);
2033 				poll_scheduled = true;
2034 			}
2035 		}
2036 	}
2037 
2038 	/* If we scheduled poll, we already know that tx queues will be checked.
2039 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2040 	 * completed transmission, if so, call stmmac_poll (once).
2041 	 */
2042 	if (!poll_scheduled) {
2043 		for (chan = 0; chan < tx_channel_count; chan++) {
2044 			if (status[chan] & handle_tx) {
2045 				/* It doesn't matter what rx queue we choose
2046 				 * here. We use 0 since it always exists.
2047 				 */
2048 				struct stmmac_rx_queue *rx_q =
2049 					&priv->rx_queue[0];
2050 
2051 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2052 					stmmac_disable_dma_irq(priv, chan);
2053 					__napi_schedule(&rx_q->napi);
2054 				}
2055 				break;
2056 			}
2057 		}
2058 	}
2059 
2060 	for (chan = 0; chan < tx_channel_count; chan++) {
2061 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2062 			/* Try to bump up the dma threshold on this failure */
2063 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2064 			    (tc <= 256)) {
2065 				tc += 64;
2066 				if (priv->plat->force_thresh_dma_mode)
2067 					stmmac_set_dma_operation_mode(priv,
2068 								      tc,
2069 								      tc,
2070 								      chan);
2071 				else
2072 					stmmac_set_dma_operation_mode(priv,
2073 								    tc,
2074 								    SF_DMA_MODE,
2075 								    chan);
2076 				priv->xstats.threshold = tc;
2077 			}
2078 		} else if (unlikely(status[chan] == tx_hard_error)) {
2079 			stmmac_tx_err(priv, chan);
2080 		}
2081 	}
2082 }
2083 
2084 /**
2085  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2086  * @priv: driver private structure
2087  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2088  */
2089 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2090 {
2091 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2092 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2093 
2094 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2095 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2096 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2097 	} else {
2098 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2099 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2100 	}
2101 
2102 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2103 
2104 	if (priv->dma_cap.rmon) {
2105 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2106 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2107 	} else
2108 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2109 }
2110 
2111 /**
2112  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
2113  * @priv: driver private structure
2114  * Description: select the Enhanced/Alternate or Normal descriptors.
2115  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2116  * supported by the HW capability register.
2117  */
2118 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2119 {
2120 	if (priv->plat->enh_desc) {
2121 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2122 
2123 		/* GMAC older than 3.50 has no extended descriptors */
2124 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2125 			dev_info(priv->device, "Enabled extended descriptors\n");
2126 			priv->extend_desc = 1;
2127 		} else
2128 			dev_warn(priv->device, "Extended descriptors not supported\n");
2129 
2130 		priv->hw->desc = &enh_desc_ops;
2131 	} else {
2132 		dev_info(priv->device, "Normal descriptors\n");
2133 		priv->hw->desc = &ndesc_ops;
2134 	}
2135 }
2136 
2137 /**
2138  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2139  * @priv: driver private structure
2140  * Description:
2141  *  new GMAC chip generations have a new register to indicate the
2142  *  presence of the optional feature/functions.
2143  *  This can be also used to override the value passed through the
2144  *  platform and necessary for old MAC10/100 and GMAC chips.
2145  */
2146 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2147 {
2148 	u32 ret = 0;
2149 
2150 	if (priv->hw->dma->get_hw_feature) {
2151 		priv->hw->dma->get_hw_feature(priv->ioaddr,
2152 					      &priv->dma_cap);
2153 		ret = 1;
2154 	}
2155 
2156 	return ret;
2157 }
2158 
2159 /**
2160  * stmmac_check_ether_addr - check if the MAC addr is valid
2161  * @priv: driver private structure
2162  * Description:
2163  * it is to verify if the MAC address is valid, in case of failures it
2164  * generates a random MAC address
2165  */
2166 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2167 {
2168 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2169 		priv->hw->mac->get_umac_addr(priv->hw,
2170 					     priv->dev->dev_addr, 0);
2171 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2172 			eth_hw_addr_random(priv->dev);
2173 		netdev_info(priv->dev, "device MAC address %pM\n",
2174 			    priv->dev->dev_addr);
2175 	}
2176 }
2177 
2178 /**
2179  * stmmac_init_dma_engine - DMA init.
2180  * @priv: driver private structure
2181  * Description:
2182  * It inits the DMA invoking the specific MAC/GMAC callback.
2183  * Some DMA parameters can be passed from the platform;
2184  * in case of these are not passed a default is kept for the MAC or GMAC.
2185  */
2186 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2187 {
2188 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2189 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2190 	struct stmmac_rx_queue *rx_q;
2191 	struct stmmac_tx_queue *tx_q;
2192 	u32 dummy_dma_rx_phy = 0;
2193 	u32 dummy_dma_tx_phy = 0;
2194 	u32 chan = 0;
2195 	int atds = 0;
2196 	int ret = 0;
2197 
2198 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2199 		dev_err(priv->device, "Invalid DMA configuration\n");
2200 		return -EINVAL;
2201 	}
2202 
2203 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2204 		atds = 1;
2205 
2206 	ret = priv->hw->dma->reset(priv->ioaddr);
2207 	if (ret) {
2208 		dev_err(priv->device, "Failed to reset the dma\n");
2209 		return ret;
2210 	}
2211 
2212 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2213 		/* DMA Configuration */
2214 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2215 				    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2216 
2217 		/* DMA RX Channel Configuration */
2218 		for (chan = 0; chan < rx_channels_count; chan++) {
2219 			rx_q = &priv->rx_queue[chan];
2220 
2221 			priv->hw->dma->init_rx_chan(priv->ioaddr,
2222 						    priv->plat->dma_cfg,
2223 						    rx_q->dma_rx_phy, chan);
2224 
2225 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2226 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2227 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2228 						       rx_q->rx_tail_addr,
2229 						       chan);
2230 		}
2231 
2232 		/* DMA TX Channel Configuration */
2233 		for (chan = 0; chan < tx_channels_count; chan++) {
2234 			tx_q = &priv->tx_queue[chan];
2235 
2236 			priv->hw->dma->init_chan(priv->ioaddr,
2237 						 priv->plat->dma_cfg,
2238 						 chan);
2239 
2240 			priv->hw->dma->init_tx_chan(priv->ioaddr,
2241 						    priv->plat->dma_cfg,
2242 						    tx_q->dma_tx_phy, chan);
2243 
2244 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2245 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2246 			priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2247 						       tx_q->tx_tail_addr,
2248 						       chan);
2249 		}
2250 	} else {
2251 		rx_q = &priv->rx_queue[chan];
2252 		tx_q = &priv->tx_queue[chan];
2253 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2254 				    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2255 	}
2256 
2257 	if (priv->plat->axi && priv->hw->dma->axi)
2258 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2259 
2260 	return ret;
2261 }
2262 
2263 /**
2264  * stmmac_tx_timer - mitigation sw timer for tx.
2265  * @data: data pointer
2266  * Description:
2267  * This is the timer handler to directly invoke the stmmac_tx_clean.
2268  */
2269 static void stmmac_tx_timer(struct timer_list *t)
2270 {
2271 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2272 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2273 	u32 queue;
2274 
2275 	/* let's scan all the tx queues */
2276 	for (queue = 0; queue < tx_queues_count; queue++)
2277 		stmmac_tx_clean(priv, queue);
2278 }
2279 
2280 /**
2281  * stmmac_init_tx_coalesce - init tx mitigation options.
2282  * @priv: driver private structure
2283  * Description:
2284  * This inits the transmit coalesce parameters: i.e. timer rate,
2285  * timer handler and default threshold used for enabling the
2286  * interrupt on completion bit.
2287  */
2288 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2289 {
2290 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2291 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2292 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2293 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2294 	add_timer(&priv->txtimer);
2295 }
2296 
2297 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2298 {
2299 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2300 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2301 	u32 chan;
2302 
2303 	/* set TX ring length */
2304 	if (priv->hw->dma->set_tx_ring_len) {
2305 		for (chan = 0; chan < tx_channels_count; chan++)
2306 			priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2307 						       (DMA_TX_SIZE - 1), chan);
2308 	}
2309 
2310 	/* set RX ring length */
2311 	if (priv->hw->dma->set_rx_ring_len) {
2312 		for (chan = 0; chan < rx_channels_count; chan++)
2313 			priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2314 						       (DMA_RX_SIZE - 1), chan);
2315 	}
2316 }
2317 
2318 /**
2319  *  stmmac_set_tx_queue_weight - Set TX queue weight
2320  *  @priv: driver private structure
2321  *  Description: It is used for setting TX queues weight
2322  */
2323 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2324 {
2325 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2326 	u32 weight;
2327 	u32 queue;
2328 
2329 	for (queue = 0; queue < tx_queues_count; queue++) {
2330 		weight = priv->plat->tx_queues_cfg[queue].weight;
2331 		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2332 	}
2333 }
2334 
2335 /**
2336  *  stmmac_configure_cbs - Configure CBS in TX queue
2337  *  @priv: driver private structure
2338  *  Description: It is used for configuring CBS in AVB TX queues
2339  */
2340 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2341 {
2342 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2343 	u32 mode_to_use;
2344 	u32 queue;
2345 
2346 	/* queue 0 is reserved for legacy traffic */
2347 	for (queue = 1; queue < tx_queues_count; queue++) {
2348 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2349 		if (mode_to_use == MTL_QUEUE_DCB)
2350 			continue;
2351 
2352 		priv->hw->mac->config_cbs(priv->hw,
2353 				priv->plat->tx_queues_cfg[queue].send_slope,
2354 				priv->plat->tx_queues_cfg[queue].idle_slope,
2355 				priv->plat->tx_queues_cfg[queue].high_credit,
2356 				priv->plat->tx_queues_cfg[queue].low_credit,
2357 				queue);
2358 	}
2359 }
2360 
2361 /**
2362  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2363  *  @priv: driver private structure
2364  *  Description: It is used for mapping RX queues to RX dma channels
2365  */
2366 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2367 {
2368 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2369 	u32 queue;
2370 	u32 chan;
2371 
2372 	for (queue = 0; queue < rx_queues_count; queue++) {
2373 		chan = priv->plat->rx_queues_cfg[queue].chan;
2374 		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2375 	}
2376 }
2377 
2378 /**
2379  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2380  *  @priv: driver private structure
2381  *  Description: It is used for configuring the RX Queue Priority
2382  */
2383 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2384 {
2385 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2386 	u32 queue;
2387 	u32 prio;
2388 
2389 	for (queue = 0; queue < rx_queues_count; queue++) {
2390 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2391 			continue;
2392 
2393 		prio = priv->plat->rx_queues_cfg[queue].prio;
2394 		priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2395 	}
2396 }
2397 
2398 /**
2399  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2400  *  @priv: driver private structure
2401  *  Description: It is used for configuring the TX Queue Priority
2402  */
2403 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2404 {
2405 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2406 	u32 queue;
2407 	u32 prio;
2408 
2409 	for (queue = 0; queue < tx_queues_count; queue++) {
2410 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2411 			continue;
2412 
2413 		prio = priv->plat->tx_queues_cfg[queue].prio;
2414 		priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2415 	}
2416 }
2417 
2418 /**
2419  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2420  *  @priv: driver private structure
2421  *  Description: It is used for configuring the RX queue routing
2422  */
2423 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2424 {
2425 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2426 	u32 queue;
2427 	u8 packet;
2428 
2429 	for (queue = 0; queue < rx_queues_count; queue++) {
2430 		/* no specific packet type routing specified for the queue */
2431 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2432 			continue;
2433 
2434 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2435 		priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2436 	}
2437 }
2438 
2439 /**
2440  *  stmmac_mtl_configuration - Configure MTL
2441  *  @priv: driver private structure
2442  *  Description: It is used for configurring MTL
2443  */
2444 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2445 {
2446 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2447 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2448 
2449 	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2450 		stmmac_set_tx_queue_weight(priv);
2451 
2452 	/* Configure MTL RX algorithms */
2453 	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2454 		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2455 						priv->plat->rx_sched_algorithm);
2456 
2457 	/* Configure MTL TX algorithms */
2458 	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2459 		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2460 						priv->plat->tx_sched_algorithm);
2461 
2462 	/* Configure CBS in AVB TX queues */
2463 	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2464 		stmmac_configure_cbs(priv);
2465 
2466 	/* Map RX MTL to DMA channels */
2467 	if (priv->hw->mac->map_mtl_to_dma)
2468 		stmmac_rx_queue_dma_chan_map(priv);
2469 
2470 	/* Enable MAC RX Queues */
2471 	if (priv->hw->mac->rx_queue_enable)
2472 		stmmac_mac_enable_rx_queues(priv);
2473 
2474 	/* Set RX priorities */
2475 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2476 		stmmac_mac_config_rx_queues_prio(priv);
2477 
2478 	/* Set TX priorities */
2479 	if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2480 		stmmac_mac_config_tx_queues_prio(priv);
2481 
2482 	/* Set RX routing */
2483 	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2484 		stmmac_mac_config_rx_queues_routing(priv);
2485 }
2486 
2487 /**
2488  * stmmac_hw_setup - setup mac in a usable state.
2489  *  @dev : pointer to the device structure.
2490  *  Description:
2491  *  this is the main function to setup the HW in a usable state because the
2492  *  dma engine is reset, the core registers are configured (e.g. AXI,
2493  *  Checksum features, timers). The DMA is ready to start receiving and
2494  *  transmitting.
2495  *  Return value:
2496  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2497  *  file on failure.
2498  */
2499 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2500 {
2501 	struct stmmac_priv *priv = netdev_priv(dev);
2502 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2503 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2504 	u32 chan;
2505 	int ret;
2506 
2507 	/* DMA initialization and SW reset */
2508 	ret = stmmac_init_dma_engine(priv);
2509 	if (ret < 0) {
2510 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2511 			   __func__);
2512 		return ret;
2513 	}
2514 
2515 	/* Copy the MAC addr into the HW  */
2516 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2517 
2518 	/* PS and related bits will be programmed according to the speed */
2519 	if (priv->hw->pcs) {
2520 		int speed = priv->plat->mac_port_sel_speed;
2521 
2522 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2523 		    (speed == SPEED_1000)) {
2524 			priv->hw->ps = speed;
2525 		} else {
2526 			dev_warn(priv->device, "invalid port speed\n");
2527 			priv->hw->ps = 0;
2528 		}
2529 	}
2530 
2531 	/* Initialize the MAC Core */
2532 	priv->hw->mac->core_init(priv->hw, dev);
2533 
2534 	/* Initialize MTL*/
2535 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2536 		stmmac_mtl_configuration(priv);
2537 
2538 	ret = priv->hw->mac->rx_ipc(priv->hw);
2539 	if (!ret) {
2540 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2541 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2542 		priv->hw->rx_csum = 0;
2543 	}
2544 
2545 	/* Enable the MAC Rx/Tx */
2546 	priv->hw->mac->set_mac(priv->ioaddr, true);
2547 
2548 	/* Set the HW DMA mode and the COE */
2549 	stmmac_dma_operation_mode(priv);
2550 
2551 	stmmac_mmc_setup(priv);
2552 
2553 	if (init_ptp) {
2554 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2555 		if (ret < 0)
2556 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2557 
2558 		ret = stmmac_init_ptp(priv);
2559 		if (ret == -EOPNOTSUPP)
2560 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2561 		else if (ret)
2562 			netdev_warn(priv->dev, "PTP init failed\n");
2563 	}
2564 
2565 #ifdef CONFIG_DEBUG_FS
2566 	ret = stmmac_init_fs(dev);
2567 	if (ret < 0)
2568 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2569 			    __func__);
2570 #endif
2571 	/* Start the ball rolling... */
2572 	stmmac_start_all_dma(priv);
2573 
2574 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2575 
2576 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2577 		priv->rx_riwt = MAX_DMA_RIWT;
2578 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2579 	}
2580 
2581 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2582 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2583 
2584 	/* set TX and RX rings length */
2585 	stmmac_set_rings_length(priv);
2586 
2587 	/* Enable TSO */
2588 	if (priv->tso) {
2589 		for (chan = 0; chan < tx_cnt; chan++)
2590 			priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2591 	}
2592 
2593 	return 0;
2594 }
2595 
2596 static void stmmac_hw_teardown(struct net_device *dev)
2597 {
2598 	struct stmmac_priv *priv = netdev_priv(dev);
2599 
2600 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2601 }
2602 
2603 /**
2604  *  stmmac_open - open entry point of the driver
2605  *  @dev : pointer to the device structure.
2606  *  Description:
2607  *  This function is the open entry point of the driver.
2608  *  Return value:
2609  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2610  *  file on failure.
2611  */
2612 static int stmmac_open(struct net_device *dev)
2613 {
2614 	struct stmmac_priv *priv = netdev_priv(dev);
2615 	int ret;
2616 
2617 	stmmac_check_ether_addr(priv);
2618 
2619 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2620 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2621 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2622 		ret = stmmac_init_phy(dev);
2623 		if (ret) {
2624 			netdev_err(priv->dev,
2625 				   "%s: Cannot attach to PHY (error: %d)\n",
2626 				   __func__, ret);
2627 			return ret;
2628 		}
2629 	}
2630 
2631 	/* Extra statistics */
2632 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2633 	priv->xstats.threshold = tc;
2634 
2635 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2636 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2637 
2638 	ret = alloc_dma_desc_resources(priv);
2639 	if (ret < 0) {
2640 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2641 			   __func__);
2642 		goto dma_desc_error;
2643 	}
2644 
2645 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2646 	if (ret < 0) {
2647 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2648 			   __func__);
2649 		goto init_error;
2650 	}
2651 
2652 	ret = stmmac_hw_setup(dev, true);
2653 	if (ret < 0) {
2654 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2655 		goto init_error;
2656 	}
2657 
2658 	stmmac_init_tx_coalesce(priv);
2659 
2660 	if (dev->phydev)
2661 		phy_start(dev->phydev);
2662 
2663 	/* Request the IRQ lines */
2664 	ret = request_irq(dev->irq, stmmac_interrupt,
2665 			  IRQF_SHARED, dev->name, dev);
2666 	if (unlikely(ret < 0)) {
2667 		netdev_err(priv->dev,
2668 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2669 			   __func__, dev->irq, ret);
2670 		goto irq_error;
2671 	}
2672 
2673 	/* Request the Wake IRQ in case of another line is used for WoL */
2674 	if (priv->wol_irq != dev->irq) {
2675 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2676 				  IRQF_SHARED, dev->name, dev);
2677 		if (unlikely(ret < 0)) {
2678 			netdev_err(priv->dev,
2679 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2680 				   __func__, priv->wol_irq, ret);
2681 			goto wolirq_error;
2682 		}
2683 	}
2684 
2685 	/* Request the IRQ lines */
2686 	if (priv->lpi_irq > 0) {
2687 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2688 				  dev->name, dev);
2689 		if (unlikely(ret < 0)) {
2690 			netdev_err(priv->dev,
2691 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2692 				   __func__, priv->lpi_irq, ret);
2693 			goto lpiirq_error;
2694 		}
2695 	}
2696 
2697 	stmmac_enable_all_queues(priv);
2698 	stmmac_start_all_queues(priv);
2699 
2700 	return 0;
2701 
2702 lpiirq_error:
2703 	if (priv->wol_irq != dev->irq)
2704 		free_irq(priv->wol_irq, dev);
2705 wolirq_error:
2706 	free_irq(dev->irq, dev);
2707 irq_error:
2708 	if (dev->phydev)
2709 		phy_stop(dev->phydev);
2710 
2711 	del_timer_sync(&priv->txtimer);
2712 	stmmac_hw_teardown(dev);
2713 init_error:
2714 	free_dma_desc_resources(priv);
2715 dma_desc_error:
2716 	if (dev->phydev)
2717 		phy_disconnect(dev->phydev);
2718 
2719 	return ret;
2720 }
2721 
2722 /**
2723  *  stmmac_release - close entry point of the driver
2724  *  @dev : device pointer.
2725  *  Description:
2726  *  This is the stop entry point of the driver.
2727  */
2728 static int stmmac_release(struct net_device *dev)
2729 {
2730 	struct stmmac_priv *priv = netdev_priv(dev);
2731 
2732 	if (priv->eee_enabled)
2733 		del_timer_sync(&priv->eee_ctrl_timer);
2734 
2735 	/* Stop and disconnect the PHY */
2736 	if (dev->phydev) {
2737 		phy_stop(dev->phydev);
2738 		phy_disconnect(dev->phydev);
2739 	}
2740 
2741 	stmmac_stop_all_queues(priv);
2742 
2743 	stmmac_disable_all_queues(priv);
2744 
2745 	del_timer_sync(&priv->txtimer);
2746 
2747 	/* Free the IRQ lines */
2748 	free_irq(dev->irq, dev);
2749 	if (priv->wol_irq != dev->irq)
2750 		free_irq(priv->wol_irq, dev);
2751 	if (priv->lpi_irq > 0)
2752 		free_irq(priv->lpi_irq, dev);
2753 
2754 	/* Stop TX/RX DMA and clear the descriptors */
2755 	stmmac_stop_all_dma(priv);
2756 
2757 	/* Release and free the Rx/Tx resources */
2758 	free_dma_desc_resources(priv);
2759 
2760 	/* Disable the MAC Rx/Tx */
2761 	priv->hw->mac->set_mac(priv->ioaddr, false);
2762 
2763 	netif_carrier_off(dev);
2764 
2765 #ifdef CONFIG_DEBUG_FS
2766 	stmmac_exit_fs(dev);
2767 #endif
2768 
2769 	stmmac_release_ptp(priv);
2770 
2771 	return 0;
2772 }
2773 
2774 /**
2775  *  stmmac_tso_allocator - close entry point of the driver
2776  *  @priv: driver private structure
2777  *  @des: buffer start address
2778  *  @total_len: total length to fill in descriptors
2779  *  @last_segmant: condition for the last descriptor
2780  *  @queue: TX queue index
2781  *  Description:
2782  *  This function fills descriptor and request new descriptors according to
2783  *  buffer length to fill
2784  */
2785 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2786 				 int total_len, bool last_segment, u32 queue)
2787 {
2788 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2789 	struct dma_desc *desc;
2790 	u32 buff_size;
2791 	int tmp_len;
2792 
2793 	tmp_len = total_len;
2794 
2795 	while (tmp_len > 0) {
2796 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2797 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2798 		desc = tx_q->dma_tx + tx_q->cur_tx;
2799 
2800 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2801 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2802 			    TSO_MAX_BUFF_SIZE : tmp_len;
2803 
2804 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2805 			0, 1,
2806 			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2807 			0, 0);
2808 
2809 		tmp_len -= TSO_MAX_BUFF_SIZE;
2810 	}
2811 }
2812 
2813 /**
2814  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2815  *  @skb : the socket buffer
2816  *  @dev : device pointer
2817  *  Description: this is the transmit function that is called on TSO frames
2818  *  (support available on GMAC4 and newer chips).
2819  *  Diagram below show the ring programming in case of TSO frames:
2820  *
2821  *  First Descriptor
2822  *   --------
2823  *   | DES0 |---> buffer1 = L2/L3/L4 header
2824  *   | DES1 |---> TCP Payload (can continue on next descr...)
2825  *   | DES2 |---> buffer 1 and 2 len
2826  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2827  *   --------
2828  *	|
2829  *     ...
2830  *	|
2831  *   --------
2832  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2833  *   | DES1 | --|
2834  *   | DES2 | --> buffer 1 and 2 len
2835  *   | DES3 |
2836  *   --------
2837  *
2838  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2839  */
2840 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2841 {
2842 	struct dma_desc *desc, *first, *mss_desc = NULL;
2843 	struct stmmac_priv *priv = netdev_priv(dev);
2844 	int nfrags = skb_shinfo(skb)->nr_frags;
2845 	u32 queue = skb_get_queue_mapping(skb);
2846 	unsigned int first_entry, des;
2847 	struct stmmac_tx_queue *tx_q;
2848 	int tmp_pay_len = 0;
2849 	u32 pay_len, mss;
2850 	u8 proto_hdr_len;
2851 	int i;
2852 
2853 	tx_q = &priv->tx_queue[queue];
2854 
2855 	/* Compute header lengths */
2856 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2857 
2858 	/* Desc availability based on threshold should be enough safe */
2859 	if (unlikely(stmmac_tx_avail(priv, queue) <
2860 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2861 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2862 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2863 								queue));
2864 			/* This is a hard error, log it. */
2865 			netdev_err(priv->dev,
2866 				   "%s: Tx Ring full when queue awake\n",
2867 				   __func__);
2868 		}
2869 		return NETDEV_TX_BUSY;
2870 	}
2871 
2872 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2873 
2874 	mss = skb_shinfo(skb)->gso_size;
2875 
2876 	/* set new MSS value if needed */
2877 	if (mss != tx_q->mss) {
2878 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2879 		priv->hw->desc->set_mss(mss_desc, mss);
2880 		tx_q->mss = mss;
2881 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2882 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2883 	}
2884 
2885 	if (netif_msg_tx_queued(priv)) {
2886 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2887 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2888 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2889 			skb->data_len);
2890 	}
2891 
2892 	first_entry = tx_q->cur_tx;
2893 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2894 
2895 	desc = tx_q->dma_tx + first_entry;
2896 	first = desc;
2897 
2898 	/* first descriptor: fill Headers on Buf1 */
2899 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2900 			     DMA_TO_DEVICE);
2901 	if (dma_mapping_error(priv->device, des))
2902 		goto dma_map_err;
2903 
2904 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2905 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2906 
2907 	first->des0 = cpu_to_le32(des);
2908 
2909 	/* Fill start of payload in buff2 of first descriptor */
2910 	if (pay_len)
2911 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2912 
2913 	/* If needed take extra descriptors to fill the remaining payload */
2914 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2915 
2916 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2917 
2918 	/* Prepare fragments */
2919 	for (i = 0; i < nfrags; i++) {
2920 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2921 
2922 		des = skb_frag_dma_map(priv->device, frag, 0,
2923 				       skb_frag_size(frag),
2924 				       DMA_TO_DEVICE);
2925 		if (dma_mapping_error(priv->device, des))
2926 			goto dma_map_err;
2927 
2928 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2929 				     (i == nfrags - 1), queue);
2930 
2931 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2932 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2933 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2934 	}
2935 
2936 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2937 
2938 	/* Only the last descriptor gets to point to the skb. */
2939 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2940 
2941 	/* We've used all descriptors we need for this skb, however,
2942 	 * advance cur_tx so that it references a fresh descriptor.
2943 	 * ndo_start_xmit will fill this descriptor the next time it's
2944 	 * called and stmmac_tx_clean may clean up to this descriptor.
2945 	 */
2946 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2947 
2948 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2949 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2950 			  __func__);
2951 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2952 	}
2953 
2954 	dev->stats.tx_bytes += skb->len;
2955 	priv->xstats.tx_tso_frames++;
2956 	priv->xstats.tx_tso_nfrags += nfrags;
2957 
2958 	/* Manage tx mitigation */
2959 	priv->tx_count_frames += nfrags + 1;
2960 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2961 		mod_timer(&priv->txtimer,
2962 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2963 	} else {
2964 		priv->tx_count_frames = 0;
2965 		priv->hw->desc->set_tx_ic(desc);
2966 		priv->xstats.tx_set_ic_bit++;
2967 	}
2968 
2969 	skb_tx_timestamp(skb);
2970 
2971 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2972 		     priv->hwts_tx_en)) {
2973 		/* declare that device is doing timestamping */
2974 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2975 		priv->hw->desc->enable_tx_timestamp(first);
2976 	}
2977 
2978 	/* Complete the first descriptor before granting the DMA */
2979 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2980 			proto_hdr_len,
2981 			pay_len,
2982 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2983 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2984 
2985 	/* If context desc is used to change MSS */
2986 	if (mss_desc)
2987 		priv->hw->desc->set_tx_owner(mss_desc);
2988 
2989 	/* The own bit must be the latest setting done when prepare the
2990 	 * descriptor and then barrier is needed to make sure that
2991 	 * all is coherent before granting the DMA engine.
2992 	 */
2993 	dma_wmb();
2994 
2995 	if (netif_msg_pktdata(priv)) {
2996 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2997 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2998 			tx_q->cur_tx, first, nfrags);
2999 
3000 		priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
3001 					     0);
3002 
3003 		pr_info(">>> frame to be transmitted: ");
3004 		print_pkt(skb->data, skb_headlen(skb));
3005 	}
3006 
3007 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3008 
3009 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3010 				       queue);
3011 
3012 	return NETDEV_TX_OK;
3013 
3014 dma_map_err:
3015 	dev_err(priv->device, "Tx dma map failed\n");
3016 	dev_kfree_skb(skb);
3017 	priv->dev->stats.tx_dropped++;
3018 	return NETDEV_TX_OK;
3019 }
3020 
3021 /**
3022  *  stmmac_xmit - Tx entry point of the driver
3023  *  @skb : the socket buffer
3024  *  @dev : device pointer
3025  *  Description : this is the tx entry point of the driver.
3026  *  It programs the chain or the ring and supports oversized frames
3027  *  and SG feature.
3028  */
3029 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3030 {
3031 	struct stmmac_priv *priv = netdev_priv(dev);
3032 	unsigned int nopaged_len = skb_headlen(skb);
3033 	int i, csum_insertion = 0, is_jumbo = 0;
3034 	u32 queue = skb_get_queue_mapping(skb);
3035 	int nfrags = skb_shinfo(skb)->nr_frags;
3036 	int entry;
3037 	unsigned int first_entry;
3038 	struct dma_desc *desc, *first;
3039 	struct stmmac_tx_queue *tx_q;
3040 	unsigned int enh_desc;
3041 	unsigned int des;
3042 
3043 	tx_q = &priv->tx_queue[queue];
3044 
3045 	/* Manage oversized TCP frames for GMAC4 device */
3046 	if (skb_is_gso(skb) && priv->tso) {
3047 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3048 			return stmmac_tso_xmit(skb, dev);
3049 	}
3050 
3051 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3052 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3053 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3054 								queue));
3055 			/* This is a hard error, log it. */
3056 			netdev_err(priv->dev,
3057 				   "%s: Tx Ring full when queue awake\n",
3058 				   __func__);
3059 		}
3060 		return NETDEV_TX_BUSY;
3061 	}
3062 
3063 	if (priv->tx_path_in_lpi_mode)
3064 		stmmac_disable_eee_mode(priv);
3065 
3066 	entry = tx_q->cur_tx;
3067 	first_entry = entry;
3068 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3069 
3070 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3071 
3072 	if (likely(priv->extend_desc))
3073 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3074 	else
3075 		desc = tx_q->dma_tx + entry;
3076 
3077 	first = desc;
3078 
3079 	enh_desc = priv->plat->enh_desc;
3080 	/* To program the descriptors according to the size of the frame */
3081 	if (enh_desc)
3082 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3083 
3084 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3085 					 DWMAC_CORE_4_00)) {
3086 		entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3087 		if (unlikely(entry < 0))
3088 			goto dma_map_err;
3089 	}
3090 
3091 	for (i = 0; i < nfrags; i++) {
3092 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3093 		int len = skb_frag_size(frag);
3094 		bool last_segment = (i == (nfrags - 1));
3095 
3096 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3097 		WARN_ON(tx_q->tx_skbuff[entry]);
3098 
3099 		if (likely(priv->extend_desc))
3100 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3101 		else
3102 			desc = tx_q->dma_tx + entry;
3103 
3104 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3105 				       DMA_TO_DEVICE);
3106 		if (dma_mapping_error(priv->device, des))
3107 			goto dma_map_err; /* should reuse desc w/o issues */
3108 
3109 		tx_q->tx_skbuff_dma[entry].buf = des;
3110 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3111 			desc->des0 = cpu_to_le32(des);
3112 		else
3113 			desc->des2 = cpu_to_le32(des);
3114 
3115 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3116 		tx_q->tx_skbuff_dma[entry].len = len;
3117 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3118 
3119 		/* Prepare the descriptor and set the own bit too */
3120 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3121 						priv->mode, 1, last_segment,
3122 						skb->len);
3123 	}
3124 
3125 	/* Only the last descriptor gets to point to the skb. */
3126 	tx_q->tx_skbuff[entry] = skb;
3127 
3128 	/* We've used all descriptors we need for this skb, however,
3129 	 * advance cur_tx so that it references a fresh descriptor.
3130 	 * ndo_start_xmit will fill this descriptor the next time it's
3131 	 * called and stmmac_tx_clean may clean up to this descriptor.
3132 	 */
3133 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3134 	tx_q->cur_tx = entry;
3135 
3136 	if (netif_msg_pktdata(priv)) {
3137 		void *tx_head;
3138 
3139 		netdev_dbg(priv->dev,
3140 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3141 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3142 			   entry, first, nfrags);
3143 
3144 		if (priv->extend_desc)
3145 			tx_head = (void *)tx_q->dma_etx;
3146 		else
3147 			tx_head = (void *)tx_q->dma_tx;
3148 
3149 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3150 
3151 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3152 		print_pkt(skb->data, skb->len);
3153 	}
3154 
3155 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3156 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3157 			  __func__);
3158 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3159 	}
3160 
3161 	dev->stats.tx_bytes += skb->len;
3162 
3163 	/* According to the coalesce parameter the IC bit for the latest
3164 	 * segment is reset and the timer re-started to clean the tx status.
3165 	 * This approach takes care about the fragments: desc is the first
3166 	 * element in case of no SG.
3167 	 */
3168 	priv->tx_count_frames += nfrags + 1;
3169 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3170 		mod_timer(&priv->txtimer,
3171 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3172 	} else {
3173 		priv->tx_count_frames = 0;
3174 		priv->hw->desc->set_tx_ic(desc);
3175 		priv->xstats.tx_set_ic_bit++;
3176 	}
3177 
3178 	skb_tx_timestamp(skb);
3179 
3180 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3181 	 * problems because all the descriptors are actually ready to be
3182 	 * passed to the DMA engine.
3183 	 */
3184 	if (likely(!is_jumbo)) {
3185 		bool last_segment = (nfrags == 0);
3186 
3187 		des = dma_map_single(priv->device, skb->data,
3188 				     nopaged_len, DMA_TO_DEVICE);
3189 		if (dma_mapping_error(priv->device, des))
3190 			goto dma_map_err;
3191 
3192 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3193 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3194 			first->des0 = cpu_to_le32(des);
3195 		else
3196 			first->des2 = cpu_to_le32(des);
3197 
3198 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3199 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3200 
3201 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3202 			     priv->hwts_tx_en)) {
3203 			/* declare that device is doing timestamping */
3204 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3205 			priv->hw->desc->enable_tx_timestamp(first);
3206 		}
3207 
3208 		/* Prepare the first descriptor setting the OWN bit too */
3209 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3210 						csum_insertion, priv->mode, 1,
3211 						last_segment, skb->len);
3212 
3213 		/* The own bit must be the latest setting done when prepare the
3214 		 * descriptor and then barrier is needed to make sure that
3215 		 * all is coherent before granting the DMA engine.
3216 		 */
3217 		dma_wmb();
3218 	}
3219 
3220 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3221 
3222 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3223 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3224 	else
3225 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3226 					       queue);
3227 
3228 	return NETDEV_TX_OK;
3229 
3230 dma_map_err:
3231 	netdev_err(priv->dev, "Tx DMA map failed\n");
3232 	dev_kfree_skb(skb);
3233 	priv->dev->stats.tx_dropped++;
3234 	return NETDEV_TX_OK;
3235 }
3236 
3237 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3238 {
3239 	struct ethhdr *ehdr;
3240 	u16 vlanid;
3241 
3242 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3243 	    NETIF_F_HW_VLAN_CTAG_RX &&
3244 	    !__vlan_get_tag(skb, &vlanid)) {
3245 		/* pop the vlan tag */
3246 		ehdr = (struct ethhdr *)skb->data;
3247 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3248 		skb_pull(skb, VLAN_HLEN);
3249 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3250 	}
3251 }
3252 
3253 
3254 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3255 {
3256 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3257 		return 0;
3258 
3259 	return 1;
3260 }
3261 
3262 /**
3263  * stmmac_rx_refill - refill used skb preallocated buffers
3264  * @priv: driver private structure
3265  * @queue: RX queue index
3266  * Description : this is to reallocate the skb for the reception process
3267  * that is based on zero-copy.
3268  */
3269 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3270 {
3271 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3272 	int dirty = stmmac_rx_dirty(priv, queue);
3273 	unsigned int entry = rx_q->dirty_rx;
3274 
3275 	int bfsize = priv->dma_buf_sz;
3276 
3277 	while (dirty-- > 0) {
3278 		struct dma_desc *p;
3279 
3280 		if (priv->extend_desc)
3281 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3282 		else
3283 			p = rx_q->dma_rx + entry;
3284 
3285 		if (likely(!rx_q->rx_skbuff[entry])) {
3286 			struct sk_buff *skb;
3287 
3288 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3289 			if (unlikely(!skb)) {
3290 				/* so for a while no zero-copy! */
3291 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3292 				if (unlikely(net_ratelimit()))
3293 					dev_err(priv->device,
3294 						"fail to alloc skb entry %d\n",
3295 						entry);
3296 				break;
3297 			}
3298 
3299 			rx_q->rx_skbuff[entry] = skb;
3300 			rx_q->rx_skbuff_dma[entry] =
3301 			    dma_map_single(priv->device, skb->data, bfsize,
3302 					   DMA_FROM_DEVICE);
3303 			if (dma_mapping_error(priv->device,
3304 					      rx_q->rx_skbuff_dma[entry])) {
3305 				netdev_err(priv->dev, "Rx DMA map failed\n");
3306 				dev_kfree_skb(skb);
3307 				break;
3308 			}
3309 
3310 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3311 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3312 				p->des1 = 0;
3313 			} else {
3314 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3315 			}
3316 			if (priv->hw->mode->refill_desc3)
3317 				priv->hw->mode->refill_desc3(rx_q, p);
3318 
3319 			if (rx_q->rx_zeroc_thresh > 0)
3320 				rx_q->rx_zeroc_thresh--;
3321 
3322 			netif_dbg(priv, rx_status, priv->dev,
3323 				  "refill entry #%d\n", entry);
3324 		}
3325 		dma_wmb();
3326 
3327 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3328 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3329 		else
3330 			priv->hw->desc->set_rx_owner(p);
3331 
3332 		dma_wmb();
3333 
3334 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3335 	}
3336 	rx_q->dirty_rx = entry;
3337 }
3338 
3339 /**
3340  * stmmac_rx - manage the receive process
3341  * @priv: driver private structure
3342  * @limit: napi bugget
3343  * @queue: RX queue index.
3344  * Description :  this the function called by the napi poll method.
3345  * It gets all the frames inside the ring.
3346  */
3347 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3348 {
3349 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3350 	unsigned int entry = rx_q->cur_rx;
3351 	int coe = priv->hw->rx_csum;
3352 	unsigned int next_entry;
3353 	unsigned int count = 0;
3354 
3355 	if (netif_msg_rx_status(priv)) {
3356 		void *rx_head;
3357 
3358 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3359 		if (priv->extend_desc)
3360 			rx_head = (void *)rx_q->dma_erx;
3361 		else
3362 			rx_head = (void *)rx_q->dma_rx;
3363 
3364 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3365 	}
3366 	while (count < limit) {
3367 		int status;
3368 		struct dma_desc *p;
3369 		struct dma_desc *np;
3370 
3371 		if (priv->extend_desc)
3372 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3373 		else
3374 			p = rx_q->dma_rx + entry;
3375 
3376 		/* read the status of the incoming frame */
3377 		status = priv->hw->desc->rx_status(&priv->dev->stats,
3378 						   &priv->xstats, p);
3379 		/* check if managed by the DMA otherwise go ahead */
3380 		if (unlikely(status & dma_own))
3381 			break;
3382 
3383 		count++;
3384 
3385 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3386 		next_entry = rx_q->cur_rx;
3387 
3388 		if (priv->extend_desc)
3389 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3390 		else
3391 			np = rx_q->dma_rx + next_entry;
3392 
3393 		prefetch(np);
3394 
3395 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3396 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
3397 							   &priv->xstats,
3398 							   rx_q->dma_erx +
3399 							   entry);
3400 		if (unlikely(status == discard_frame)) {
3401 			priv->dev->stats.rx_errors++;
3402 			if (priv->hwts_rx_en && !priv->extend_desc) {
3403 				/* DESC2 & DESC3 will be overwritten by device
3404 				 * with timestamp value, hence reinitialize
3405 				 * them in stmmac_rx_refill() function so that
3406 				 * device can reuse it.
3407 				 */
3408 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3409 				rx_q->rx_skbuff[entry] = NULL;
3410 				dma_unmap_single(priv->device,
3411 						 rx_q->rx_skbuff_dma[entry],
3412 						 priv->dma_buf_sz,
3413 						 DMA_FROM_DEVICE);
3414 			}
3415 		} else {
3416 			struct sk_buff *skb;
3417 			int frame_len;
3418 			unsigned int des;
3419 
3420 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3421 				des = le32_to_cpu(p->des0);
3422 			else
3423 				des = le32_to_cpu(p->des2);
3424 
3425 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3426 
3427 			/*  If frame length is greater than skb buffer size
3428 			 *  (preallocated during init) then the packet is
3429 			 *  ignored
3430 			 */
3431 			if (frame_len > priv->dma_buf_sz) {
3432 				netdev_err(priv->dev,
3433 					   "len %d larger than size (%d)\n",
3434 					   frame_len, priv->dma_buf_sz);
3435 				priv->dev->stats.rx_length_errors++;
3436 				break;
3437 			}
3438 
3439 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3440 			 * Type frames (LLC/LLC-SNAP)
3441 			 */
3442 			if (unlikely(status != llc_snap))
3443 				frame_len -= ETH_FCS_LEN;
3444 
3445 			if (netif_msg_rx_status(priv)) {
3446 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3447 					   p, entry, des);
3448 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3449 					   frame_len, status);
3450 			}
3451 
3452 			/* The zero-copy is always used for all the sizes
3453 			 * in case of GMAC4 because it needs
3454 			 * to refill the used descriptors, always.
3455 			 */
3456 			if (unlikely(!priv->plat->has_gmac4 &&
3457 				     ((frame_len < priv->rx_copybreak) ||
3458 				     stmmac_rx_threshold_count(rx_q)))) {
3459 				skb = netdev_alloc_skb_ip_align(priv->dev,
3460 								frame_len);
3461 				if (unlikely(!skb)) {
3462 					if (net_ratelimit())
3463 						dev_warn(priv->device,
3464 							 "packet dropped\n");
3465 					priv->dev->stats.rx_dropped++;
3466 					break;
3467 				}
3468 
3469 				dma_sync_single_for_cpu(priv->device,
3470 							rx_q->rx_skbuff_dma
3471 							[entry], frame_len,
3472 							DMA_FROM_DEVICE);
3473 				skb_copy_to_linear_data(skb,
3474 							rx_q->
3475 							rx_skbuff[entry]->data,
3476 							frame_len);
3477 
3478 				skb_put(skb, frame_len);
3479 				dma_sync_single_for_device(priv->device,
3480 							   rx_q->rx_skbuff_dma
3481 							   [entry], frame_len,
3482 							   DMA_FROM_DEVICE);
3483 			} else {
3484 				skb = rx_q->rx_skbuff[entry];
3485 				if (unlikely(!skb)) {
3486 					netdev_err(priv->dev,
3487 						   "%s: Inconsistent Rx chain\n",
3488 						   priv->dev->name);
3489 					priv->dev->stats.rx_dropped++;
3490 					break;
3491 				}
3492 				prefetch(skb->data - NET_IP_ALIGN);
3493 				rx_q->rx_skbuff[entry] = NULL;
3494 				rx_q->rx_zeroc_thresh++;
3495 
3496 				skb_put(skb, frame_len);
3497 				dma_unmap_single(priv->device,
3498 						 rx_q->rx_skbuff_dma[entry],
3499 						 priv->dma_buf_sz,
3500 						 DMA_FROM_DEVICE);
3501 			}
3502 
3503 			if (netif_msg_pktdata(priv)) {
3504 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3505 					   frame_len);
3506 				print_pkt(skb->data, frame_len);
3507 			}
3508 
3509 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3510 
3511 			stmmac_rx_vlan(priv->dev, skb);
3512 
3513 			skb->protocol = eth_type_trans(skb, priv->dev);
3514 
3515 			if (unlikely(!coe))
3516 				skb_checksum_none_assert(skb);
3517 			else
3518 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3519 
3520 			napi_gro_receive(&rx_q->napi, skb);
3521 
3522 			priv->dev->stats.rx_packets++;
3523 			priv->dev->stats.rx_bytes += frame_len;
3524 		}
3525 		entry = next_entry;
3526 	}
3527 
3528 	stmmac_rx_refill(priv, queue);
3529 
3530 	priv->xstats.rx_pkt_n += count;
3531 
3532 	return count;
3533 }
3534 
3535 /**
3536  *  stmmac_poll - stmmac poll method (NAPI)
3537  *  @napi : pointer to the napi structure.
3538  *  @budget : maximum number of packets that the current CPU can receive from
3539  *	      all interfaces.
3540  *  Description :
3541  *  To look at the incoming frames and clear the tx resources.
3542  */
3543 static int stmmac_poll(struct napi_struct *napi, int budget)
3544 {
3545 	struct stmmac_rx_queue *rx_q =
3546 		container_of(napi, struct stmmac_rx_queue, napi);
3547 	struct stmmac_priv *priv = rx_q->priv_data;
3548 	u32 tx_count = priv->plat->tx_queues_to_use;
3549 	u32 chan = rx_q->queue_index;
3550 	int work_done = 0;
3551 	u32 queue;
3552 
3553 	priv->xstats.napi_poll++;
3554 
3555 	/* check all the queues */
3556 	for (queue = 0; queue < tx_count; queue++)
3557 		stmmac_tx_clean(priv, queue);
3558 
3559 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3560 	if (work_done < budget) {
3561 		napi_complete_done(napi, work_done);
3562 		stmmac_enable_dma_irq(priv, chan);
3563 	}
3564 	return work_done;
3565 }
3566 
3567 /**
3568  *  stmmac_tx_timeout
3569  *  @dev : Pointer to net device structure
3570  *  Description: this function is called when a packet transmission fails to
3571  *   complete within a reasonable time. The driver will mark the error in the
3572  *   netdev structure and arrange for the device to be reset to a sane state
3573  *   in order to transmit a new packet.
3574  */
3575 static void stmmac_tx_timeout(struct net_device *dev)
3576 {
3577 	struct stmmac_priv *priv = netdev_priv(dev);
3578 	u32 tx_count = priv->plat->tx_queues_to_use;
3579 	u32 chan;
3580 
3581 	/* Clear Tx resources and restart transmitting again */
3582 	for (chan = 0; chan < tx_count; chan++)
3583 		stmmac_tx_err(priv, chan);
3584 }
3585 
3586 /**
3587  *  stmmac_set_rx_mode - entry point for multicast addressing
3588  *  @dev : pointer to the device structure
3589  *  Description:
3590  *  This function is a driver entry point which gets called by the kernel
3591  *  whenever multicast addresses must be enabled/disabled.
3592  *  Return value:
3593  *  void.
3594  */
3595 static void stmmac_set_rx_mode(struct net_device *dev)
3596 {
3597 	struct stmmac_priv *priv = netdev_priv(dev);
3598 
3599 	priv->hw->mac->set_filter(priv->hw, dev);
3600 }
3601 
3602 /**
3603  *  stmmac_change_mtu - entry point to change MTU size for the device.
3604  *  @dev : device pointer.
3605  *  @new_mtu : the new MTU size for the device.
3606  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3607  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3608  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3609  *  Return value:
3610  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3611  *  file on failure.
3612  */
3613 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3614 {
3615 	struct stmmac_priv *priv = netdev_priv(dev);
3616 
3617 	if (netif_running(dev)) {
3618 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3619 		return -EBUSY;
3620 	}
3621 
3622 	dev->mtu = new_mtu;
3623 
3624 	netdev_update_features(dev);
3625 
3626 	return 0;
3627 }
3628 
3629 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3630 					     netdev_features_t features)
3631 {
3632 	struct stmmac_priv *priv = netdev_priv(dev);
3633 
3634 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3635 		features &= ~NETIF_F_RXCSUM;
3636 
3637 	if (!priv->plat->tx_coe)
3638 		features &= ~NETIF_F_CSUM_MASK;
3639 
3640 	/* Some GMAC devices have a bugged Jumbo frame support that
3641 	 * needs to have the Tx COE disabled for oversized frames
3642 	 * (due to limited buffer sizes). In this case we disable
3643 	 * the TX csum insertion in the TDES and not use SF.
3644 	 */
3645 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3646 		features &= ~NETIF_F_CSUM_MASK;
3647 
3648 	/* Disable tso if asked by ethtool */
3649 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3650 		if (features & NETIF_F_TSO)
3651 			priv->tso = true;
3652 		else
3653 			priv->tso = false;
3654 	}
3655 
3656 	return features;
3657 }
3658 
3659 static int stmmac_set_features(struct net_device *netdev,
3660 			       netdev_features_t features)
3661 {
3662 	struct stmmac_priv *priv = netdev_priv(netdev);
3663 
3664 	/* Keep the COE Type in case of csum is supporting */
3665 	if (features & NETIF_F_RXCSUM)
3666 		priv->hw->rx_csum = priv->plat->rx_coe;
3667 	else
3668 		priv->hw->rx_csum = 0;
3669 	/* No check needed because rx_coe has been set before and it will be
3670 	 * fixed in case of issue.
3671 	 */
3672 	priv->hw->mac->rx_ipc(priv->hw);
3673 
3674 	return 0;
3675 }
3676 
3677 /**
3678  *  stmmac_interrupt - main ISR
3679  *  @irq: interrupt number.
3680  *  @dev_id: to pass the net device pointer.
3681  *  Description: this is the main driver interrupt service routine.
3682  *  It can call:
3683  *  o DMA service routine (to manage incoming frame reception and transmission
3684  *    status)
3685  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3686  *    interrupts.
3687  */
3688 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3689 {
3690 	struct net_device *dev = (struct net_device *)dev_id;
3691 	struct stmmac_priv *priv = netdev_priv(dev);
3692 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3693 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3694 	u32 queues_count;
3695 	u32 queue;
3696 
3697 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3698 
3699 	if (priv->irq_wake)
3700 		pm_wakeup_event(priv->device, 0);
3701 
3702 	if (unlikely(!dev)) {
3703 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3704 		return IRQ_NONE;
3705 	}
3706 
3707 	/* To handle GMAC own interrupts */
3708 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3709 		int status = priv->hw->mac->host_irq_status(priv->hw,
3710 							    &priv->xstats);
3711 
3712 		if (unlikely(status)) {
3713 			/* For LPI we need to save the tx status */
3714 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3715 				priv->tx_path_in_lpi_mode = true;
3716 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3717 				priv->tx_path_in_lpi_mode = false;
3718 		}
3719 
3720 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3721 			for (queue = 0; queue < queues_count; queue++) {
3722 				struct stmmac_rx_queue *rx_q =
3723 				&priv->rx_queue[queue];
3724 
3725 				status |=
3726 				priv->hw->mac->host_mtl_irq_status(priv->hw,
3727 								   queue);
3728 
3729 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3730 				    priv->hw->dma->set_rx_tail_ptr)
3731 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3732 								rx_q->rx_tail_addr,
3733 								queue);
3734 			}
3735 		}
3736 
3737 		/* PCS link status */
3738 		if (priv->hw->pcs) {
3739 			if (priv->xstats.pcs_link)
3740 				netif_carrier_on(dev);
3741 			else
3742 				netif_carrier_off(dev);
3743 		}
3744 	}
3745 
3746 	/* To handle DMA interrupts */
3747 	stmmac_dma_interrupt(priv);
3748 
3749 	return IRQ_HANDLED;
3750 }
3751 
3752 #ifdef CONFIG_NET_POLL_CONTROLLER
3753 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3754  * to allow network I/O with interrupts disabled.
3755  */
3756 static void stmmac_poll_controller(struct net_device *dev)
3757 {
3758 	disable_irq(dev->irq);
3759 	stmmac_interrupt(dev->irq, dev);
3760 	enable_irq(dev->irq);
3761 }
3762 #endif
3763 
3764 /**
3765  *  stmmac_ioctl - Entry point for the Ioctl
3766  *  @dev: Device pointer.
3767  *  @rq: An IOCTL specefic structure, that can contain a pointer to
3768  *  a proprietary structure used to pass information to the driver.
3769  *  @cmd: IOCTL command
3770  *  Description:
3771  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3772  */
3773 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3774 {
3775 	int ret = -EOPNOTSUPP;
3776 
3777 	if (!netif_running(dev))
3778 		return -EINVAL;
3779 
3780 	switch (cmd) {
3781 	case SIOCGMIIPHY:
3782 	case SIOCGMIIREG:
3783 	case SIOCSMIIREG:
3784 		if (!dev->phydev)
3785 			return -EINVAL;
3786 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3787 		break;
3788 	case SIOCSHWTSTAMP:
3789 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3790 		break;
3791 	default:
3792 		break;
3793 	}
3794 
3795 	return ret;
3796 }
3797 
3798 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3799 {
3800 	struct stmmac_priv *priv = netdev_priv(ndev);
3801 	int ret = 0;
3802 
3803 	ret = eth_mac_addr(ndev, addr);
3804 	if (ret)
3805 		return ret;
3806 
3807 	priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3808 
3809 	return ret;
3810 }
3811 
3812 #ifdef CONFIG_DEBUG_FS
3813 static struct dentry *stmmac_fs_dir;
3814 
3815 static void sysfs_display_ring(void *head, int size, int extend_desc,
3816 			       struct seq_file *seq)
3817 {
3818 	int i;
3819 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3820 	struct dma_desc *p = (struct dma_desc *)head;
3821 
3822 	for (i = 0; i < size; i++) {
3823 		if (extend_desc) {
3824 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3825 				   i, (unsigned int)virt_to_phys(ep),
3826 				   le32_to_cpu(ep->basic.des0),
3827 				   le32_to_cpu(ep->basic.des1),
3828 				   le32_to_cpu(ep->basic.des2),
3829 				   le32_to_cpu(ep->basic.des3));
3830 			ep++;
3831 		} else {
3832 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3833 				   i, (unsigned int)virt_to_phys(p),
3834 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3835 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3836 			p++;
3837 		}
3838 		seq_printf(seq, "\n");
3839 	}
3840 }
3841 
3842 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3843 {
3844 	struct net_device *dev = seq->private;
3845 	struct stmmac_priv *priv = netdev_priv(dev);
3846 	u32 rx_count = priv->plat->rx_queues_to_use;
3847 	u32 tx_count = priv->plat->tx_queues_to_use;
3848 	u32 queue;
3849 
3850 	for (queue = 0; queue < rx_count; queue++) {
3851 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3852 
3853 		seq_printf(seq, "RX Queue %d:\n", queue);
3854 
3855 		if (priv->extend_desc) {
3856 			seq_printf(seq, "Extended descriptor ring:\n");
3857 			sysfs_display_ring((void *)rx_q->dma_erx,
3858 					   DMA_RX_SIZE, 1, seq);
3859 		} else {
3860 			seq_printf(seq, "Descriptor ring:\n");
3861 			sysfs_display_ring((void *)rx_q->dma_rx,
3862 					   DMA_RX_SIZE, 0, seq);
3863 		}
3864 	}
3865 
3866 	for (queue = 0; queue < tx_count; queue++) {
3867 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3868 
3869 		seq_printf(seq, "TX Queue %d:\n", queue);
3870 
3871 		if (priv->extend_desc) {
3872 			seq_printf(seq, "Extended descriptor ring:\n");
3873 			sysfs_display_ring((void *)tx_q->dma_etx,
3874 					   DMA_TX_SIZE, 1, seq);
3875 		} else {
3876 			seq_printf(seq, "Descriptor ring:\n");
3877 			sysfs_display_ring((void *)tx_q->dma_tx,
3878 					   DMA_TX_SIZE, 0, seq);
3879 		}
3880 	}
3881 
3882 	return 0;
3883 }
3884 
3885 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3886 {
3887 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3888 }
3889 
3890 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3891 
3892 static const struct file_operations stmmac_rings_status_fops = {
3893 	.owner = THIS_MODULE,
3894 	.open = stmmac_sysfs_ring_open,
3895 	.read = seq_read,
3896 	.llseek = seq_lseek,
3897 	.release = single_release,
3898 };
3899 
3900 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3901 {
3902 	struct net_device *dev = seq->private;
3903 	struct stmmac_priv *priv = netdev_priv(dev);
3904 
3905 	if (!priv->hw_cap_support) {
3906 		seq_printf(seq, "DMA HW features not supported\n");
3907 		return 0;
3908 	}
3909 
3910 	seq_printf(seq, "==============================\n");
3911 	seq_printf(seq, "\tDMA HW features\n");
3912 	seq_printf(seq, "==============================\n");
3913 
3914 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3915 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3916 	seq_printf(seq, "\t1000 Mbps: %s\n",
3917 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3918 	seq_printf(seq, "\tHalf duplex: %s\n",
3919 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3920 	seq_printf(seq, "\tHash Filter: %s\n",
3921 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3922 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3923 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3924 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3925 		   (priv->dma_cap.pcs) ? "Y" : "N");
3926 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3927 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3928 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3929 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3930 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3931 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3932 	seq_printf(seq, "\tRMON module: %s\n",
3933 		   (priv->dma_cap.rmon) ? "Y" : "N");
3934 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3935 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3936 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3937 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3938 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3939 		   (priv->dma_cap.eee) ? "Y" : "N");
3940 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3941 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3942 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3943 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3944 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3945 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3946 	} else {
3947 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3948 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3949 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3950 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3951 	}
3952 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3953 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3954 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3955 		   priv->dma_cap.number_rx_channel);
3956 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3957 		   priv->dma_cap.number_tx_channel);
3958 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3959 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3960 
3961 	return 0;
3962 }
3963 
3964 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3965 {
3966 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3967 }
3968 
3969 static const struct file_operations stmmac_dma_cap_fops = {
3970 	.owner = THIS_MODULE,
3971 	.open = stmmac_sysfs_dma_cap_open,
3972 	.read = seq_read,
3973 	.llseek = seq_lseek,
3974 	.release = single_release,
3975 };
3976 
3977 static int stmmac_init_fs(struct net_device *dev)
3978 {
3979 	struct stmmac_priv *priv = netdev_priv(dev);
3980 
3981 	/* Create per netdev entries */
3982 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3983 
3984 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3985 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3986 
3987 		return -ENOMEM;
3988 	}
3989 
3990 	/* Entry to report DMA RX/TX rings */
3991 	priv->dbgfs_rings_status =
3992 		debugfs_create_file("descriptors_status", S_IRUGO,
3993 				    priv->dbgfs_dir, dev,
3994 				    &stmmac_rings_status_fops);
3995 
3996 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3997 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3998 		debugfs_remove_recursive(priv->dbgfs_dir);
3999 
4000 		return -ENOMEM;
4001 	}
4002 
4003 	/* Entry to report the DMA HW features */
4004 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
4005 					    priv->dbgfs_dir,
4006 					    dev, &stmmac_dma_cap_fops);
4007 
4008 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4009 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4010 		debugfs_remove_recursive(priv->dbgfs_dir);
4011 
4012 		return -ENOMEM;
4013 	}
4014 
4015 	return 0;
4016 }
4017 
4018 static void stmmac_exit_fs(struct net_device *dev)
4019 {
4020 	struct stmmac_priv *priv = netdev_priv(dev);
4021 
4022 	debugfs_remove_recursive(priv->dbgfs_dir);
4023 }
4024 #endif /* CONFIG_DEBUG_FS */
4025 
4026 static const struct net_device_ops stmmac_netdev_ops = {
4027 	.ndo_open = stmmac_open,
4028 	.ndo_start_xmit = stmmac_xmit,
4029 	.ndo_stop = stmmac_release,
4030 	.ndo_change_mtu = stmmac_change_mtu,
4031 	.ndo_fix_features = stmmac_fix_features,
4032 	.ndo_set_features = stmmac_set_features,
4033 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4034 	.ndo_tx_timeout = stmmac_tx_timeout,
4035 	.ndo_do_ioctl = stmmac_ioctl,
4036 #ifdef CONFIG_NET_POLL_CONTROLLER
4037 	.ndo_poll_controller = stmmac_poll_controller,
4038 #endif
4039 	.ndo_set_mac_address = stmmac_set_mac_address,
4040 };
4041 
4042 /**
4043  *  stmmac_hw_init - Init the MAC device
4044  *  @priv: driver private structure
4045  *  Description: this function is to configure the MAC device according to
4046  *  some platform parameters or the HW capability register. It prepares the
4047  *  driver to use either ring or chain modes and to setup either enhanced or
4048  *  normal descriptors.
4049  */
4050 static int stmmac_hw_init(struct stmmac_priv *priv)
4051 {
4052 	struct mac_device_info *mac;
4053 
4054 	/* Identify the MAC HW device */
4055 	if (priv->plat->setup) {
4056 		mac = priv->plat->setup(priv);
4057 	} else if (priv->plat->has_gmac) {
4058 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
4059 		mac = dwmac1000_setup(priv->ioaddr,
4060 				      priv->plat->multicast_filter_bins,
4061 				      priv->plat->unicast_filter_entries,
4062 				      &priv->synopsys_id);
4063 	} else if (priv->plat->has_gmac4) {
4064 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
4065 		mac = dwmac4_setup(priv->ioaddr,
4066 				   priv->plat->multicast_filter_bins,
4067 				   priv->plat->unicast_filter_entries,
4068 				   &priv->synopsys_id);
4069 	} else {
4070 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4071 	}
4072 	if (!mac)
4073 		return -ENOMEM;
4074 
4075 	priv->hw = mac;
4076 
4077 	/* dwmac-sun8i only work in chain mode */
4078 	if (priv->plat->has_sun8i)
4079 		chain_mode = 1;
4080 
4081 	/* To use the chained or ring mode */
4082 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4083 		priv->hw->mode = &dwmac4_ring_mode_ops;
4084 	} else {
4085 		if (chain_mode) {
4086 			priv->hw->mode = &chain_mode_ops;
4087 			dev_info(priv->device, "Chain mode enabled\n");
4088 			priv->mode = STMMAC_CHAIN_MODE;
4089 		} else {
4090 			priv->hw->mode = &ring_mode_ops;
4091 			dev_info(priv->device, "Ring mode enabled\n");
4092 			priv->mode = STMMAC_RING_MODE;
4093 		}
4094 	}
4095 
4096 	/* Get the HW capability (new GMAC newer than 3.50a) */
4097 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4098 	if (priv->hw_cap_support) {
4099 		dev_info(priv->device, "DMA HW capability register supported\n");
4100 
4101 		/* We can override some gmac/dma configuration fields: e.g.
4102 		 * enh_desc, tx_coe (e.g. that are passed through the
4103 		 * platform) with the values from the HW capability
4104 		 * register (if supported).
4105 		 */
4106 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4107 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4108 		priv->hw->pmt = priv->plat->pmt;
4109 
4110 		/* TXCOE doesn't work in thresh DMA mode */
4111 		if (priv->plat->force_thresh_dma_mode)
4112 			priv->plat->tx_coe = 0;
4113 		else
4114 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4115 
4116 		/* In case of GMAC4 rx_coe is from HW cap register. */
4117 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4118 
4119 		if (priv->dma_cap.rx_coe_type2)
4120 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4121 		else if (priv->dma_cap.rx_coe_type1)
4122 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4123 
4124 	} else {
4125 		dev_info(priv->device, "No HW DMA feature register supported\n");
4126 	}
4127 
4128 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
4129 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
4130 		priv->hw->desc = &dwmac4_desc_ops;
4131 	else
4132 		stmmac_selec_desc_mode(priv);
4133 
4134 	if (priv->plat->rx_coe) {
4135 		priv->hw->rx_csum = priv->plat->rx_coe;
4136 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4137 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4138 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4139 	}
4140 	if (priv->plat->tx_coe)
4141 		dev_info(priv->device, "TX Checksum insertion supported\n");
4142 
4143 	if (priv->plat->pmt) {
4144 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4145 		device_set_wakeup_capable(priv->device, 1);
4146 	}
4147 
4148 	if (priv->dma_cap.tsoen)
4149 		dev_info(priv->device, "TSO supported\n");
4150 
4151 	return 0;
4152 }
4153 
4154 /**
4155  * stmmac_dvr_probe
4156  * @device: device pointer
4157  * @plat_dat: platform data pointer
4158  * @res: stmmac resource pointer
4159  * Description: this is the main probe function used to
4160  * call the alloc_etherdev, allocate the priv structure.
4161  * Return:
4162  * returns 0 on success, otherwise errno.
4163  */
4164 int stmmac_dvr_probe(struct device *device,
4165 		     struct plat_stmmacenet_data *plat_dat,
4166 		     struct stmmac_resources *res)
4167 {
4168 	struct net_device *ndev = NULL;
4169 	struct stmmac_priv *priv;
4170 	int ret = 0;
4171 	u32 queue;
4172 
4173 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4174 				  MTL_MAX_TX_QUEUES,
4175 				  MTL_MAX_RX_QUEUES);
4176 	if (!ndev)
4177 		return -ENOMEM;
4178 
4179 	SET_NETDEV_DEV(ndev, device);
4180 
4181 	priv = netdev_priv(ndev);
4182 	priv->device = device;
4183 	priv->dev = ndev;
4184 
4185 	stmmac_set_ethtool_ops(ndev);
4186 	priv->pause = pause;
4187 	priv->plat = plat_dat;
4188 	priv->ioaddr = res->addr;
4189 	priv->dev->base_addr = (unsigned long)res->addr;
4190 
4191 	priv->dev->irq = res->irq;
4192 	priv->wol_irq = res->wol_irq;
4193 	priv->lpi_irq = res->lpi_irq;
4194 
4195 	if (res->mac)
4196 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4197 
4198 	dev_set_drvdata(device, priv->dev);
4199 
4200 	/* Verify driver arguments */
4201 	stmmac_verify_args();
4202 
4203 	/* Override with kernel parameters if supplied XXX CRS XXX
4204 	 * this needs to have multiple instances
4205 	 */
4206 	if ((phyaddr >= 0) && (phyaddr <= 31))
4207 		priv->plat->phy_addr = phyaddr;
4208 
4209 	if (priv->plat->stmmac_rst) {
4210 		ret = reset_control_assert(priv->plat->stmmac_rst);
4211 		reset_control_deassert(priv->plat->stmmac_rst);
4212 		/* Some reset controllers have only reset callback instead of
4213 		 * assert + deassert callbacks pair.
4214 		 */
4215 		if (ret == -ENOTSUPP)
4216 			reset_control_reset(priv->plat->stmmac_rst);
4217 	}
4218 
4219 	/* Init MAC and get the capabilities */
4220 	ret = stmmac_hw_init(priv);
4221 	if (ret)
4222 		goto error_hw_init;
4223 
4224 	/* Configure real RX and TX queues */
4225 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4226 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4227 
4228 	ndev->netdev_ops = &stmmac_netdev_ops;
4229 
4230 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4231 			    NETIF_F_RXCSUM;
4232 
4233 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4234 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4235 		priv->tso = true;
4236 		dev_info(priv->device, "TSO feature enabled\n");
4237 	}
4238 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4239 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4240 #ifdef STMMAC_VLAN_TAG_USED
4241 	/* Both mac100 and gmac support receive VLAN tag detection */
4242 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4243 #endif
4244 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4245 
4246 	/* MTU range: 46 - hw-specific max */
4247 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4248 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4249 		ndev->max_mtu = JUMBO_LEN;
4250 	else
4251 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4252 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4253 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4254 	 */
4255 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4256 	    (priv->plat->maxmtu >= ndev->min_mtu))
4257 		ndev->max_mtu = priv->plat->maxmtu;
4258 	else if (priv->plat->maxmtu < ndev->min_mtu)
4259 		dev_warn(priv->device,
4260 			 "%s: warning: maxmtu having invalid value (%d)\n",
4261 			 __func__, priv->plat->maxmtu);
4262 
4263 	if (flow_ctrl)
4264 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4265 
4266 	/* Rx Watchdog is available in the COREs newer than the 3.40.
4267 	 * In some case, for example on bugged HW this feature
4268 	 * has to be disable and this can be done by passing the
4269 	 * riwt_off field from the platform.
4270 	 */
4271 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4272 		priv->use_riwt = 1;
4273 		dev_info(priv->device,
4274 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4275 	}
4276 
4277 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4278 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4279 
4280 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4281 			       (8 * priv->plat->rx_queues_to_use));
4282 	}
4283 
4284 	spin_lock_init(&priv->lock);
4285 
4286 	/* If a specific clk_csr value is passed from the platform
4287 	 * this means that the CSR Clock Range selection cannot be
4288 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4289 	 * set the MDC clock dynamically according to the csr actual
4290 	 * clock input.
4291 	 */
4292 	if (!priv->plat->clk_csr)
4293 		stmmac_clk_csr_set(priv);
4294 	else
4295 		priv->clk_csr = priv->plat->clk_csr;
4296 
4297 	stmmac_check_pcs_mode(priv);
4298 
4299 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4300 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4301 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4302 		/* MDIO bus Registration */
4303 		ret = stmmac_mdio_register(ndev);
4304 		if (ret < 0) {
4305 			dev_err(priv->device,
4306 				"%s: MDIO bus (id: %d) registration failed",
4307 				__func__, priv->plat->bus_id);
4308 			goto error_mdio_register;
4309 		}
4310 	}
4311 
4312 	ret = register_netdev(ndev);
4313 	if (ret) {
4314 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4315 			__func__, ret);
4316 		goto error_netdev_register;
4317 	}
4318 
4319 	return ret;
4320 
4321 error_netdev_register:
4322 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4323 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4324 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4325 		stmmac_mdio_unregister(ndev);
4326 error_mdio_register:
4327 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4328 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4329 
4330 		netif_napi_del(&rx_q->napi);
4331 	}
4332 error_hw_init:
4333 	free_netdev(ndev);
4334 
4335 	return ret;
4336 }
4337 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4338 
4339 /**
4340  * stmmac_dvr_remove
4341  * @dev: device pointer
4342  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4343  * changes the link status, releases the DMA descriptor rings.
4344  */
4345 int stmmac_dvr_remove(struct device *dev)
4346 {
4347 	struct net_device *ndev = dev_get_drvdata(dev);
4348 	struct stmmac_priv *priv = netdev_priv(ndev);
4349 
4350 	netdev_info(priv->dev, "%s: removing driver", __func__);
4351 
4352 	stmmac_stop_all_dma(priv);
4353 
4354 	priv->hw->mac->set_mac(priv->ioaddr, false);
4355 	netif_carrier_off(ndev);
4356 	unregister_netdev(ndev);
4357 	if (priv->plat->stmmac_rst)
4358 		reset_control_assert(priv->plat->stmmac_rst);
4359 	clk_disable_unprepare(priv->plat->pclk);
4360 	clk_disable_unprepare(priv->plat->stmmac_clk);
4361 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4362 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4363 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4364 		stmmac_mdio_unregister(ndev);
4365 	free_netdev(ndev);
4366 
4367 	return 0;
4368 }
4369 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4370 
4371 /**
4372  * stmmac_suspend - suspend callback
4373  * @dev: device pointer
4374  * Description: this is the function to suspend the device and it is called
4375  * by the platform driver to stop the network queue, release the resources,
4376  * program the PMT register (for WoL), clean and release driver resources.
4377  */
4378 int stmmac_suspend(struct device *dev)
4379 {
4380 	struct net_device *ndev = dev_get_drvdata(dev);
4381 	struct stmmac_priv *priv = netdev_priv(ndev);
4382 	unsigned long flags;
4383 
4384 	if (!ndev || !netif_running(ndev))
4385 		return 0;
4386 
4387 	if (ndev->phydev)
4388 		phy_stop(ndev->phydev);
4389 
4390 	spin_lock_irqsave(&priv->lock, flags);
4391 
4392 	netif_device_detach(ndev);
4393 	stmmac_stop_all_queues(priv);
4394 
4395 	stmmac_disable_all_queues(priv);
4396 
4397 	/* Stop TX/RX DMA */
4398 	stmmac_stop_all_dma(priv);
4399 
4400 	/* Enable Power down mode by programming the PMT regs */
4401 	if (device_may_wakeup(priv->device)) {
4402 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
4403 		priv->irq_wake = 1;
4404 	} else {
4405 		priv->hw->mac->set_mac(priv->ioaddr, false);
4406 		pinctrl_pm_select_sleep_state(priv->device);
4407 		/* Disable clock in case of PWM is off */
4408 		clk_disable(priv->plat->pclk);
4409 		clk_disable(priv->plat->stmmac_clk);
4410 	}
4411 	spin_unlock_irqrestore(&priv->lock, flags);
4412 
4413 	priv->oldlink = false;
4414 	priv->speed = SPEED_UNKNOWN;
4415 	priv->oldduplex = DUPLEX_UNKNOWN;
4416 	return 0;
4417 }
4418 EXPORT_SYMBOL_GPL(stmmac_suspend);
4419 
4420 /**
4421  * stmmac_reset_queues_param - reset queue parameters
4422  * @dev: device pointer
4423  */
4424 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4425 {
4426 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4427 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4428 	u32 queue;
4429 
4430 	for (queue = 0; queue < rx_cnt; queue++) {
4431 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4432 
4433 		rx_q->cur_rx = 0;
4434 		rx_q->dirty_rx = 0;
4435 	}
4436 
4437 	for (queue = 0; queue < tx_cnt; queue++) {
4438 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4439 
4440 		tx_q->cur_tx = 0;
4441 		tx_q->dirty_tx = 0;
4442 		tx_q->mss = 0;
4443 	}
4444 }
4445 
4446 /**
4447  * stmmac_resume - resume callback
4448  * @dev: device pointer
4449  * Description: when resume this function is invoked to setup the DMA and CORE
4450  * in a usable state.
4451  */
4452 int stmmac_resume(struct device *dev)
4453 {
4454 	struct net_device *ndev = dev_get_drvdata(dev);
4455 	struct stmmac_priv *priv = netdev_priv(ndev);
4456 	unsigned long flags;
4457 
4458 	if (!netif_running(ndev))
4459 		return 0;
4460 
4461 	/* Power Down bit, into the PM register, is cleared
4462 	 * automatically as soon as a magic packet or a Wake-up frame
4463 	 * is received. Anyway, it's better to manually clear
4464 	 * this bit because it can generate problems while resuming
4465 	 * from another devices (e.g. serial console).
4466 	 */
4467 	if (device_may_wakeup(priv->device)) {
4468 		spin_lock_irqsave(&priv->lock, flags);
4469 		priv->hw->mac->pmt(priv->hw, 0);
4470 		spin_unlock_irqrestore(&priv->lock, flags);
4471 		priv->irq_wake = 0;
4472 	} else {
4473 		pinctrl_pm_select_default_state(priv->device);
4474 		/* enable the clk previously disabled */
4475 		clk_enable(priv->plat->stmmac_clk);
4476 		clk_enable(priv->plat->pclk);
4477 		/* reset the phy so that it's ready */
4478 		if (priv->mii)
4479 			stmmac_mdio_reset(priv->mii);
4480 	}
4481 
4482 	netif_device_attach(ndev);
4483 
4484 	spin_lock_irqsave(&priv->lock, flags);
4485 
4486 	stmmac_reset_queues_param(priv);
4487 
4488 	stmmac_clear_descriptors(priv);
4489 
4490 	stmmac_hw_setup(ndev, false);
4491 	stmmac_init_tx_coalesce(priv);
4492 	stmmac_set_rx_mode(ndev);
4493 
4494 	stmmac_enable_all_queues(priv);
4495 
4496 	stmmac_start_all_queues(priv);
4497 
4498 	spin_unlock_irqrestore(&priv->lock, flags);
4499 
4500 	if (ndev->phydev)
4501 		phy_start(ndev->phydev);
4502 
4503 	return 0;
4504 }
4505 EXPORT_SYMBOL_GPL(stmmac_resume);
4506 
4507 #ifndef MODULE
4508 static int __init stmmac_cmdline_opt(char *str)
4509 {
4510 	char *opt;
4511 
4512 	if (!str || !*str)
4513 		return -EINVAL;
4514 	while ((opt = strsep(&str, ",")) != NULL) {
4515 		if (!strncmp(opt, "debug:", 6)) {
4516 			if (kstrtoint(opt + 6, 0, &debug))
4517 				goto err;
4518 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4519 			if (kstrtoint(opt + 8, 0, &phyaddr))
4520 				goto err;
4521 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4522 			if (kstrtoint(opt + 7, 0, &buf_sz))
4523 				goto err;
4524 		} else if (!strncmp(opt, "tc:", 3)) {
4525 			if (kstrtoint(opt + 3, 0, &tc))
4526 				goto err;
4527 		} else if (!strncmp(opt, "watchdog:", 9)) {
4528 			if (kstrtoint(opt + 9, 0, &watchdog))
4529 				goto err;
4530 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4531 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4532 				goto err;
4533 		} else if (!strncmp(opt, "pause:", 6)) {
4534 			if (kstrtoint(opt + 6, 0, &pause))
4535 				goto err;
4536 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4537 			if (kstrtoint(opt + 10, 0, &eee_timer))
4538 				goto err;
4539 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4540 			if (kstrtoint(opt + 11, 0, &chain_mode))
4541 				goto err;
4542 		}
4543 	}
4544 	return 0;
4545 
4546 err:
4547 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4548 	return -EINVAL;
4549 }
4550 
4551 __setup("stmmaceth=", stmmac_cmdline_opt);
4552 #endif /* MODULE */
4553 
4554 static int __init stmmac_init(void)
4555 {
4556 #ifdef CONFIG_DEBUG_FS
4557 	/* Create debugfs main directory if it doesn't exist yet */
4558 	if (!stmmac_fs_dir) {
4559 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4560 
4561 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4562 			pr_err("ERROR %s, debugfs create directory failed\n",
4563 			       STMMAC_RESOURCE_NAME);
4564 
4565 			return -ENOMEM;
4566 		}
4567 	}
4568 #endif
4569 
4570 	return 0;
4571 }
4572 
4573 static void __exit stmmac_exit(void)
4574 {
4575 #ifdef CONFIG_DEBUG_FS
4576 	debugfs_remove_recursive(stmmac_fs_dir);
4577 #endif
4578 }
4579 
4580 module_init(stmmac_init)
4581 module_exit(stmmac_exit)
4582 
4583 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4584 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4585 MODULE_LICENSE("GPL");
4586