1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 
54 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
55 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
56 
57 /* Module parameters */
58 #define TX_TIMEO	5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62 
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66 
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70 
71 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
73 
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77 
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81 
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86 
87 #define	DEFAULT_BUFSIZE	1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91 
92 #define	STMMAC_RX_COPYBREAK	256
93 
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97 
98 #define STMMAC_DEFAULT_LPI_TIMER	1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103 
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but allow the user to force the use of chain mode instead of the ring.
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110 
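/* Usage note: the parameters above can be tuned at module load time, e.g.
 * (hypothetical values, assuming the driver is built as the stmmac module):
 *   modprobe stmmac eee_timer=2000 buf_sz=4096 chain_mode=1
 * or, when built-in, on the kernel command line as stmmac.<param>=<value>.
 */
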
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112 
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117 
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119 
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127 	if (unlikely(watchdog < 0))
128 		watchdog = TX_TIMEO;
129 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 		buf_sz = DEFAULT_BUFSIZE;
131 	if (unlikely(flow_ctrl > 1))
132 		flow_ctrl = FLOW_AUTO;
133 	else if (likely(flow_ctrl < 0))
134 		flow_ctrl = FLOW_OFF;
135 	if (unlikely((pause < 0) || (pause > 0xffff)))
136 		pause = PAUSE_TIME;
137 	if (eee_timer < 0)
138 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140 
141 /**
142  * stmmac_clk_csr_set - dynamically set the MDC clock
143  * @priv: driver private structure
144  * Description: this is to dynamically set the MDC clock according to the csr
145  * clock input.
146  * Note:
147  *	If a specific clk_csr value is passed from the platform
148  *	this means that the CSR Clock Range selection cannot be
149  *	changed at run-time and it is fixed (as reported in the driver
150  *	documentation). Otherwise, the driver will try to set the MDC
151  *	clock dynamically according to the actual clock input.
152  */
153 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
154 {
155 	u32 clk_rate;
156 
157 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
158 
159 	/* The platform-provided default clk_csr is assumed valid
160 	 * for all cases except the ones mentioned below.
161 	 * For values higher than the IEEE 802.3 specified frequency
162 	 * we cannot estimate the proper divider as the frequency of
163 	 * clk_csr_i is not known. So we do not change the default
164 	 * divider.
165 	 */
166 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
167 		if (clk_rate < CSR_F_35M)
168 			priv->clk_csr = STMMAC_CSR_20_35M;
169 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
170 			priv->clk_csr = STMMAC_CSR_35_60M;
171 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
172 			priv->clk_csr = STMMAC_CSR_60_100M;
173 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
174 			priv->clk_csr = STMMAC_CSR_100_150M;
175 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
176 			priv->clk_csr = STMMAC_CSR_150_250M;
177 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
178 			priv->clk_csr = STMMAC_CSR_250_300M;
179 	}
180 }
181 
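/**
 * print_pkt - debug helper to hex dump a packet buffer
 * @buf: buffer pointer
 * @len: number of bytes to dump
 * Description: prints the buffer address and length, then hex dumps the
 * buffer contents at debug log level.
 */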
182 static void print_pkt(unsigned char *buf, int len)
183 {
184 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
185 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
186 }
187 
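/**
 * stmmac_tx_avail - number of free descriptors in the TX ring
 * @priv: driver private structure
 * Description: computes the free entries between dirty_tx and cur_tx,
 * keeping one descriptor as a gap so that a full ring can be told apart
 * from an empty one.
 */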
188 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
189 {
190 	u32 avail;
191 
192 	if (priv->dirty_tx > priv->cur_tx)
193 		avail = priv->dirty_tx - priv->cur_tx - 1;
194 	else
195 		avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
196 
197 	return avail;
198 }
199 
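/**
 * stmmac_rx_dirty - number of RX descriptors waiting to be refilled
 * @priv: driver private structure
 * Description: returns how many entries between dirty_rx and cur_rx have
 * been consumed by the DMA and still need a fresh buffer.
 */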
200 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
201 {
202 	u32 dirty;
203 
204 	if (priv->dirty_rx <= priv->cur_rx)
205 		dirty = priv->cur_rx - priv->dirty_rx;
206 	else
207 		dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
208 
209 	return dirty;
210 }
211 
212 /**
213  * stmmac_hw_fix_mac_speed - callback for speed selection
214  * @priv: driver private structure
215  * Description: on some platforms (e.g. ST), some HW system configuration
216  * registers have to be set according to the link speed negotiated.
217  */
218 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
219 {
220 	struct net_device *ndev = priv->dev;
221 	struct phy_device *phydev = ndev->phydev;
222 
223 	if (likely(priv->plat->fix_mac_speed))
224 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
225 }
226 
227 /**
228  * stmmac_enable_eee_mode - check and enter in LPI mode
229  * @priv: driver private structure
230  * Description: this function verifies the conditions and enters the LPI
231  * mode in case EEE is enabled.
232  */
233 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
234 {
235 	/* Check and enter in LPI mode */
236 	if ((priv->dirty_tx == priv->cur_tx) &&
237 	    (priv->tx_path_in_lpi_mode == false))
238 		priv->hw->mac->set_eee_mode(priv->hw,
239 					    priv->plat->en_tx_lpi_clockgating);
240 }
241 
242 /**
243  * stmmac_disable_eee_mode - disable and exit from LPI mode
244  * @priv: driver private structure
245  * Description: this function is to exit and disable EEE in case of
246  * LPI state is true. This is called by the xmit.
247  */
248 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
249 {
250 	priv->hw->mac->reset_eee_mode(priv->hw);
251 	del_timer_sync(&priv->eee_ctrl_timer);
252 	priv->tx_path_in_lpi_mode = false;
253 }
254 
255 /**
256  * stmmac_eee_ctrl_timer - EEE TX SW timer.
257  * @arg : data hook
258  * Description:
259  *  if there is no data transfer and we are not already in the LPI state,
260  *  then the MAC transmitter can be moved to the LPI state.
261  */
262 static void stmmac_eee_ctrl_timer(unsigned long arg)
263 {
264 	struct stmmac_priv *priv = (struct stmmac_priv *)arg;
265 
266 	stmmac_enable_eee_mode(priv);
267 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
268 }
269 
270 /**
271  * stmmac_eee_init - init EEE
272  * @priv: driver private structure
273  * Description:
274  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
275  *  can also manage EEE, this function enables the LPI state and starts the
276  *  related timer.
277  */
278 bool stmmac_eee_init(struct stmmac_priv *priv)
279 {
280 	struct net_device *ndev = priv->dev;
281 	unsigned long flags;
282 	bool ret = false;
283 
284 	/* Using the PCS we cannot deal with the phy registers at this stage
285 	 * so we do not support extra features like EEE.
286 	 */
287 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
288 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
289 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
290 		goto out;
291 
292 	/* MAC core supports the EEE feature. */
293 	if (priv->dma_cap.eee) {
294 		int tx_lpi_timer = priv->tx_lpi_timer;
295 
296 		/* Check if the PHY supports EEE */
297 		if (phy_init_eee(ndev->phydev, 1)) {
298 			/* Manage the case where, at run-time, EEE cannot be
299 			 * supported anymore (for example because the link
300 			 * partner caps have changed).
301 			 * In that case the driver disables its own timers.
302 			 */
303 			spin_lock_irqsave(&priv->lock, flags);
304 			if (priv->eee_active) {
305 				netdev_dbg(priv->dev, "disable EEE\n");
306 				del_timer_sync(&priv->eee_ctrl_timer);
307 				priv->hw->mac->set_eee_timer(priv->hw, 0,
308 							     tx_lpi_timer);
309 			}
310 			priv->eee_active = 0;
311 			spin_unlock_irqrestore(&priv->lock, flags);
312 			goto out;
313 		}
314 		/* Activate the EEE and start timers */
315 		spin_lock_irqsave(&priv->lock, flags);
316 		if (!priv->eee_active) {
317 			priv->eee_active = 1;
318 			setup_timer(&priv->eee_ctrl_timer,
319 				    stmmac_eee_ctrl_timer,
320 				    (unsigned long)priv);
321 			mod_timer(&priv->eee_ctrl_timer,
322 				  STMMAC_LPI_T(eee_timer));
323 
324 			priv->hw->mac->set_eee_timer(priv->hw,
325 						     STMMAC_DEFAULT_LIT_LS,
326 						     tx_lpi_timer);
327 		}
328 		/* Set HW EEE according to the speed */
329 		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
330 
331 		ret = true;
332 		spin_unlock_irqrestore(&priv->lock, flags);
333 
334 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
335 	}
336 out:
337 	return ret;
338 }
339 
340 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
341  * @priv: driver private structure
342  * @p : descriptor pointer
343  * @skb : the socket buffer
344  * Description :
345  * This function will read the timestamp from the descriptor, perform some
346  * sanity checks and then pass it to the stack.
347  */
348 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
349 				   struct dma_desc *p, struct sk_buff *skb)
350 {
351 	struct skb_shared_hwtstamps shhwtstamp;
352 	u64 ns;
353 
354 	if (!priv->hwts_tx_en)
355 		return;
356 
357 	/* exit if skb doesn't support hw tstamp */
358 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
359 		return;
360 
361 	/* check tx tstamp status */
362 	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
363 		/* get the valid tstamp */
364 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
365 
366 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
367 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
368 
369 		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
370 		/* pass tstamp to stack */
371 		skb_tstamp_tx(skb, &shhwtstamp);
372 	}
373 
374 	return;
375 }
376 
377 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
378  * @priv: driver private structure
379  * @p : descriptor pointer
380  * @np : next descriptor pointer
381  * @skb : the socket buffer
382  * Description :
383  * This function will read received packet's timestamp from the descriptor
384  * and pass it to the stack. It also performs some sanity checks.
385  */
386 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
387 				   struct dma_desc *np, struct sk_buff *skb)
388 {
389 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
390 	u64 ns;
391 
392 	if (!priv->hwts_rx_en)
393 		return;
394 
395 	/* Check if timestamp is available */
396 	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
397 		/* For GMAC4, the valid timestamp is from CTX next desc. */
398 		if (priv->plat->has_gmac4)
399 			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
400 		else
401 			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
402 
403 		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
404 		shhwtstamp = skb_hwtstamps(skb);
405 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
406 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
407 	} else  {
408 		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
409 	}
410 }
411 
412 /**
413  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
414  *  @dev: device pointer.
415  *  @ifr: An IOCTL-specific structure that can contain a pointer to
416  *  a proprietary structure used to pass information to the driver.
417  *  Description:
418  *  This function configures the MAC to enable/disable both outgoing (TX)
419  *  and incoming (RX) packet time stamping based on user input.
420  *  Return Value:
421  *  0 on success and an appropriate -ve integer on failure.
422  */
423 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
424 {
425 	struct stmmac_priv *priv = netdev_priv(dev);
426 	struct hwtstamp_config config;
427 	struct timespec64 now;
428 	u64 temp = 0;
429 	u32 ptp_v2 = 0;
430 	u32 tstamp_all = 0;
431 	u32 ptp_over_ipv4_udp = 0;
432 	u32 ptp_over_ipv6_udp = 0;
433 	u32 ptp_over_ethernet = 0;
434 	u32 snap_type_sel = 0;
435 	u32 ts_master_en = 0;
436 	u32 ts_event_en = 0;
437 	u32 value = 0;
438 	u32 sec_inc;
439 
440 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
441 		netdev_alert(priv->dev, "No support for HW time stamping\n");
442 		priv->hwts_tx_en = 0;
443 		priv->hwts_rx_en = 0;
444 
445 		return -EOPNOTSUPP;
446 	}
447 
448 	if (copy_from_user(&config, ifr->ifr_data,
449 			   sizeof(struct hwtstamp_config)))
450 		return -EFAULT;
451 
452 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
453 		   __func__, config.flags, config.tx_type, config.rx_filter);
454 
455 	/* reserved for future extensions */
456 	if (config.flags)
457 		return -EINVAL;
458 
459 	if (config.tx_type != HWTSTAMP_TX_OFF &&
460 	    config.tx_type != HWTSTAMP_TX_ON)
461 		return -ERANGE;
462 
463 	if (priv->adv_ts) {
464 		switch (config.rx_filter) {
465 		case HWTSTAMP_FILTER_NONE:
466 			/* time stamp no incoming packet at all */
467 			config.rx_filter = HWTSTAMP_FILTER_NONE;
468 			break;
469 
470 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
471 			/* PTP v1, UDP, any kind of event packet */
472 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
473 			/* take time stamp for all event messages */
474 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
475 
476 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
477 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
478 			break;
479 
480 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
481 			/* PTP v1, UDP, Sync packet */
482 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
483 			/* take time stamp for SYNC messages only */
484 			ts_event_en = PTP_TCR_TSEVNTENA;
485 
486 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
487 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
488 			break;
489 
490 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
491 			/* PTP v1, UDP, Delay_req packet */
492 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
493 			/* take time stamp for Delay_Req messages only */
494 			ts_master_en = PTP_TCR_TSMSTRENA;
495 			ts_event_en = PTP_TCR_TSEVNTENA;
496 
497 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
498 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
499 			break;
500 
501 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
502 			/* PTP v2, UDP, any kind of event packet */
503 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
504 			ptp_v2 = PTP_TCR_TSVER2ENA;
505 			/* take time stamp for all event messages */
506 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
507 
508 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
509 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
510 			break;
511 
512 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
513 			/* PTP v2, UDP, Sync packet */
514 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
515 			ptp_v2 = PTP_TCR_TSVER2ENA;
516 			/* take time stamp for SYNC messages only */
517 			ts_event_en = PTP_TCR_TSEVNTENA;
518 
519 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
520 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
521 			break;
522 
523 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
524 			/* PTP v2, UDP, Delay_req packet */
525 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
526 			ptp_v2 = PTP_TCR_TSVER2ENA;
527 			/* take time stamp for Delay_Req messages only */
528 			ts_master_en = PTP_TCR_TSMSTRENA;
529 			ts_event_en = PTP_TCR_TSEVNTENA;
530 
531 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
532 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
533 			break;
534 
535 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
536 			/* PTP v2/802.1AS, any layer, any kind of event packet */
537 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
538 			ptp_v2 = PTP_TCR_TSVER2ENA;
539 			/* take time stamp for all event messages */
540 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
541 
542 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
543 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
544 			ptp_over_ethernet = PTP_TCR_TSIPENA;
545 			break;
546 
547 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
548 			/* PTP v2/802.1AS, any layer, Sync packet */
549 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
550 			ptp_v2 = PTP_TCR_TSVER2ENA;
551 			/* take time stamp for SYNC messages only */
552 			ts_event_en = PTP_TCR_TSEVNTENA;
553 
554 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
555 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
556 			ptp_over_ethernet = PTP_TCR_TSIPENA;
557 			break;
558 
559 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
560 			/* PTP v2/802.1AS, any layer, Delay_req packet */
561 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
562 			ptp_v2 = PTP_TCR_TSVER2ENA;
563 			/* take time stamp for Delay_Req messages only */
564 			ts_master_en = PTP_TCR_TSMSTRENA;
565 			ts_event_en = PTP_TCR_TSEVNTENA;
566 
567 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
568 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
569 			ptp_over_ethernet = PTP_TCR_TSIPENA;
570 			break;
571 
572 		case HWTSTAMP_FILTER_ALL:
573 			/* time stamp any incoming packet */
574 			config.rx_filter = HWTSTAMP_FILTER_ALL;
575 			tstamp_all = PTP_TCR_TSENALL;
576 			break;
577 
578 		default:
579 			return -ERANGE;
580 		}
581 	} else {
582 		switch (config.rx_filter) {
583 		case HWTSTAMP_FILTER_NONE:
584 			config.rx_filter = HWTSTAMP_FILTER_NONE;
585 			break;
586 		default:
587 			/* PTP v1, UDP, any kind of event packet */
588 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
589 			break;
590 		}
591 	}
592 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
593 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
594 
595 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
596 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
597 	else {
598 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
599 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
600 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
601 			 ts_master_en | snap_type_sel);
602 		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
603 
604 		/* program Sub Second Increment reg */
605 		sec_inc = priv->hw->ptp->config_sub_second_increment(
606 			priv->ptpaddr, priv->plat->clk_ptp_rate,
607 			priv->plat->has_gmac4);
608 		temp = div_u64(1000000000ULL, sec_inc);
609 
610 		/* calculate default added value:
611 		 * formula is :
612 		 * addend = (2^32)/freq_div_ratio;
613 		 * where, freq_div_ratio = 1e9ns/sec_inc
614 		 */
615 		temp = (u64)(temp << 32);
616 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
617 		priv->hw->ptp->config_addend(priv->ptpaddr,
618 					     priv->default_addend);
619 
620 		/* initialize system time */
621 		ktime_get_real_ts64(&now);
622 
623 		/* lower 32 bits of tv_sec are safe until y2106 */
624 		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
625 					    now.tv_nsec);
626 	}
627 
628 	return copy_to_user(ifr->ifr_data, &config,
629 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
630 }
631 
632 /**
633  * stmmac_init_ptp - init PTP
634  * @priv: driver private structure
635  * Description: this verifies whether the HW supports PTPv1 or PTPv2.
636  * This is done by looking at the HW capability register.
637  * This function also registers the ptp driver.
638  */
639 static int stmmac_init_ptp(struct stmmac_priv *priv)
640 {
641 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
642 		return -EOPNOTSUPP;
643 
644 	priv->adv_ts = 0;
645 	/* Check if adv_ts can be enabled for dwmac 4.x core */
646 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
647 		priv->adv_ts = 1;
648 	/* Dwmac 3.x core with extend_desc can support adv_ts */
649 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
650 		priv->adv_ts = 1;
651 
652 	if (priv->dma_cap.time_stamp)
653 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
654 
655 	if (priv->adv_ts)
656 		netdev_info(priv->dev,
657 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
658 
659 	priv->hw->ptp = &stmmac_ptp;
660 	priv->hwts_tx_en = 0;
661 	priv->hwts_rx_en = 0;
662 
663 	stmmac_ptp_register(priv);
664 
665 	return 0;
666 }
667 
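/**
 * stmmac_release_ptp - release the PTP resources
 * @priv: driver private structure
 * Description: disables the PTP reference clock, if one was provided, and
 * unregisters the PTP clock driver.
 */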
668 static void stmmac_release_ptp(struct stmmac_priv *priv)
669 {
670 	if (priv->plat->clk_ptp_ref)
671 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
672 	stmmac_ptp_unregister(priv);
673 }
674 
675 /**
676  * stmmac_adjust_link - adjusts the link parameters
677  * @dev: net device structure
678  * Description: this is the helper called by the physical abstraction layer
679  * drivers to communicate the phy link status. According to the speed and
680  * duplex this driver can invoke registered glue-logic as well.
681  * It also invokes the eee initialization because the link could switch
682  * between different networks (that are eee capable).
683  */
684 static void stmmac_adjust_link(struct net_device *dev)
685 {
686 	struct stmmac_priv *priv = netdev_priv(dev);
687 	struct phy_device *phydev = dev->phydev;
688 	unsigned long flags;
689 	int new_state = 0;
690 	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
691 
692 	if (!phydev)
693 		return;
694 
695 	spin_lock_irqsave(&priv->lock, flags);
696 
697 	if (phydev->link) {
698 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
699 
700 		/* Now we make sure that we can be in full duplex mode.
701 		 * If not, we operate in half-duplex mode. */
702 		if (phydev->duplex != priv->oldduplex) {
703 			new_state = 1;
704 			if (!(phydev->duplex))
705 				ctrl &= ~priv->hw->link.duplex;
706 			else
707 				ctrl |= priv->hw->link.duplex;
708 			priv->oldduplex = phydev->duplex;
709 		}
710 		/* Flow Control operation */
711 		if (phydev->pause)
712 			priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
713 						 fc, pause_time);
714 
715 		if (phydev->speed != priv->speed) {
716 			new_state = 1;
717 			switch (phydev->speed) {
718 			case 1000:
719 				if (priv->plat->has_gmac ||
720 				    priv->plat->has_gmac4)
721 					ctrl &= ~priv->hw->link.port;
722 				break;
723 			case 100:
724 				if (priv->plat->has_gmac ||
725 				    priv->plat->has_gmac4) {
726 					ctrl |= priv->hw->link.port;
727 					ctrl |= priv->hw->link.speed;
728 				} else {
729 					ctrl &= ~priv->hw->link.port;
730 				}
731 				break;
732 			case 10:
733 				if (priv->plat->has_gmac ||
734 				    priv->plat->has_gmac4) {
735 					ctrl |= priv->hw->link.port;
736 					ctrl &= ~(priv->hw->link.speed);
737 				} else {
738 					ctrl &= ~priv->hw->link.port;
739 				}
740 				break;
741 			default:
742 				netif_warn(priv, link, priv->dev,
743 					   "broken speed: %d\n", phydev->speed);
744 				phydev->speed = SPEED_UNKNOWN;
745 				break;
746 			}
747 			if (phydev->speed != SPEED_UNKNOWN)
748 				stmmac_hw_fix_mac_speed(priv);
749 			priv->speed = phydev->speed;
750 		}
751 
752 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
753 
754 		if (!priv->oldlink) {
755 			new_state = 1;
756 			priv->oldlink = 1;
757 		}
758 	} else if (priv->oldlink) {
759 		new_state = 1;
760 		priv->oldlink = 0;
761 		priv->speed = SPEED_UNKNOWN;
762 		priv->oldduplex = DUPLEX_UNKNOWN;
763 	}
764 
765 	if (new_state && netif_msg_link(priv))
766 		phy_print_status(phydev);
767 
768 	spin_unlock_irqrestore(&priv->lock, flags);
769 
770 	if (phydev->is_pseudo_fixed_link)
771 		/* Stop the PHY layer from calling the hook to adjust the link in
772 		 * case a switch is attached to the stmmac driver.
773 		 */
774 		phydev->irq = PHY_IGNORE_INTERRUPT;
775 	else
776 		/* At this stage, init the EEE if supported.
777 		 * Never called in case of fixed_link.
778 		 */
779 		priv->eee_enabled = stmmac_eee_init(priv);
780 }
781 
782 /**
783  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
784  * @priv: driver private structure
785  * Description: this verifies if the HW supports the PCS.
786  * The Physical Coding Sublayer (PCS) is an interface that can be used when
787  * the MAC is configured for the TBI, RTBI, or SGMII PHY interface.
788  */
789 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
790 {
791 	int interface = priv->plat->interface;
792 
793 	if (priv->dma_cap.pcs) {
794 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
795 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
796 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
797 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
798 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
799 			priv->hw->pcs = STMMAC_PCS_RGMII;
800 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
801 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
802 			priv->hw->pcs = STMMAC_PCS_SGMII;
803 		}
804 	}
805 }
806 
807 /**
808  * stmmac_init_phy - PHY initialization
809  * @dev: net device structure
810  * Description: it initializes the driver's PHY state, and attaches the PHY
811  * to the mac driver.
812  *  Return value:
813  *  0 on success
814  */
815 static int stmmac_init_phy(struct net_device *dev)
816 {
817 	struct stmmac_priv *priv = netdev_priv(dev);
818 	struct phy_device *phydev;
819 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
820 	char bus_id[MII_BUS_ID_SIZE];
821 	int interface = priv->plat->interface;
822 	int max_speed = priv->plat->max_speed;
823 	priv->oldlink = 0;
824 	priv->speed = SPEED_UNKNOWN;
825 	priv->oldduplex = DUPLEX_UNKNOWN;
826 
827 	if (priv->plat->phy_node) {
828 		phydev = of_phy_connect(dev, priv->plat->phy_node,
829 					&stmmac_adjust_link, 0, interface);
830 	} else {
831 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
832 			 priv->plat->bus_id);
833 
834 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
835 			 priv->plat->phy_addr);
836 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
837 			   phy_id_fmt);
838 
839 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
840 				     interface);
841 	}
842 
843 	if (IS_ERR_OR_NULL(phydev)) {
844 		netdev_err(priv->dev, "Could not attach to PHY\n");
845 		if (!phydev)
846 			return -ENODEV;
847 
848 		return PTR_ERR(phydev);
849 	}
850 
851 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
852 	if ((interface == PHY_INTERFACE_MODE_MII) ||
853 	    (interface == PHY_INTERFACE_MODE_RMII) ||
854 		(max_speed < 1000 && max_speed > 0))
855 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
856 					 SUPPORTED_1000baseT_Full);
857 
858 	/*
859 	 * Broken HW is sometimes missing the pull-up resistor on the
860 	 * MDIO line, which results in reads to non-existent devices returning
861 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
862 	 * device as well.
863 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
864 	 */
865 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
866 		phy_disconnect(phydev);
867 		return -ENODEV;
868 	}
869 
870 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
871 	 * subsequent PHY polling; make sure we force a link transition if
872 	 * we have an UP/DOWN/UP transition
873 	 */
874 	if (phydev->is_pseudo_fixed_link)
875 		phydev->irq = PHY_POLL;
876 
877 	phy_attached_info(phydev);
878 	return 0;
879 }
880 
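/**
 * stmmac_display_rings - dump the RX and TX descriptor rings
 * @priv: driver private structure
 * Description: debug helper that prints the content of both rings, using
 * the basic or extended descriptor layout as appropriate.
 */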
881 static void stmmac_display_rings(struct stmmac_priv *priv)
882 {
883 	void *head_rx, *head_tx;
884 
885 	if (priv->extend_desc) {
886 		head_rx = (void *)priv->dma_erx;
887 		head_tx = (void *)priv->dma_etx;
888 	} else {
889 		head_rx = (void *)priv->dma_rx;
890 		head_tx = (void *)priv->dma_tx;
891 	}
892 
893 	/* Display Rx ring */
894 	priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
895 	/* Display Tx ring */
896 	priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
897 }
898 
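/**
 * stmmac_set_bfsize - select the DMA buffer size according to the MTU
 * @mtu: interface MTU
 * @bufsize: current buffer size
 * Description: returns the smallest supported buffer size (2KiB, 4KiB or
 * 8KiB) able to hold the given MTU, or DEFAULT_BUFSIZE for standard frames.
 */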
899 static int stmmac_set_bfsize(int mtu, int bufsize)
900 {
901 	int ret = bufsize;
902 
903 	if (mtu >= BUF_SIZE_4KiB)
904 		ret = BUF_SIZE_8KiB;
905 	else if (mtu >= BUF_SIZE_2KiB)
906 		ret = BUF_SIZE_4KiB;
907 	else if (mtu > DEFAULT_BUFSIZE)
908 		ret = BUF_SIZE_2KiB;
909 	else
910 		ret = DEFAULT_BUFSIZE;
911 
912 	return ret;
913 }
914 
915 /**
916  * stmmac_clear_descriptors - clear descriptors
917  * @priv: driver private structure
918  * Description: this function is called to clear the tx and rx descriptors;
919  * it works whether basic or extended descriptors are used.
920  */
921 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
922 {
923 	int i;
924 
925 	/* Clear the Rx/Tx descriptors */
926 	for (i = 0; i < DMA_RX_SIZE; i++)
927 		if (priv->extend_desc)
928 			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
929 						     priv->use_riwt, priv->mode,
930 						     (i == DMA_RX_SIZE - 1));
931 		else
932 			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
933 						     priv->use_riwt, priv->mode,
934 						     (i == DMA_RX_SIZE - 1));
935 	for (i = 0; i < DMA_TX_SIZE; i++)
936 		if (priv->extend_desc)
937 			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
938 						     priv->mode,
939 						     (i == DMA_TX_SIZE - 1));
940 		else
941 			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
942 						     priv->mode,
943 						     (i == DMA_TX_SIZE - 1));
944 }
945 
946 /**
947  * stmmac_init_rx_buffers - init the RX descriptor buffer.
948  * @priv: driver private structure
949  * @p: descriptor pointer
950  * @i: descriptor index
951  * @flags: gfp flag.
952  * Description: this function is called to allocate a receive buffer, perform
953  * the DMA mapping and init the descriptor.
954  */
955 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
956 				  int i, gfp_t flags)
957 {
958 	struct sk_buff *skb;
959 
960 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
961 	if (!skb) {
962 		netdev_err(priv->dev,
963 			   "%s: Rx init fails; skb is NULL\n", __func__);
964 		return -ENOMEM;
965 	}
966 	priv->rx_skbuff[i] = skb;
967 	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
968 						priv->dma_buf_sz,
969 						DMA_FROM_DEVICE);
970 	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
971 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
972 		dev_kfree_skb_any(skb);
973 		return -EINVAL;
974 	}
975 
976 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
977 		p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
978 	else
979 		p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
980 
981 	if ((priv->hw->mode->init_desc3) &&
982 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
983 		priv->hw->mode->init_desc3(p);
984 
985 	return 0;
986 }
987 
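/**
 * stmmac_free_rx_buffers - free an RX buffer
 * @priv: driver private structure
 * @i: descriptor index
 * Description: unmaps the DMA buffer and frees the socket buffer attached
 * to the RX descriptor at index @i, if any.
 */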
988 static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
989 {
990 	if (priv->rx_skbuff[i]) {
991 		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
992 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
993 		dev_kfree_skb_any(priv->rx_skbuff[i]);
994 	}
995 	priv->rx_skbuff[i] = NULL;
996 }
997 
998 /**
999  * init_dma_desc_rings - init the RX/TX descriptor rings
1000  * @dev: net device structure
1001  * @flags: gfp flag.
1002  * Description: this function initializes the DMA RX/TX descriptors
1003  * and allocates the socket buffers. It supports the chained and ring
1004  * modes.
1005  */
1006 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1007 {
1008 	int i;
1009 	struct stmmac_priv *priv = netdev_priv(dev);
1010 	unsigned int bfsize = 0;
1011 	int ret = -ENOMEM;
1012 
1013 	if (priv->hw->mode->set_16kib_bfsize)
1014 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1015 
1016 	if (bfsize < BUF_SIZE_16KiB)
1017 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1018 
1019 	priv->dma_buf_sz = bfsize;
1020 
1021 	netif_dbg(priv, probe, priv->dev,
1022 		  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
1023 		  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
1024 
1025 	/* RX INITIALIZATION */
1026 	netif_dbg(priv, probe, priv->dev,
1027 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1028 
1029 	for (i = 0; i < DMA_RX_SIZE; i++) {
1030 		struct dma_desc *p;
1031 		if (priv->extend_desc)
1032 			p = &((priv->dma_erx + i)->basic);
1033 		else
1034 			p = priv->dma_rx + i;
1035 
1036 		ret = stmmac_init_rx_buffers(priv, p, i, flags);
1037 		if (ret)
1038 			goto err_init_rx_buffers;
1039 
1040 		netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1041 			  priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
1042 			  (unsigned int)priv->rx_skbuff_dma[i]);
1043 	}
1044 	priv->cur_rx = 0;
1045 	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1046 	buf_sz = bfsize;
1047 
1048 	/* Setup the chained descriptor addresses */
1049 	if (priv->mode == STMMAC_CHAIN_MODE) {
1050 		if (priv->extend_desc) {
1051 			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
1052 					     DMA_RX_SIZE, 1);
1053 			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
1054 					     DMA_TX_SIZE, 1);
1055 		} else {
1056 			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
1057 					     DMA_RX_SIZE, 0);
1058 			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
1059 					     DMA_TX_SIZE, 0);
1060 		}
1061 	}
1062 
1063 	/* TX INITIALIZATION */
1064 	for (i = 0; i < DMA_TX_SIZE; i++) {
1065 		struct dma_desc *p;
1066 		if (priv->extend_desc)
1067 			p = &((priv->dma_etx + i)->basic);
1068 		else
1069 			p = priv->dma_tx + i;
1070 
1071 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1072 			p->des0 = 0;
1073 			p->des1 = 0;
1074 			p->des2 = 0;
1075 			p->des3 = 0;
1076 		} else {
1077 			p->des2 = 0;
1078 		}
1079 
1080 		priv->tx_skbuff_dma[i].buf = 0;
1081 		priv->tx_skbuff_dma[i].map_as_page = false;
1082 		priv->tx_skbuff_dma[i].len = 0;
1083 		priv->tx_skbuff_dma[i].last_segment = false;
1084 		priv->tx_skbuff[i] = NULL;
1085 	}
1086 
1087 	priv->dirty_tx = 0;
1088 	priv->cur_tx = 0;
1089 	netdev_reset_queue(priv->dev);
1090 
1091 	stmmac_clear_descriptors(priv);
1092 
1093 	if (netif_msg_hw(priv))
1094 		stmmac_display_rings(priv);
1095 
1096 	return 0;
1097 err_init_rx_buffers:
1098 	while (--i >= 0)
1099 		stmmac_free_rx_buffers(priv, i);
1100 	return ret;
1101 }
1102 
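/**
 * dma_free_rx_skbufs - free all the RX socket buffers
 * @priv: driver private structure
 * Description: walks the whole RX ring and releases every pre-allocated
 * buffer via stmmac_free_rx_buffers().
 */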
1103 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1104 {
1105 	int i;
1106 
1107 	for (i = 0; i < DMA_RX_SIZE; i++)
1108 		stmmac_free_rx_buffers(priv, i);
1109 }
1110 
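/**
 * dma_free_tx_skbufs - free all the TX socket buffers
 * @priv: driver private structure
 * Description: walks the whole TX ring, unmaps any buffer still mapped
 * (page or single mapping) and frees the attached socket buffers.
 */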
1111 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1112 {
1113 	int i;
1114 
1115 	for (i = 0; i < DMA_TX_SIZE; i++) {
1116 		if (priv->tx_skbuff_dma[i].buf) {
1117 			if (priv->tx_skbuff_dma[i].map_as_page)
1118 				dma_unmap_page(priv->device,
1119 					       priv->tx_skbuff_dma[i].buf,
1120 					       priv->tx_skbuff_dma[i].len,
1121 					       DMA_TO_DEVICE);
1122 			else
1123 				dma_unmap_single(priv->device,
1124 						 priv->tx_skbuff_dma[i].buf,
1125 						 priv->tx_skbuff_dma[i].len,
1126 						 DMA_TO_DEVICE);
1127 		}
1128 
1129 		if (priv->tx_skbuff[i]) {
1130 			dev_kfree_skb_any(priv->tx_skbuff[i]);
1131 			priv->tx_skbuff[i] = NULL;
1132 			priv->tx_skbuff_dma[i].buf = 0;
1133 			priv->tx_skbuff_dma[i].map_as_page = false;
1134 		}
1135 	}
1136 }
1137 
1138 /**
1139  * alloc_dma_desc_resources - alloc TX/RX resources.
1140  * @priv: private structure
1141  * Description: according to which descriptor can be used (extended or basic)
1142  * this function allocates the resources for TX and RX paths. In case of
1143  * reception, for example, it pre-allocates the RX socket buffers in order to
1144  * allow the zero-copy mechanism.
1145  */
1146 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1147 {
1148 	int ret = -ENOMEM;
1149 
1150 	priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
1151 					    GFP_KERNEL);
1152 	if (!priv->rx_skbuff_dma)
1153 		return -ENOMEM;
1154 
1155 	priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
1156 					GFP_KERNEL);
1157 	if (!priv->rx_skbuff)
1158 		goto err_rx_skbuff;
1159 
1160 	priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1161 					    sizeof(*priv->tx_skbuff_dma),
1162 					    GFP_KERNEL);
1163 	if (!priv->tx_skbuff_dma)
1164 		goto err_tx_skbuff_dma;
1165 
1166 	priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
1167 					GFP_KERNEL);
1168 	if (!priv->tx_skbuff)
1169 		goto err_tx_skbuff;
1170 
1171 	if (priv->extend_desc) {
1172 		priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
1173 						    sizeof(struct
1174 							   dma_extended_desc),
1175 						    &priv->dma_rx_phy,
1176 						    GFP_KERNEL);
1177 		if (!priv->dma_erx)
1178 			goto err_dma;
1179 
1180 		priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
1181 						    sizeof(struct
1182 							   dma_extended_desc),
1183 						    &priv->dma_tx_phy,
1184 						    GFP_KERNEL);
1185 		if (!priv->dma_etx) {
1186 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1187 					  sizeof(struct dma_extended_desc),
1188 					  priv->dma_erx, priv->dma_rx_phy);
1189 			goto err_dma;
1190 		}
1191 	} else {
1192 		priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
1193 						   sizeof(struct dma_desc),
1194 						   &priv->dma_rx_phy,
1195 						   GFP_KERNEL);
1196 		if (!priv->dma_rx)
1197 			goto err_dma;
1198 
1199 		priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
1200 						   sizeof(struct dma_desc),
1201 						   &priv->dma_tx_phy,
1202 						   GFP_KERNEL);
1203 		if (!priv->dma_tx) {
1204 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1205 					  sizeof(struct dma_desc),
1206 					  priv->dma_rx, priv->dma_rx_phy);
1207 			goto err_dma;
1208 		}
1209 	}
1210 
1211 	return 0;
1212 
1213 err_dma:
1214 	kfree(priv->tx_skbuff);
1215 err_tx_skbuff:
1216 	kfree(priv->tx_skbuff_dma);
1217 err_tx_skbuff_dma:
1218 	kfree(priv->rx_skbuff);
1219 err_rx_skbuff:
1220 	kfree(priv->rx_skbuff_dma);
1221 	return ret;
1222 }
1223 
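/**
 * free_dma_desc_resources - free the TX/RX descriptor resources
 * @priv: driver private structure
 * Description: releases the socket buffers and then frees the coherent
 * memory used for the basic or extended descriptor rings, undoing
 * alloc_dma_desc_resources().
 */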
1224 static void free_dma_desc_resources(struct stmmac_priv *priv)
1225 {
1226 	/* Release the DMA TX/RX socket buffers */
1227 	dma_free_rx_skbufs(priv);
1228 	dma_free_tx_skbufs(priv);
1229 
1230 	/* Free DMA regions of consistent memory previously allocated */
1231 	if (!priv->extend_desc) {
1232 		dma_free_coherent(priv->device,
1233 				  DMA_TX_SIZE * sizeof(struct dma_desc),
1234 				  priv->dma_tx, priv->dma_tx_phy);
1235 		dma_free_coherent(priv->device,
1236 				  DMA_RX_SIZE * sizeof(struct dma_desc),
1237 				  priv->dma_rx, priv->dma_rx_phy);
1238 	} else {
1239 		dma_free_coherent(priv->device, DMA_TX_SIZE *
1240 				  sizeof(struct dma_extended_desc),
1241 				  priv->dma_etx, priv->dma_tx_phy);
1242 		dma_free_coherent(priv->device, DMA_RX_SIZE *
1243 				  sizeof(struct dma_extended_desc),
1244 				  priv->dma_erx, priv->dma_rx_phy);
1245 	}
1246 	kfree(priv->rx_skbuff_dma);
1247 	kfree(priv->rx_skbuff);
1248 	kfree(priv->tx_skbuff_dma);
1249 	kfree(priv->tx_skbuff);
1250 }
1251 
1252 /**
1253  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1254  *  @priv: driver private structure
1255  *  Description: It is used for enabling the rx queues in the MAC
1256  */
1257 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1258 {
1259 	int rx_count = priv->dma_cap.number_rx_queues;
1260 	int queue = 0;
1261 
1262 	/* If GMAC does not have multiple queues, then this is not necessary */
1263 	if (rx_count == 1)
1264 		return;
1265 
1266 	/*
1267 	 *  If the core is synthesized with multiple rx queues / multiple
1268 	 *  dma channels, then rx queues will be disabled by default.
1269 	 *  For now only rx queue 0 is enabled.
1270 	 */
1271 	priv->hw->mac->rx_queue_enable(priv->hw, queue);
1272 }
1273 
1274 /**
1275  *  stmmac_dma_operation_mode - HW DMA operation mode
1276  *  @priv: driver private structure
1277  *  Description: it is used for configuring the DMA operation mode register in
1278  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1279  */
1280 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1281 {
1282 	int rxfifosz = priv->plat->rx_fifo_size;
1283 
1284 	if (priv->plat->force_thresh_dma_mode)
1285 		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
1286 	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1287 		/*
1288 		 * In case of GMAC, SF mode can be enabled
1289 		 * to perform the TX COE in HW. This depends on:
1290 		 * 1) TX COE being actually supported
1291 		 * 2) there being no bugged Jumbo frame support
1292 		 *    that requires not inserting the csum in the TDES.
1293 		 */
1294 		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
1295 					rxfifosz);
1296 		priv->xstats.threshold = SF_DMA_MODE;
1297 	} else
1298 		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
1299 					rxfifosz);
1300 }
1301 
1302 /**
1303  * stmmac_tx_clean - to manage the transmission completion
1304  * @priv: driver private structure
1305  * Description: it reclaims the transmit resources after transmission completes.
1306  */
1307 static void stmmac_tx_clean(struct stmmac_priv *priv)
1308 {
1309 	unsigned int bytes_compl = 0, pkts_compl = 0;
1310 	unsigned int entry = priv->dirty_tx;
1311 
1312 	netif_tx_lock(priv->dev);
1313 
1314 	priv->xstats.tx_clean++;
1315 
1316 	while (entry != priv->cur_tx) {
1317 		struct sk_buff *skb = priv->tx_skbuff[entry];
1318 		struct dma_desc *p;
1319 		int status;
1320 
1321 		if (priv->extend_desc)
1322 			p = (struct dma_desc *)(priv->dma_etx + entry);
1323 		else
1324 			p = priv->dma_tx + entry;
1325 
1326 		status = priv->hw->desc->tx_status(&priv->dev->stats,
1327 						      &priv->xstats, p,
1328 						      priv->ioaddr);
1329 		/* Check if the descriptor is owned by the DMA */
1330 		if (unlikely(status & tx_dma_own))
1331 			break;
1332 
1333 		/* Just consider the last segment and ...*/
1334 		if (likely(!(status & tx_not_ls))) {
1335 			/* ... verify the status error condition */
1336 			if (unlikely(status & tx_err)) {
1337 				priv->dev->stats.tx_errors++;
1338 			} else {
1339 				priv->dev->stats.tx_packets++;
1340 				priv->xstats.tx_pkt_n++;
1341 			}
1342 			stmmac_get_tx_hwtstamp(priv, p, skb);
1343 		}
1344 
1345 		if (likely(priv->tx_skbuff_dma[entry].buf)) {
1346 			if (priv->tx_skbuff_dma[entry].map_as_page)
1347 				dma_unmap_page(priv->device,
1348 					       priv->tx_skbuff_dma[entry].buf,
1349 					       priv->tx_skbuff_dma[entry].len,
1350 					       DMA_TO_DEVICE);
1351 			else
1352 				dma_unmap_single(priv->device,
1353 						 priv->tx_skbuff_dma[entry].buf,
1354 						 priv->tx_skbuff_dma[entry].len,
1355 						 DMA_TO_DEVICE);
1356 			priv->tx_skbuff_dma[entry].buf = 0;
1357 			priv->tx_skbuff_dma[entry].len = 0;
1358 			priv->tx_skbuff_dma[entry].map_as_page = false;
1359 		}
1360 
1361 		if (priv->hw->mode->clean_desc3)
1362 			priv->hw->mode->clean_desc3(priv, p);
1363 
1364 		priv->tx_skbuff_dma[entry].last_segment = false;
1365 		priv->tx_skbuff_dma[entry].is_jumbo = false;
1366 
1367 		if (likely(skb != NULL)) {
1368 			pkts_compl++;
1369 			bytes_compl += skb->len;
1370 			dev_consume_skb_any(skb);
1371 			priv->tx_skbuff[entry] = NULL;
1372 		}
1373 
1374 		priv->hw->desc->release_tx_desc(p, priv->mode);
1375 
1376 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1377 	}
1378 	priv->dirty_tx = entry;
1379 
1380 	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1381 
1382 	if (unlikely(netif_queue_stopped(priv->dev) &&
1383 	    stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
1384 		netif_dbg(priv, tx_done, priv->dev,
1385 			  "%s: restart transmit\n", __func__);
1386 		netif_wake_queue(priv->dev);
1387 	}
1388 
1389 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1390 		stmmac_enable_eee_mode(priv);
1391 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1392 	}
1393 	netif_tx_unlock(priv->dev);
1394 }
1395 
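/* The two helpers below mask/unmask the DMA interrupts around NAPI:
 * the ISR disables them before calling __napi_schedule() and the poll
 * routine is expected to re-enable them once its work is done.
 */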
1396 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1397 {
1398 	priv->hw->dma->enable_dma_irq(priv->ioaddr);
1399 }
1400 
1401 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1402 {
1403 	priv->hw->dma->disable_dma_irq(priv->ioaddr);
1404 }
1405 
1406 /**
1407  * stmmac_tx_err - to manage the tx error
1408  * @priv: driver private structure
1409  * Description: it cleans the descriptors and restarts the transmission
1410  * in case of transmission errors.
1411  */
1412 static void stmmac_tx_err(struct stmmac_priv *priv)
1413 {
1414 	int i;
1415 	netif_stop_queue(priv->dev);
1416 
1417 	priv->hw->dma->stop_tx(priv->ioaddr);
1418 	dma_free_tx_skbufs(priv);
1419 	for (i = 0; i < DMA_TX_SIZE; i++)
1420 		if (priv->extend_desc)
1421 			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1422 						     priv->mode,
1423 						     (i == DMA_TX_SIZE - 1));
1424 		else
1425 			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1426 						     priv->mode,
1427 						     (i == DMA_TX_SIZE - 1));
1428 	priv->dirty_tx = 0;
1429 	priv->cur_tx = 0;
1430 	netdev_reset_queue(priv->dev);
1431 	priv->hw->dma->start_tx(priv->ioaddr);
1432 
1433 	priv->dev->stats.tx_errors++;
1434 	netif_wake_queue(priv->dev);
1435 }
1436 
1437 /**
1438  * stmmac_dma_interrupt - DMA ISR
1439  * @priv: driver private structure
1440  * Description: this is the DMA ISR. It is called by the main ISR.
1441  * It calls the dwmac dma routine and schedules the poll method in case
1442  * some work can be done.
1443  */
1444 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1445 {
1446 	int status;
1447 	int rxfifosz = priv->plat->rx_fifo_size;
1448 
1449 	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1450 	if (likely((status & handle_rx)) || (status & handle_tx)) {
1451 		if (likely(napi_schedule_prep(&priv->napi))) {
1452 			stmmac_disable_dma_irq(priv);
1453 			__napi_schedule(&priv->napi);
1454 		}
1455 	}
1456 	if (unlikely(status & tx_hard_error_bump_tc)) {
1457 		/* Try to bump up the dma threshold on this failure */
1458 		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1459 		    (tc <= 256)) {
1460 			tc += 64;
1461 			if (priv->plat->force_thresh_dma_mode)
1462 				priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
1463 							rxfifosz);
1464 			else
1465 				priv->hw->dma->dma_mode(priv->ioaddr, tc,
1466 							SF_DMA_MODE, rxfifosz);
1467 			priv->xstats.threshold = tc;
1468 		}
1469 	} else if (unlikely(status == tx_hard_error))
1470 		stmmac_tx_err(priv);
1471 }
1472 
1473 /**
1474  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1475  * @priv: driver private structure
1476  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
1477  */
1478 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1479 {
1480 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1481 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1482 
1483 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1484 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1485 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1486 	} else {
1487 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1488 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1489 	}
1490 
1491 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
1492 
1493 	if (priv->dma_cap.rmon) {
1494 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
1495 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1496 	} else
1497 		netdev_info(priv->dev, "No MAC Management Counters available\n");
1498 }
1499 
1500 /**
1501  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
1502  * @priv: driver private structure
1503  * Description: select the Enhanced/Alternate or Normal descriptors.
1504  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1505  * supported by the HW capability register.
1506  */
1507 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1508 {
1509 	if (priv->plat->enh_desc) {
1510 		dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1511 
1512 		/* GMAC older than 3.50 has no extended descriptors */
1513 		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1514 			dev_info(priv->device, "Enabled extended descriptors\n");
1515 			priv->extend_desc = 1;
1516 		} else
1517 			dev_warn(priv->device, "Extended descriptors not supported\n");
1518 
1519 		priv->hw->desc = &enh_desc_ops;
1520 	} else {
1521 		dev_info(priv->device, "Normal descriptors\n");
1522 		priv->hw->desc = &ndesc_ops;
1523 	}
1524 }
1525 
1526 /**
1527  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1528  * @priv: driver private structure
1529  * Description:
1530  *  newer GMAC chip generations have a register to indicate the
1531  *  presence of optional features/functions.
1532  *  This can also be used to override the values passed through the
1533  *  platform, which is necessary for old MAC10/100 and GMAC chips.
1534  */
1535 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1536 {
1537 	u32 ret = 0;
1538 
1539 	if (priv->hw->dma->get_hw_feature) {
1540 		priv->hw->dma->get_hw_feature(priv->ioaddr,
1541 					      &priv->dma_cap);
1542 		ret = 1;
1543 	}
1544 
1545 	return ret;
1546 }
1547 
1548 /**
1549  * stmmac_check_ether_addr - check if the MAC addr is valid
1550  * @priv: driver private structure
1551  * Description:
1552  * it verifies whether the MAC address is valid; in case of failure it
1553  * generates a random MAC address.
1554  */
1555 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1556 {
1557 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1558 		priv->hw->mac->get_umac_addr(priv->hw,
1559 					     priv->dev->dev_addr, 0);
1560 		if (!is_valid_ether_addr(priv->dev->dev_addr))
1561 			eth_hw_addr_random(priv->dev);
1562 		netdev_info(priv->dev, "device MAC address %pM\n",
1563 			    priv->dev->dev_addr);
1564 	}
1565 }
1566 
1567 /**
1568  * stmmac_init_dma_engine - DMA init.
1569  * @priv: driver private structure
1570  * Description:
1571  * It inits the DMA by invoking the specific MAC/GMAC callback.
1572  * Some DMA parameters can be passed from the platform;
1573  * if they are not passed, a default is kept for the MAC or GMAC.
1574  */
1575 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1576 {
1577 	int atds = 0;
1578 	int ret = 0;
1579 
1580 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
1581 		dev_err(priv->device, "Invalid DMA configuration\n");
1582 		return -EINVAL;
1583 	}
1584 
1585 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1586 		atds = 1;
1587 
1588 	ret = priv->hw->dma->reset(priv->ioaddr);
1589 	if (ret) {
1590 		dev_err(priv->device, "Failed to reset the dma\n");
1591 		return ret;
1592 	}
1593 
1594 	priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1595 			    priv->dma_tx_phy, priv->dma_rx_phy, atds);
1596 
1597 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1598 		priv->rx_tail_addr = priv->dma_rx_phy +
1599 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
1600 		priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1601 					       STMMAC_CHAN0);
1602 
1603 		priv->tx_tail_addr = priv->dma_tx_phy +
1604 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
1605 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1606 					       STMMAC_CHAN0);
1607 	}
1608 
1609 	if (priv->plat->axi && priv->hw->dma->axi)
1610 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1611 
1612 	return ret;
1613 }
1614 
1615 /**
1616  * stmmac_tx_timer - mitigation sw timer for tx.
1617  * @data: data pointer
1618  * Description:
1619  * This is the timer handler to directly invoke the stmmac_tx_clean.
1620  */
1621 static void stmmac_tx_timer(unsigned long data)
1622 {
1623 	struct stmmac_priv *priv = (struct stmmac_priv *)data;
1624 
1625 	stmmac_tx_clean(priv);
1626 }
1627 
1628 /**
1629  * stmmac_init_tx_coalesce - init tx mitigation options.
1630  * @priv: driver private structure
1631  * Description:
1632  * This inits the transmit coalesce parameters: i.e. timer rate,
1633  * timer handler and default threshold used for enabling the
1634  * interrupt on completion bit.
1635  */
1636 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1637 {
1638 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
1639 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1640 	init_timer(&priv->txtimer);
1641 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1642 	priv->txtimer.data = (unsigned long)priv;
1643 	priv->txtimer.function = stmmac_tx_timer;
1644 	add_timer(&priv->txtimer);
1645 }
1646 
1647 /**
1648  * stmmac_hw_setup - setup mac in a usable state.
1649  *  @dev : pointer to the device structure.
1650  *  Description:
1651  *  this is the main function to setup the HW in a usable state: the
1652  *  dma engine is reset, the core registers are configured (e.g. AXI,
1653  *  Checksum features, timers) and the DMA is ready to start receiving
1654  *  and transmitting.
1655  *  Return value:
1656  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1657  *  file on failure.
1658  */
1659 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1660 {
1661 	struct stmmac_priv *priv = netdev_priv(dev);
1662 	int ret;
1663 
1664 	/* DMA initialization and SW reset */
1665 	ret = stmmac_init_dma_engine(priv);
1666 	if (ret < 0) {
1667 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
1668 			   __func__);
1669 		return ret;
1670 	}
1671 
1672 	/* Copy the MAC addr into the HW  */
1673 	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1674 
1675 	/* PS and related bits will be programmed according to the speed */
1676 	if (priv->hw->pcs) {
1677 		int speed = priv->plat->mac_port_sel_speed;
1678 
1679 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
1680 		    (speed == SPEED_1000)) {
1681 			priv->hw->ps = speed;
1682 		} else {
1683 			dev_warn(priv->device, "invalid port speed\n");
1684 			priv->hw->ps = 0;
1685 		}
1686 	}
1687 
1688 	/* Initialize the MAC Core */
1689 	priv->hw->mac->core_init(priv->hw, dev->mtu);
1690 
1691 	/* Initialize MAC RX Queues */
1692 	if (priv->hw->mac->rx_queue_enable)
1693 		stmmac_mac_enable_rx_queues(priv);
1694 
1695 	ret = priv->hw->mac->rx_ipc(priv->hw);
1696 	if (!ret) {
1697 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
1698 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1699 		priv->hw->rx_csum = 0;
1700 	}
1701 
1702 	/* Enable the MAC Rx/Tx */
1703 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1704 		stmmac_dwmac4_set_mac(priv->ioaddr, true);
1705 	else
1706 		stmmac_set_mac(priv->ioaddr, true);
1707 
1708 	/* Set the HW DMA mode and the COE */
1709 	stmmac_dma_operation_mode(priv);
1710 
1711 	stmmac_mmc_setup(priv);
1712 
1713 	if (init_ptp) {
1714 		ret = stmmac_init_ptp(priv);
1715 		if (ret == -EOPNOTSUPP)
1716 			netdev_warn(priv->dev, "PTP not supported by HW\n");
1717 		else if (ret)
1718 			netdev_warn(priv->dev, "PTP init failed\n");
1719 	}
1720 
1721 #ifdef CONFIG_DEBUG_FS
1722 	ret = stmmac_init_fs(dev);
1723 	if (ret < 0)
1724 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1725 			    __func__);
1726 #endif
1727 	/* Start the ball rolling... */
1728 	netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
1729 	priv->hw->dma->start_tx(priv->ioaddr);
1730 	priv->hw->dma->start_rx(priv->ioaddr);
1731 
1732 	/* Dump DMA/MAC registers */
1733 	if (netif_msg_hw(priv)) {
1734 		priv->hw->mac->dump_regs(priv->hw);
1735 		priv->hw->dma->dump_regs(priv->ioaddr);
1736 	}
1737 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1738 
1739 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1740 		priv->rx_riwt = MAX_DMA_RIWT;
1741 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1742 	}
1743 
1744 	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
1745 		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
1746 
1747 	/*  set TX ring length */
1748 	if (priv->hw->dma->set_tx_ring_len)
1749 		priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1750 					       (DMA_TX_SIZE - 1));
1751 	/*  set RX ring length */
1752 	if (priv->hw->dma->set_rx_ring_len)
1753 		priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1754 					       (DMA_RX_SIZE - 1));
1755 	/* Enable TSO */
1756 	if (priv->tso)
1757 		priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1758 
1759 	return 0;
1760 }
1761 
1762 /**
1763  *  stmmac_open - open entry point of the driver
1764  *  @dev : pointer to the device structure.
1765  *  Description:
1766  *  This function is the open entry point of the driver.
1767  *  Return value:
1768  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1769  *  file on failure.
1770  */
1771 static int stmmac_open(struct net_device *dev)
1772 {
1773 	struct stmmac_priv *priv = netdev_priv(dev);
1774 	int ret;
1775 
1776 	stmmac_check_ether_addr(priv);
1777 
1778 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1779 	    priv->hw->pcs != STMMAC_PCS_TBI &&
1780 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
1781 		ret = stmmac_init_phy(dev);
1782 		if (ret) {
1783 			netdev_err(priv->dev,
1784 				   "%s: Cannot attach to PHY (error: %d)\n",
1785 				   __func__, ret);
1786 			return ret;
1787 		}
1788 	}
1789 
1790 	/* Extra statistics */
1791 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1792 	priv->xstats.threshold = tc;
1793 
1794 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1795 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
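	/* Note: frames shorter than rx_copybreak are copied into a freshly
	 * allocated skb in stmmac_rx() instead of consuming the pre-mapped
	 * zero-copy buffer (see the copybreak path in stmmac_rx()).
	 */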
1796 
1797 	ret = alloc_dma_desc_resources(priv);
1798 	if (ret < 0) {
1799 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
1800 			   __func__);
1801 		goto dma_desc_error;
1802 	}
1803 
1804 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
1805 	if (ret < 0) {
1806 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
1807 			   __func__);
1808 		goto init_error;
1809 	}
1810 
1811 	ret = stmmac_hw_setup(dev, true);
1812 	if (ret < 0) {
1813 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
1814 		goto init_error;
1815 	}
1816 
1817 	stmmac_init_tx_coalesce(priv);
1818 
1819 	if (dev->phydev)
1820 		phy_start(dev->phydev);
1821 
1822 	/* Request the IRQ lines */
1823 	ret = request_irq(dev->irq, stmmac_interrupt,
1824 			  IRQF_SHARED, dev->name, dev);
1825 	if (unlikely(ret < 0)) {
1826 		netdev_err(priv->dev,
1827 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1828 			   __func__, dev->irq, ret);
1829 		goto init_error;
1830 	}
1831 
	/* Request the Wake IRQ in case another line is used for WoL */
1833 	if (priv->wol_irq != dev->irq) {
1834 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
1835 				  IRQF_SHARED, dev->name, dev);
1836 		if (unlikely(ret < 0)) {
1837 			netdev_err(priv->dev,
1838 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1839 				   __func__, priv->wol_irq, ret);
1840 			goto wolirq_error;
1841 		}
1842 	}
1843 
	/* Request the LPI IRQ in case a separate line is used for it */
1845 	if (priv->lpi_irq > 0) {
1846 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1847 				  dev->name, dev);
1848 		if (unlikely(ret < 0)) {
1849 			netdev_err(priv->dev,
1850 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1851 				   __func__, priv->lpi_irq, ret);
1852 			goto lpiirq_error;
1853 		}
1854 	}
1855 
1856 	napi_enable(&priv->napi);
1857 	netif_start_queue(dev);
1858 
1859 	return 0;
1860 
1861 lpiirq_error:
1862 	if (priv->wol_irq != dev->irq)
1863 		free_irq(priv->wol_irq, dev);
1864 wolirq_error:
1865 	free_irq(dev->irq, dev);
1866 
1867 init_error:
1868 	free_dma_desc_resources(priv);
1869 dma_desc_error:
1870 	if (dev->phydev)
1871 		phy_disconnect(dev->phydev);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  *  stmmac_release - close entry point of the driver
1878  *  @dev : device pointer.
1879  *  Description:
1880  *  This is the stop entry point of the driver.
1881  */
1882 static int stmmac_release(struct net_device *dev)
1883 {
1884 	struct stmmac_priv *priv = netdev_priv(dev);
1885 
1886 	if (priv->eee_enabled)
1887 		del_timer_sync(&priv->eee_ctrl_timer);
1888 
1889 	/* Stop and disconnect the PHY */
1890 	if (dev->phydev) {
1891 		phy_stop(dev->phydev);
1892 		phy_disconnect(dev->phydev);
1893 	}
1894 
1895 	netif_stop_queue(dev);
1896 
1897 	napi_disable(&priv->napi);
1898 
1899 	del_timer_sync(&priv->txtimer);
1900 
1901 	/* Free the IRQ lines */
1902 	free_irq(dev->irq, dev);
1903 	if (priv->wol_irq != dev->irq)
1904 		free_irq(priv->wol_irq, dev);
1905 	if (priv->lpi_irq > 0)
1906 		free_irq(priv->lpi_irq, dev);
1907 
1908 	/* Stop TX/RX DMA and clear the descriptors */
1909 	priv->hw->dma->stop_tx(priv->ioaddr);
1910 	priv->hw->dma->stop_rx(priv->ioaddr);
1911 
1912 	/* Release and free the Rx/Tx resources */
1913 	free_dma_desc_resources(priv);
1914 
1915 	/* Disable the MAC Rx/Tx */
1916 	stmmac_set_mac(priv->ioaddr, false);
1917 
1918 	netif_carrier_off(dev);
1919 
1920 #ifdef CONFIG_DEBUG_FS
1921 	stmmac_exit_fs(dev);
1922 #endif
1923 
1924 	stmmac_release_ptp(priv);
1925 
1926 	return 0;
1927 }
1928 
1929 /**
1930  *  stmmac_tso_allocator - close entry point of the driver
1931  *  @priv: driver private structure
1932  *  @des: buffer start address
1933  *  @total_len: total length to fill in descriptors
1934  *  @last_segmant: condition for the last descriptor
1935  *  Description:
1936  *  This function fills descriptor and request new descriptors according to
1937  *  buffer length to fill
1938  */
1939 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1940 				 int total_len, bool last_segment)
1941 {
1942 	struct dma_desc *desc;
1943 	int tmp_len;
1944 	u32 buff_size;
1945 
1946 	tmp_len = total_len;
1947 
1948 	while (tmp_len > 0) {
1949 		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1950 		desc = priv->dma_tx + priv->cur_tx;
1951 
1952 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
1953 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
1954 			    TSO_MAX_BUFF_SIZE : tmp_len;
1955 
1956 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
1957 			0, 1,
1958 			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
1959 			0, 0);
1960 
1961 		tmp_len -= TSO_MAX_BUFF_SIZE;
1962 	}
1963 }
1964 
1965 /**
1966  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
1967  *  @skb : the socket buffer
1968  *  @dev : device pointer
1969  *  Description: this is the transmit function that is called on TSO frames
1970  *  (support available on GMAC4 and newer chips).
 *  Diagram below shows the ring programming in case of TSO frames:
1972  *
1973  *  First Descriptor
1974  *   --------
1975  *   | DES0 |---> buffer1 = L2/L3/L4 header
1976  *   | DES1 |---> TCP Payload (can continue on next descr...)
1977  *   | DES2 |---> buffer 1 and 2 len
1978  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
1979  *   --------
1980  *	|
1981  *     ...
1982  *	|
1983  *   --------
1984  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
1985  *   | DES1 | --|
1986  *   | DES2 | --> buffer 1 and 2 len
1987  *   | DES3 |
1988  *   --------
1989  *
 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs
 * to be programmed again when the MSS value changes.
1991  */
1992 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
1993 {
1994 	u32 pay_len, mss;
1995 	int tmp_pay_len = 0;
1996 	struct stmmac_priv *priv = netdev_priv(dev);
1997 	int nfrags = skb_shinfo(skb)->nr_frags;
1998 	unsigned int first_entry, des;
1999 	struct dma_desc *desc, *first, *mss_desc = NULL;
2000 	u8 proto_hdr_len;
2001 	int i;
2002 
2003 	/* Compute header lengths */
2004 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2005 
	/* Desc availability based on threshold should be safe enough */
2007 	if (unlikely(stmmac_tx_avail(priv) <
2008 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2009 		if (!netif_queue_stopped(dev)) {
2010 			netif_stop_queue(dev);
2011 			/* This is a hard error, log it. */
2012 			netdev_err(priv->dev,
2013 				   "%s: Tx Ring full when queue awake\n",
2014 				   __func__);
2015 		}
2016 		return NETDEV_TX_BUSY;
2017 	}
2018 
2019 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2020 
2021 	mss = skb_shinfo(skb)->gso_size;
2022 
2023 	/* set new MSS value if needed */
2024 	if (mss != priv->mss) {
2025 		mss_desc = priv->dma_tx + priv->cur_tx;
2026 		priv->hw->desc->set_mss(mss_desc, mss);
2027 		priv->mss = mss;
2028 		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2029 	}
2030 
2031 	if (netif_msg_tx_queued(priv)) {
2032 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2033 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2034 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2035 			skb->data_len);
2036 	}
2037 
2038 	first_entry = priv->cur_tx;
2039 
2040 	desc = priv->dma_tx + first_entry;
2041 	first = desc;
2042 
2043 	/* first descriptor: fill Headers on Buf1 */
2044 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2045 			     DMA_TO_DEVICE);
2046 	if (dma_mapping_error(priv->device, des))
2047 		goto dma_map_err;
2048 
2049 	priv->tx_skbuff_dma[first_entry].buf = des;
2050 	priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2051 	priv->tx_skbuff[first_entry] = skb;
2052 
2053 	first->des0 = cpu_to_le32(des);
2054 
2055 	/* Fill start of payload in buff2 of first descriptor */
2056 	if (pay_len)
2057 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2058 
2059 	/* If needed take extra descriptors to fill the remaining payload */
2060 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2061 
2062 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2063 
2064 	/* Prepare fragments */
2065 	for (i = 0; i < nfrags; i++) {
2066 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2067 
		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

2072 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2073 				     (i == nfrags - 1));
2074 
2075 		priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2076 		priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2077 		priv->tx_skbuff[priv->cur_tx] = NULL;
2078 		priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2079 	}
2080 
2081 	priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2082 
2083 	priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2084 
2085 	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2086 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2087 			  __func__);
2088 		netif_stop_queue(dev);
2089 	}
2090 
2091 	dev->stats.tx_bytes += skb->len;
2092 	priv->xstats.tx_tso_frames++;
2093 	priv->xstats.tx_tso_nfrags += nfrags;
2094 
2095 	/* Manage tx mitigation */
2096 	priv->tx_count_frames += nfrags + 1;
2097 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2098 		mod_timer(&priv->txtimer,
2099 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2100 	} else {
2101 		priv->tx_count_frames = 0;
2102 		priv->hw->desc->set_tx_ic(desc);
2103 		priv->xstats.tx_set_ic_bit++;
2104 	}
2105 
2106 	if (!priv->hwts_tx_en)
2107 		skb_tx_timestamp(skb);
2108 
2109 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2110 		     priv->hwts_tx_en)) {
2111 		/* declare that device is doing timestamping */
2112 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2113 		priv->hw->desc->enable_tx_timestamp(first);
2114 	}
2115 
2116 	/* Complete the first descriptor before granting the DMA */
2117 	priv->hw->desc->prepare_tso_tx_desc(first, 1,
2118 			proto_hdr_len,
2119 			pay_len,
2120 			1, priv->tx_skbuff_dma[first_entry].last_segment,
2121 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2122 
2123 	/* If context desc is used to change MSS */
2124 	if (mss_desc)
2125 		priv->hw->desc->set_tx_owner(mss_desc);
2126 
2127 	/* The own bit must be the latest setting done when prepare the
2128 	 * descriptor and then barrier is needed to make sure that
2129 	 * all is coherent before granting the DMA engine.
2130 	 */
2131 	dma_wmb();
2132 
2133 	if (netif_msg_pktdata(priv)) {
2134 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2135 			__func__, priv->cur_tx, priv->dirty_tx, first_entry,
2136 			priv->cur_tx, first, nfrags);
2137 
2138 		priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2139 					     0);
2140 
2141 		pr_info(">>> frame to be transmitted: ");
2142 		print_pkt(skb->data, skb_headlen(skb));
2143 	}
2144 
2145 	netdev_sent_queue(dev, skb->len);
2146 
2147 	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2148 				       STMMAC_CHAN0);
2149 
2150 	return NETDEV_TX_OK;
2151 
2152 dma_map_err:
2153 	dev_err(priv->device, "Tx dma map failed\n");
2154 	dev_kfree_skb(skb);
2155 	priv->dev->stats.tx_dropped++;
2156 	return NETDEV_TX_OK;
2157 }
2158 
2159 /**
2160  *  stmmac_xmit - Tx entry point of the driver
2161  *  @skb : the socket buffer
2162  *  @dev : device pointer
2163  *  Description : this is the tx entry point of the driver.
2164  *  It programs the chain or the ring and supports oversized frames
2165  *  and SG feature.
2166  */
2167 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2168 {
2169 	struct stmmac_priv *priv = netdev_priv(dev);
2170 	unsigned int nopaged_len = skb_headlen(skb);
2171 	int i, csum_insertion = 0, is_jumbo = 0;
2172 	int nfrags = skb_shinfo(skb)->nr_frags;
2173 	unsigned int entry, first_entry;
2174 	struct dma_desc *desc, *first;
2175 	unsigned int enh_desc;
2176 	unsigned int des;
2177 
2178 	/* Manage oversized TCP frames for GMAC4 device */
2179 	if (skb_is_gso(skb) && priv->tso) {
2180 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2181 			return stmmac_tso_xmit(skb, dev);
2182 	}
2183 
2184 	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2185 		if (!netif_queue_stopped(dev)) {
2186 			netif_stop_queue(dev);
2187 			/* This is a hard error, log it. */
2188 			netdev_err(priv->dev,
2189 				   "%s: Tx Ring full when queue awake\n",
2190 				   __func__);
2191 		}
2192 		return NETDEV_TX_BUSY;
2193 	}
2194 
2195 	if (priv->tx_path_in_lpi_mode)
2196 		stmmac_disable_eee_mode(priv);
2197 
2198 	entry = priv->cur_tx;
2199 	first_entry = entry;
2200 
2201 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2202 
2203 	if (likely(priv->extend_desc))
2204 		desc = (struct dma_desc *)(priv->dma_etx + entry);
2205 	else
2206 		desc = priv->dma_tx + entry;
2207 
2208 	first = desc;
2209 
2210 	priv->tx_skbuff[first_entry] = skb;
2211 
2212 	enh_desc = priv->plat->enh_desc;
2213 	/* To program the descriptors according to the size of the frame */
2214 	if (enh_desc)
2215 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2216 
2217 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2218 					 DWMAC_CORE_4_00)) {
2219 		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
2220 		if (unlikely(entry < 0))
2221 			goto dma_map_err;
2222 	}
2223 
2224 	for (i = 0; i < nfrags; i++) {
2225 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2226 		int len = skb_frag_size(frag);
2227 		bool last_segment = (i == (nfrags - 1));
2228 
2229 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2230 
2231 		if (likely(priv->extend_desc))
2232 			desc = (struct dma_desc *)(priv->dma_etx + entry);
2233 		else
2234 			desc = priv->dma_tx + entry;
2235 
2236 		des = skb_frag_dma_map(priv->device, frag, 0, len,
2237 				       DMA_TO_DEVICE);
2238 		if (dma_mapping_error(priv->device, des))
2239 			goto dma_map_err; /* should reuse desc w/o issues */
2240 
2241 		priv->tx_skbuff[entry] = NULL;
2242 
2243 		priv->tx_skbuff_dma[entry].buf = des;
2244 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2245 			desc->des0 = cpu_to_le32(des);
2246 		else
2247 			desc->des2 = cpu_to_le32(des);
2248 
2249 		priv->tx_skbuff_dma[entry].map_as_page = true;
2250 		priv->tx_skbuff_dma[entry].len = len;
2251 		priv->tx_skbuff_dma[entry].last_segment = last_segment;
2252 
2253 		/* Prepare the descriptor and set the own bit too */
2254 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2255 						priv->mode, 1, last_segment);
2256 	}
2257 
2258 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2259 
2260 	priv->cur_tx = entry;
2261 
2262 	if (netif_msg_pktdata(priv)) {
2263 		void *tx_head;
2264 
2265 		netdev_dbg(priv->dev,
2266 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2267 			   __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2268 			   entry, first, nfrags);
2269 
2270 		if (priv->extend_desc)
2271 			tx_head = (void *)priv->dma_etx;
2272 		else
2273 			tx_head = (void *)priv->dma_tx;
2274 
2275 		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2276 
2277 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2278 		print_pkt(skb->data, skb->len);
2279 	}
2280 
2281 	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2282 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2283 			  __func__);
2284 		netif_stop_queue(dev);
2285 	}
2286 
2287 	dev->stats.tx_bytes += skb->len;
2288 
2289 	/* According to the coalesce parameter the IC bit for the latest
2290 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
2292 	 * element in case of no SG.
2293 	 */
2294 	priv->tx_count_frames += nfrags + 1;
2295 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2296 		mod_timer(&priv->txtimer,
2297 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2298 	} else {
2299 		priv->tx_count_frames = 0;
2300 		priv->hw->desc->set_tx_ic(desc);
2301 		priv->xstats.tx_set_ic_bit++;
2302 	}
2303 
2304 	if (!priv->hwts_tx_en)
2305 		skb_tx_timestamp(skb);
2306 
2307 	/* Ready to fill the first descriptor and set the OWN bit w/o any
2308 	 * problems because all the descriptors are actually ready to be
2309 	 * passed to the DMA engine.
2310 	 */
2311 	if (likely(!is_jumbo)) {
2312 		bool last_segment = (nfrags == 0);
2313 
2314 		des = dma_map_single(priv->device, skb->data,
2315 				     nopaged_len, DMA_TO_DEVICE);
2316 		if (dma_mapping_error(priv->device, des))
2317 			goto dma_map_err;
2318 
2319 		priv->tx_skbuff_dma[first_entry].buf = des;
2320 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2321 			first->des0 = cpu_to_le32(des);
2322 		else
2323 			first->des2 = cpu_to_le32(des);
2324 
2325 		priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2326 		priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2327 
2328 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2329 			     priv->hwts_tx_en)) {
2330 			/* declare that device is doing timestamping */
2331 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2332 			priv->hw->desc->enable_tx_timestamp(first);
2333 		}
2334 
2335 		/* Prepare the first descriptor setting the OWN bit too */
2336 		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2337 						csum_insertion, priv->mode, 1,
2338 						last_segment);
2339 
2340 		/* The own bit must be the latest setting done when prepare the
2341 		 * descriptor and then barrier is needed to make sure that
2342 		 * all is coherent before granting the DMA engine.
2343 		 */
2344 		dma_wmb();
2345 	}
2346 
2347 	netdev_sent_queue(dev, skb->len);
2348 
2349 	if (priv->synopsys_id < DWMAC_CORE_4_00)
2350 		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2351 	else
2352 		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2353 					       STMMAC_CHAN0);
2354 
2355 	return NETDEV_TX_OK;
2356 
2357 dma_map_err:
2358 	netdev_err(priv->dev, "Tx DMA map failed\n");
2359 	dev_kfree_skb(skb);
2360 	priv->dev->stats.tx_dropped++;
2361 	return NETDEV_TX_OK;
2362 }
2363 
2364 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2365 {
2366 	struct ethhdr *ehdr;
2367 	u16 vlanid;
2368 
2369 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2370 	    NETIF_F_HW_VLAN_CTAG_RX &&
2371 	    !__vlan_get_tag(skb, &vlanid)) {
2372 		/* pop the vlan tag */
2373 		ehdr = (struct ethhdr *)skb->data;
2374 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2375 		skb_pull(skb, VLAN_HLEN);
2376 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2377 	}
2378 }
2379 
2380 
2381 static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2382 {
2383 	if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2384 		return 0;
2385 
2386 	return 1;
2387 }
2388 
2389 /**
2390  * stmmac_rx_refill - refill used skb preallocated buffers
2391  * @priv: driver private structure
2392  * Description : this is to reallocate the skb for the reception process
2393  * that is based on zero-copy.
2394  */
2395 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2396 {
2397 	int bfsize = priv->dma_buf_sz;
2398 	unsigned int entry = priv->dirty_rx;
2399 	int dirty = stmmac_rx_dirty(priv);
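
	/* Each dirty entry (between dirty_rx and cur_rx) gets a fresh skb if
	 * it lost its buffer, and is then handed back to the DMA by setting
	 * the own bit again.
	 */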
2400 
2401 	while (dirty-- > 0) {
2402 		struct dma_desc *p;
2403 
2404 		if (priv->extend_desc)
2405 			p = (struct dma_desc *)(priv->dma_erx + entry);
2406 		else
2407 			p = priv->dma_rx + entry;
2408 
2409 		if (likely(priv->rx_skbuff[entry] == NULL)) {
2410 			struct sk_buff *skb;
2411 
2412 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2413 			if (unlikely(!skb)) {
2414 				/* so for a while no zero-copy! */
2415 				priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2416 				if (unlikely(net_ratelimit()))
2417 					dev_err(priv->device,
2418 						"fail to alloc skb entry %d\n",
2419 						entry);
2420 				break;
2421 			}
2422 
2423 			priv->rx_skbuff[entry] = skb;
2424 			priv->rx_skbuff_dma[entry] =
2425 			    dma_map_single(priv->device, skb->data, bfsize,
2426 					   DMA_FROM_DEVICE);
2427 			if (dma_mapping_error(priv->device,
2428 					      priv->rx_skbuff_dma[entry])) {
2429 				netdev_err(priv->dev, "Rx DMA map failed\n");
2430 				dev_kfree_skb(skb);
2431 				break;
2432 			}
2433 
2434 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2435 				p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2436 				p->des1 = 0;
2437 			} else {
2438 				p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2439 			}
2440 			if (priv->hw->mode->refill_desc3)
2441 				priv->hw->mode->refill_desc3(priv, p);
2442 
2443 			if (priv->rx_zeroc_thresh > 0)
2444 				priv->rx_zeroc_thresh--;
2445 
2446 			netif_dbg(priv, rx_status, priv->dev,
2447 				  "refill entry #%d\n", entry);
2448 		}
2449 		dma_wmb();
2450 
2451 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2452 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2453 		else
2454 			priv->hw->desc->set_rx_owner(p);
2455 
2456 		dma_wmb();
2457 
2458 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2459 	}
2460 	priv->dirty_rx = entry;
2461 }
2462 
2463 /**
2464  * stmmac_rx - manage the receive process
2465  * @priv: driver private structure
 * @limit: napi budget.
 * Description :  this is the function called by the napi poll method.
2468  * It gets all the frames inside the ring.
2469  */
2470 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2471 {
2472 	unsigned int entry = priv->cur_rx;
2473 	unsigned int next_entry;
2474 	unsigned int count = 0;
2475 	int coe = priv->hw->rx_csum;
2476 
2477 	if (netif_msg_rx_status(priv)) {
2478 		void *rx_head;
2479 
2480 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2481 		if (priv->extend_desc)
2482 			rx_head = (void *)priv->dma_erx;
2483 		else
2484 			rx_head = (void *)priv->dma_rx;
2485 
2486 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2487 	}
2488 	while (count < limit) {
2489 		int status;
2490 		struct dma_desc *p;
2491 		struct dma_desc *np;
2492 
2493 		if (priv->extend_desc)
2494 			p = (struct dma_desc *)(priv->dma_erx + entry);
2495 		else
2496 			p = priv->dma_rx + entry;
2497 
2498 		/* read the status of the incoming frame */
2499 		status = priv->hw->desc->rx_status(&priv->dev->stats,
2500 						   &priv->xstats, p);
2501 		/* check if managed by the DMA otherwise go ahead */
2502 		if (unlikely(status & dma_own))
2503 			break;
2504 
2505 		count++;
2506 
2507 		priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2508 		next_entry = priv->cur_rx;
2509 
2510 		if (priv->extend_desc)
2511 			np = (struct dma_desc *)(priv->dma_erx + next_entry);
2512 		else
2513 			np = priv->dma_rx + next_entry;
2514 
2515 		prefetch(np);
2516 
2517 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2518 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
2519 							   &priv->xstats,
2520 							   priv->dma_erx +
2521 							   entry);
2522 		if (unlikely(status == discard_frame)) {
2523 			priv->dev->stats.rx_errors++;
2524 			if (priv->hwts_rx_en && !priv->extend_desc) {
2525 				/* DESC2 & DESC3 will be overwritten by device
2526 				 * with timestamp value, hence reinitialize
2527 				 * them in stmmac_rx_refill() function so that
2528 				 * device can reuse it.
2529 				 */
2530 				priv->rx_skbuff[entry] = NULL;
2531 				dma_unmap_single(priv->device,
2532 						 priv->rx_skbuff_dma[entry],
2533 						 priv->dma_buf_sz,
2534 						 DMA_FROM_DEVICE);
2535 			}
2536 		} else {
2537 			struct sk_buff *skb;
2538 			int frame_len;
2539 			unsigned int des;
2540 
2541 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2542 				des = le32_to_cpu(p->des0);
2543 			else
2544 				des = le32_to_cpu(p->des2);
2545 
2546 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2547 
2548 			/*  If frame length is greater than skb buffer size
2549 			 *  (preallocated during init) then the packet is
2550 			 *  ignored
2551 			 */
2552 			if (frame_len > priv->dma_buf_sz) {
2553 				netdev_err(priv->dev,
2554 					   "len %d larger than size (%d)\n",
2555 					   frame_len, priv->dma_buf_sz);
2556 				priv->dev->stats.rx_length_errors++;
2557 				break;
2558 			}
2559 
2560 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2561 			 * Type frames (LLC/LLC-SNAP)
2562 			 */
2563 			if (unlikely(status != llc_snap))
2564 				frame_len -= ETH_FCS_LEN;
2565 
2566 			if (netif_msg_rx_status(priv)) {
2567 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2568 					   p, entry, des);
2569 				if (frame_len > ETH_FRAME_LEN)
2570 					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2571 						   frame_len, status);
2572 			}
2573 
			/* Zero-copy is always used for all sizes in case of
			 * GMAC4 because the used descriptors always need to
			 * be refilled.
2577 			 */
2578 			if (unlikely(!priv->plat->has_gmac4 &&
2579 				     ((frame_len < priv->rx_copybreak) ||
2580 				     stmmac_rx_threshold_count(priv)))) {
2581 				skb = netdev_alloc_skb_ip_align(priv->dev,
2582 								frame_len);
2583 				if (unlikely(!skb)) {
2584 					if (net_ratelimit())
2585 						dev_warn(priv->device,
2586 							 "packet dropped\n");
2587 					priv->dev->stats.rx_dropped++;
2588 					break;
2589 				}
2590 
2591 				dma_sync_single_for_cpu(priv->device,
2592 							priv->rx_skbuff_dma
2593 							[entry], frame_len,
2594 							DMA_FROM_DEVICE);
2595 				skb_copy_to_linear_data(skb,
2596 							priv->
2597 							rx_skbuff[entry]->data,
2598 							frame_len);
2599 
2600 				skb_put(skb, frame_len);
2601 				dma_sync_single_for_device(priv->device,
2602 							   priv->rx_skbuff_dma
2603 							   [entry], frame_len,
2604 							   DMA_FROM_DEVICE);
2605 			} else {
2606 				skb = priv->rx_skbuff[entry];
2607 				if (unlikely(!skb)) {
2608 					netdev_err(priv->dev,
2609 						   "%s: Inconsistent Rx chain\n",
2610 						   priv->dev->name);
2611 					priv->dev->stats.rx_dropped++;
2612 					break;
2613 				}
2614 				prefetch(skb->data - NET_IP_ALIGN);
2615 				priv->rx_skbuff[entry] = NULL;
2616 				priv->rx_zeroc_thresh++;
2617 
2618 				skb_put(skb, frame_len);
2619 				dma_unmap_single(priv->device,
2620 						 priv->rx_skbuff_dma[entry],
2621 						 priv->dma_buf_sz,
2622 						 DMA_FROM_DEVICE);
2623 			}
2624 
2625 			if (netif_msg_pktdata(priv)) {
2626 				netdev_dbg(priv->dev, "frame received (%dbytes)",
2627 					   frame_len);
2628 				print_pkt(skb->data, frame_len);
2629 			}
2630 
2631 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
2632 
2633 			stmmac_rx_vlan(priv->dev, skb);
2634 
2635 			skb->protocol = eth_type_trans(skb, priv->dev);
2636 
2637 			if (unlikely(!coe))
2638 				skb_checksum_none_assert(skb);
2639 			else
2640 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2641 
2642 			napi_gro_receive(&priv->napi, skb);
2643 
2644 			priv->dev->stats.rx_packets++;
2645 			priv->dev->stats.rx_bytes += frame_len;
2646 		}
2647 		entry = next_entry;
2648 	}
2649 
2650 	stmmac_rx_refill(priv);
2651 
2652 	priv->xstats.rx_pkt_n += count;
2653 
2654 	return count;
2655 }
2656 
2657 /**
2658  *  stmmac_poll - stmmac poll method (NAPI)
2659  *  @napi : pointer to the napi structure.
2660  *  @budget : maximum number of packets that the current CPU can receive from
2661  *	      all interfaces.
2662  *  Description :
2663  *  To look at the incoming frames and clear the tx resources.
2664  */
2665 static int stmmac_poll(struct napi_struct *napi, int budget)
2666 {
2667 	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2668 	int work_done = 0;
2669 
2670 	priv->xstats.napi_poll++;
2671 	stmmac_tx_clean(priv);
2672 
2673 	work_done = stmmac_rx(priv, budget);
2674 	if (work_done < budget) {
2675 		napi_complete_done(napi, work_done);
2676 		stmmac_enable_dma_irq(priv);
2677 	}
2678 	return work_done;
2679 }
2680 
2681 /**
2682  *  stmmac_tx_timeout
2683  *  @dev : Pointer to net device structure
2684  *  Description: this function is called when a packet transmission fails to
2685  *   complete within a reasonable time. The driver will mark the error in the
2686  *   netdev structure and arrange for the device to be reset to a sane state
2687  *   in order to transmit a new packet.
2688  */
2689 static void stmmac_tx_timeout(struct net_device *dev)
2690 {
2691 	struct stmmac_priv *priv = netdev_priv(dev);
2692 
2693 	/* Clear Tx resources and restart transmitting again */
2694 	stmmac_tx_err(priv);
2695 }
2696 
2697 /**
2698  *  stmmac_set_rx_mode - entry point for multicast addressing
2699  *  @dev : pointer to the device structure
2700  *  Description:
2701  *  This function is a driver entry point which gets called by the kernel
2702  *  whenever multicast addresses must be enabled/disabled.
2703  *  Return value:
2704  *  void.
2705  */
2706 static void stmmac_set_rx_mode(struct net_device *dev)
2707 {
2708 	struct stmmac_priv *priv = netdev_priv(dev);
2709 
2710 	priv->hw->mac->set_filter(priv->hw, dev);
2711 }
2712 
2713 /**
2714  *  stmmac_change_mtu - entry point to change MTU size for the device.
2715  *  @dev : device pointer.
2716  *  @new_mtu : the new MTU size for the device.
2717  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
2718  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
2719  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2720  *  Return value:
2721  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2722  *  file on failure.
2723  */
2724 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2725 {
2726 	struct stmmac_priv *priv = netdev_priv(dev);
2727 
2728 	if (netif_running(dev)) {
2729 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
2730 		return -EBUSY;
2731 	}
2732 
2733 	dev->mtu = new_mtu;
2734 
2735 	netdev_update_features(dev);
2736 
2737 	return 0;
2738 }
2739 
2740 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2741 					     netdev_features_t features)
2742 {
2743 	struct stmmac_priv *priv = netdev_priv(dev);
2744 
2745 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2746 		features &= ~NETIF_F_RXCSUM;
2747 
2748 	if (!priv->plat->tx_coe)
2749 		features &= ~NETIF_F_CSUM_MASK;
2750 
	/* Some GMAC devices have bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and don't use SF.
2755 	 */
2756 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2757 		features &= ~NETIF_F_CSUM_MASK;
2758 
	/* Enable or disable TSO according to what ethtool asked for */
2760 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2761 		if (features & NETIF_F_TSO)
2762 			priv->tso = true;
2763 		else
2764 			priv->tso = false;
2765 	}
2766 
2767 	return features;
2768 }
2769 
2770 static int stmmac_set_features(struct net_device *netdev,
2771 			       netdev_features_t features)
2772 {
2773 	struct stmmac_priv *priv = netdev_priv(netdev);
2774 
2775 	/* Keep the COE Type in case of csum is supporting */
2776 	if (features & NETIF_F_RXCSUM)
2777 		priv->hw->rx_csum = priv->plat->rx_coe;
2778 	else
2779 		priv->hw->rx_csum = 0;
2780 	/* No check needed because rx_coe has been set before and it will be
2781 	 * fixed in case of issue.
2782 	 */
2783 	priv->hw->mac->rx_ipc(priv->hw);
2784 
2785 	return 0;
2786 }
2787 
2788 /**
2789  *  stmmac_interrupt - main ISR
2790  *  @irq: interrupt number.
2791  *  @dev_id: to pass the net device pointer.
2792  *  Description: this is the main driver interrupt service routine.
2793  *  It can call:
2794  *  o DMA service routine (to manage incoming frame reception and transmission
2795  *    status)
2796  *  o Core interrupts to manage: remote wake-up, management counter, LPI
2797  *    interrupts.
2798  */
2799 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2800 {
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv;

	/* Check the dev pointer before anything is dereferenced from it */
	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	priv = netdev_priv(dev);

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);
2811 
2812 	/* To handle GMAC own interrupts */
2813 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2814 		int status = priv->hw->mac->host_irq_status(priv->hw,
2815 							    &priv->xstats);
2816 		if (unlikely(status)) {
2817 			/* For LPI we need to save the tx status */
2818 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2819 				priv->tx_path_in_lpi_mode = true;
2820 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2821 				priv->tx_path_in_lpi_mode = false;
2822 			if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
2823 				priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2824 							priv->rx_tail_addr,
2825 							STMMAC_CHAN0);
2826 		}
2827 
2828 		/* PCS link status */
2829 		if (priv->hw->pcs) {
2830 			if (priv->xstats.pcs_link)
2831 				netif_carrier_on(dev);
2832 			else
2833 				netif_carrier_off(dev);
2834 		}
2835 	}
2836 
2837 	/* To handle DMA interrupts */
2838 	stmmac_dma_interrupt(priv);
2839 
2840 	return IRQ_HANDLED;
2841 }
2842 
2843 #ifdef CONFIG_NET_POLL_CONTROLLER
2844 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2845  * to allow network I/O with interrupts disabled.
2846  */
2847 static void stmmac_poll_controller(struct net_device *dev)
2848 {
2849 	disable_irq(dev->irq);
2850 	stmmac_interrupt(dev->irq, dev);
2851 	enable_irq(dev->irq);
2852 }
2853 #endif
2854 
2855 /**
2856  *  stmmac_ioctl - Entry point for the Ioctl
2857  *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
2859  *  a proprietary structure used to pass information to the driver.
2860  *  @cmd: IOCTL command
2861  *  Description:
2862  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2863  */
2864 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2865 {
2866 	int ret = -EOPNOTSUPP;
2867 
2868 	if (!netif_running(dev))
2869 		return -EINVAL;
2870 
2871 	switch (cmd) {
2872 	case SIOCGMIIPHY:
2873 	case SIOCGMIIREG:
2874 	case SIOCSMIIREG:
2875 		if (!dev->phydev)
2876 			return -EINVAL;
2877 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
2878 		break;
2879 	case SIOCSHWTSTAMP:
2880 		ret = stmmac_hwtstamp_ioctl(dev, rq);
2881 		break;
2882 	default:
2883 		break;
2884 	}
2885 
2886 	return ret;
2887 }
2888 
2889 #ifdef CONFIG_DEBUG_FS
2890 static struct dentry *stmmac_fs_dir;
2891 
2892 static void sysfs_display_ring(void *head, int size, int extend_desc,
2893 			       struct seq_file *seq)
2894 {
2895 	int i;
2896 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2897 	struct dma_desc *p = (struct dma_desc *)head;
2898 
2899 	for (i = 0; i < size; i++) {
2900 		if (extend_desc) {
2901 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2902 				   i, (unsigned int)virt_to_phys(ep),
2903 				   le32_to_cpu(ep->basic.des0),
2904 				   le32_to_cpu(ep->basic.des1),
2905 				   le32_to_cpu(ep->basic.des2),
2906 				   le32_to_cpu(ep->basic.des3));
2907 			ep++;
2908 		} else {
2909 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
2911 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
2912 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
2913 			p++;
2914 		}
2915 		seq_printf(seq, "\n");
2916 	}
2917 }
2918 
2919 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2920 {
2921 	struct net_device *dev = seq->private;
2922 	struct stmmac_priv *priv = netdev_priv(dev);
2923 
2924 	if (priv->extend_desc) {
2925 		seq_printf(seq, "Extended RX descriptor ring:\n");
2926 		sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
2927 		seq_printf(seq, "Extended TX descriptor ring:\n");
2928 		sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
2929 	} else {
2930 		seq_printf(seq, "RX descriptor ring:\n");
2931 		sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
2932 		seq_printf(seq, "TX descriptor ring:\n");
2933 		sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
2934 	}
2935 
2936 	return 0;
2937 }
2938 
2939 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2940 {
2941 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2942 }
2943 
2944 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
2945 
2946 static const struct file_operations stmmac_rings_status_fops = {
2947 	.owner = THIS_MODULE,
2948 	.open = stmmac_sysfs_ring_open,
2949 	.read = seq_read,
2950 	.llseek = seq_lseek,
2951 	.release = single_release,
2952 };
2953 
2954 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2955 {
2956 	struct net_device *dev = seq->private;
2957 	struct stmmac_priv *priv = netdev_priv(dev);
2958 
2959 	if (!priv->hw_cap_support) {
2960 		seq_printf(seq, "DMA HW features not supported\n");
2961 		return 0;
2962 	}
2963 
2964 	seq_printf(seq, "==============================\n");
2965 	seq_printf(seq, "\tDMA HW features\n");
2966 	seq_printf(seq, "==============================\n");
2967 
2968 	seq_printf(seq, "\t10/100 Mbps: %s\n",
2969 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2970 	seq_printf(seq, "\t1000 Mbps: %s\n",
2971 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
2972 	seq_printf(seq, "\tHalf duplex: %s\n",
2973 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
2974 	seq_printf(seq, "\tHash Filter: %s\n",
2975 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
2976 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2977 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
2978 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
2979 		   (priv->dma_cap.pcs) ? "Y" : "N");
2980 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2981 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
2982 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
2983 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2984 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
2985 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2986 	seq_printf(seq, "\tRMON module: %s\n",
2987 		   (priv->dma_cap.rmon) ? "Y" : "N");
2988 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2989 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
2990 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
2991 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
2992 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
2993 		   (priv->dma_cap.eee) ? "Y" : "N");
2994 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2995 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2996 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
2997 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2998 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
2999 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3000 	} else {
3001 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3002 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3003 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3004 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3005 	}
3006 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3007 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3008 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3009 		   priv->dma_cap.number_rx_channel);
3010 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3011 		   priv->dma_cap.number_tx_channel);
3012 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3013 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3014 
3015 	return 0;
3016 }
3017 
3018 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3019 {
3020 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3021 }
3022 
3023 static const struct file_operations stmmac_dma_cap_fops = {
3024 	.owner = THIS_MODULE,
3025 	.open = stmmac_sysfs_dma_cap_open,
3026 	.read = seq_read,
3027 	.llseek = seq_lseek,
3028 	.release = single_release,
3029 };
3030 
3031 static int stmmac_init_fs(struct net_device *dev)
3032 {
3033 	struct stmmac_priv *priv = netdev_priv(dev);
3034 
3035 	/* Create per netdev entries */
3036 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3037 
3038 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3039 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3040 
3041 		return -ENOMEM;
3042 	}
3043 
3044 	/* Entry to report DMA RX/TX rings */
3045 	priv->dbgfs_rings_status =
3046 		debugfs_create_file("descriptors_status", S_IRUGO,
3047 				    priv->dbgfs_dir, dev,
3048 				    &stmmac_rings_status_fops);
3049 
3050 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3051 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3052 		debugfs_remove_recursive(priv->dbgfs_dir);
3053 
3054 		return -ENOMEM;
3055 	}
3056 
3057 	/* Entry to report the DMA HW features */
3058 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3059 					    priv->dbgfs_dir,
3060 					    dev, &stmmac_dma_cap_fops);
3061 
3062 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3063 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3064 		debugfs_remove_recursive(priv->dbgfs_dir);
3065 
3066 		return -ENOMEM;
3067 	}
3068 
3069 	return 0;
3070 }
3071 
3072 static void stmmac_exit_fs(struct net_device *dev)
3073 {
3074 	struct stmmac_priv *priv = netdev_priv(dev);
3075 
3076 	debugfs_remove_recursive(priv->dbgfs_dir);
3077 }
3078 #endif /* CONFIG_DEBUG_FS */
3079 
3080 static const struct net_device_ops stmmac_netdev_ops = {
3081 	.ndo_open = stmmac_open,
3082 	.ndo_start_xmit = stmmac_xmit,
3083 	.ndo_stop = stmmac_release,
3084 	.ndo_change_mtu = stmmac_change_mtu,
3085 	.ndo_fix_features = stmmac_fix_features,
3086 	.ndo_set_features = stmmac_set_features,
3087 	.ndo_set_rx_mode = stmmac_set_rx_mode,
3088 	.ndo_tx_timeout = stmmac_tx_timeout,
3089 	.ndo_do_ioctl = stmmac_ioctl,
3090 #ifdef CONFIG_NET_POLL_CONTROLLER
3091 	.ndo_poll_controller = stmmac_poll_controller,
3092 #endif
3093 	.ndo_set_mac_address = eth_mac_addr,
3094 };
3095 
3096 /**
3097  *  stmmac_hw_init - Init the MAC device
3098  *  @priv: driver private structure
3099  *  Description: this function is to configure the MAC device according to
3100  *  some platform parameters or the HW capability register. It prepares the
3101  *  driver to use either ring or chain modes and to setup either enhanced or
3102  *  normal descriptors.
3103  */
3104 static int stmmac_hw_init(struct stmmac_priv *priv)
3105 {
3106 	struct mac_device_info *mac;
3107 
3108 	/* Identify the MAC HW device */
3109 	if (priv->plat->has_gmac) {
3110 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3111 		mac = dwmac1000_setup(priv->ioaddr,
3112 				      priv->plat->multicast_filter_bins,
3113 				      priv->plat->unicast_filter_entries,
3114 				      &priv->synopsys_id);
3115 	} else if (priv->plat->has_gmac4) {
3116 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
3117 		mac = dwmac4_setup(priv->ioaddr,
3118 				   priv->plat->multicast_filter_bins,
3119 				   priv->plat->unicast_filter_entries,
3120 				   &priv->synopsys_id);
3121 	} else {
3122 		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3123 	}
3124 	if (!mac)
3125 		return -ENOMEM;
3126 
3127 	priv->hw = mac;
3128 
3129 	/* To use the chained or ring mode */
3130 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3131 		priv->hw->mode = &dwmac4_ring_mode_ops;
3132 	} else {
3133 		if (chain_mode) {
3134 			priv->hw->mode = &chain_mode_ops;
3135 			dev_info(priv->device, "Chain mode enabled\n");
3136 			priv->mode = STMMAC_CHAIN_MODE;
3137 		} else {
3138 			priv->hw->mode = &ring_mode_ops;
3139 			dev_info(priv->device, "Ring mode enabled\n");
3140 			priv->mode = STMMAC_RING_MODE;
3141 		}
3142 	}
3143 
3144 	/* Get the HW capability (new GMAC newer than 3.50a) */
3145 	priv->hw_cap_support = stmmac_get_hw_features(priv);
3146 	if (priv->hw_cap_support) {
3147 		dev_info(priv->device, "DMA HW capability register supported\n");
3148 
3149 		/* We can override some gmac/dma configuration fields: e.g.
3150 		 * enh_desc, tx_coe (e.g. that are passed through the
3151 		 * platform) with the values from the HW capability
3152 		 * register (if supported).
3153 		 */
3154 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
3155 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3156 		priv->hw->pmt = priv->plat->pmt;
3157 
3158 		/* TXCOE doesn't work in thresh DMA mode */
3159 		if (priv->plat->force_thresh_dma_mode)
3160 			priv->plat->tx_coe = 0;
3161 		else
3162 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
3163 
3164 		/* In case of GMAC4 rx_coe is from HW cap register. */
3165 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
3166 
3167 		if (priv->dma_cap.rx_coe_type2)
3168 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3169 		else if (priv->dma_cap.rx_coe_type1)
3170 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3171 
3172 	} else {
3173 		dev_info(priv->device, "No HW DMA feature register supported\n");
3174 	}
3175 
3176 	/* To use alternate (extended), normal or GMAC4 descriptor structures */
3177 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
3178 		priv->hw->desc = &dwmac4_desc_ops;
3179 	else
3180 		stmmac_selec_desc_mode(priv);
3181 
3182 	if (priv->plat->rx_coe) {
3183 		priv->hw->rx_csum = priv->plat->rx_coe;
3184 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3185 		if (priv->synopsys_id < DWMAC_CORE_4_00)
3186 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3187 	}
3188 	if (priv->plat->tx_coe)
3189 		dev_info(priv->device, "TX Checksum insertion supported\n");
3190 
3191 	if (priv->plat->pmt) {
3192 		dev_info(priv->device, "Wake-Up On Lan supported\n");
3193 		device_set_wakeup_capable(priv->device, 1);
3194 	}
3195 
3196 	if (priv->dma_cap.tsoen)
3197 		dev_info(priv->device, "TSO supported\n");
3198 
3199 	return 0;
3200 }
3201 
3202 /**
3203  * stmmac_dvr_probe
3204  * @device: device pointer
3205  * @plat_dat: platform data pointer
3206  * @res: stmmac resource pointer
3207  * Description: this is the main probe function used to
 * call alloc_etherdev and allocate the private structure.
3209  * Return:
3210  * returns 0 on success, otherwise errno.
3211  */
3212 int stmmac_dvr_probe(struct device *device,
3213 		     struct plat_stmmacenet_data *plat_dat,
3214 		     struct stmmac_resources *res)
3215 {
3216 	int ret = 0;
3217 	struct net_device *ndev = NULL;
3218 	struct stmmac_priv *priv;
3219 
3220 	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3221 	if (!ndev)
3222 		return -ENOMEM;
3223 
3224 	SET_NETDEV_DEV(ndev, device);
3225 
3226 	priv = netdev_priv(ndev);
3227 	priv->device = device;
3228 	priv->dev = ndev;
3229 
3230 	stmmac_set_ethtool_ops(ndev);
3231 	priv->pause = pause;
3232 	priv->plat = plat_dat;
3233 	priv->ioaddr = res->addr;
3234 	priv->dev->base_addr = (unsigned long)res->addr;
3235 
3236 	priv->dev->irq = res->irq;
3237 	priv->wol_irq = res->wol_irq;
3238 	priv->lpi_irq = res->lpi_irq;
3239 
3240 	if (res->mac)
3241 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
3242 
3243 	dev_set_drvdata(device, priv->dev);
3244 
3245 	/* Verify driver arguments */
3246 	stmmac_verify_args();
3247 
3248 	/* Override with kernel parameters if supplied XXX CRS XXX
3249 	 * this needs to have multiple instances
3250 	 */
3251 	if ((phyaddr >= 0) && (phyaddr <= 31))
3252 		priv->plat->phy_addr = phyaddr;
3253 
3254 	if (priv->plat->stmmac_rst)
3255 		reset_control_deassert(priv->plat->stmmac_rst);
3256 
3257 	/* Init MAC and get the capabilities */
3258 	ret = stmmac_hw_init(priv);
3259 	if (ret)
3260 		goto error_hw_init;
3261 
3262 	ndev->netdev_ops = &stmmac_netdev_ops;
3263 
3264 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3265 			    NETIF_F_RXCSUM;
3266 
3267 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3268 		ndev->hw_features |= NETIF_F_TSO;
3269 		priv->tso = true;
3270 		dev_info(priv->device, "TSO feature enabled\n");
3271 	}
3272 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3273 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3274 #ifdef STMMAC_VLAN_TAG_USED
3275 	/* Both mac100 and gmac support receive VLAN tag detection */
3276 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3277 #endif
3278 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
3279 
3280 	/* MTU range: 46 - hw-specific max */
3281 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
3282 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
3283 		ndev->max_mtu = JUMBO_LEN;
3284 	else
3285 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
3288 	 */
3289 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
3290 	    (priv->plat->maxmtu >= ndev->min_mtu))
3291 		ndev->max_mtu = priv->plat->maxmtu;
3292 	else if (priv->plat->maxmtu < ndev->min_mtu)
3293 		dev_warn(priv->device,
3294 			 "%s: warning: maxmtu having invalid value (%d)\n",
3295 			 __func__, priv->plat->maxmtu);
3296 
3297 	if (flow_ctrl)
3298 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
3299 
	/* Rx Watchdog is available in cores newer than 3.40.
	 * In some cases, for example on bugged HW, this feature
	 * has to be disabled and this can be done by passing the
3303 	 * riwt_off field from the platform.
3304 	 */
3305 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3306 		priv->use_riwt = 1;
3307 		dev_info(priv->device,
3308 			 "Enable RX Mitigation via HW Watchdog Timer\n");
3309 	}
3310 
3311 	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3312 
3313 	spin_lock_init(&priv->lock);
3314 
3315 	/* If a specific clk_csr value is passed from the platform
3316 	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise, the driver will
	 * try to set the MDC clock dynamically according to the actual csr
	 * clock input.
3320 	 */
3321 	if (!priv->plat->clk_csr)
3322 		stmmac_clk_csr_set(priv);
3323 	else
3324 		priv->clk_csr = priv->plat->clk_csr;
3325 
3326 	stmmac_check_pcs_mode(priv);
3327 
3328 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
3329 	    priv->hw->pcs != STMMAC_PCS_TBI &&
3330 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
3331 		/* MDIO bus Registration */
3332 		ret = stmmac_mdio_register(ndev);
3333 		if (ret < 0) {
3334 			dev_err(priv->device,
3335 				"%s: MDIO bus (id: %d) registration failed",
3336 				__func__, priv->plat->bus_id);
3337 			goto error_mdio_register;
3338 		}
3339 	}
3340 
3341 	ret = register_netdev(ndev);
3342 	if (ret) {
3343 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
3344 			__func__, ret);
3345 		goto error_netdev_register;
3346 	}
3347 
3348 	return ret;
3349 
3350 error_netdev_register:
3351 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3352 	    priv->hw->pcs != STMMAC_PCS_TBI &&
3353 	    priv->hw->pcs != STMMAC_PCS_RTBI)
3354 		stmmac_mdio_unregister(ndev);
3355 error_mdio_register:
3356 	netif_napi_del(&priv->napi);
3357 error_hw_init:
3358 	free_netdev(ndev);
3359 
3360 	return ret;
3361 }
3362 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
3363 
3364 /**
3365  * stmmac_dvr_remove
3366  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status, and releases the DMA descriptor rings.
3369  */
3370 int stmmac_dvr_remove(struct device *dev)
3371 {
3372 	struct net_device *ndev = dev_get_drvdata(dev);
3373 	struct stmmac_priv *priv = netdev_priv(ndev);
3374 
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
3376 
3377 	priv->hw->dma->stop_rx(priv->ioaddr);
3378 	priv->hw->dma->stop_tx(priv->ioaddr);
3379 
3380 	stmmac_set_mac(priv->ioaddr, false);
3381 	netif_carrier_off(ndev);
3382 	unregister_netdev(ndev);
3383 	if (priv->plat->stmmac_rst)
3384 		reset_control_assert(priv->plat->stmmac_rst);
3385 	clk_disable_unprepare(priv->plat->pclk);
3386 	clk_disable_unprepare(priv->plat->stmmac_clk);
3387 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3388 	    priv->hw->pcs != STMMAC_PCS_TBI &&
3389 	    priv->hw->pcs != STMMAC_PCS_RTBI)
3390 		stmmac_mdio_unregister(ndev);
3391 	free_netdev(ndev);
3392 
3393 	return 0;
3394 }
3395 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3396 
3397 /**
3398  * stmmac_suspend - suspend callback
3399  * @dev: device pointer
3400  * Description: this is the function to suspend the device and it is called
3401  * by the platform driver to stop the network queue, release the resources,
3402  * program the PMT register (for WoL), clean and release driver resources.
3403  */
3404 int stmmac_suspend(struct device *dev)
3405 {
3406 	struct net_device *ndev = dev_get_drvdata(dev);
3407 	struct stmmac_priv *priv = netdev_priv(ndev);
3408 	unsigned long flags;
3409 
3410 	if (!ndev || !netif_running(ndev))
3411 		return 0;
3412 
3413 	if (ndev->phydev)
3414 		phy_stop(ndev->phydev);
3415 
3416 	spin_lock_irqsave(&priv->lock, flags);
3417 
3418 	netif_device_detach(ndev);
3419 	netif_stop_queue(ndev);
3420 
3421 	napi_disable(&priv->napi);
3422 
3423 	/* Stop TX/RX DMA */
3424 	priv->hw->dma->stop_tx(priv->ioaddr);
3425 	priv->hw->dma->stop_rx(priv->ioaddr);
3426 
3427 	/* Enable Power down mode by programming the PMT regs */
3428 	if (device_may_wakeup(priv->device)) {
3429 		priv->hw->mac->pmt(priv->hw, priv->wolopts);
3430 		priv->irq_wake = 1;
3431 	} else {
3432 		stmmac_set_mac(priv->ioaddr, false);
3433 		pinctrl_pm_select_sleep_state(priv->device);
3434 		/* Disable the clocks since PMT-based wake-up is not used */
3435 		clk_disable(priv->plat->pclk);
3436 		clk_disable(priv->plat->stmmac_clk);
3437 	}
3438 	spin_unlock_irqrestore(&priv->lock, flags);
3439 
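	/* Invalidate the cached link parameters so that speed and duplex
	 * are re-detected when the interface comes back up.
	 */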
3440 	priv->oldlink = 0;
3441 	priv->speed = SPEED_UNKNOWN;
3442 	priv->oldduplex = DUPLEX_UNKNOWN;
3443 	return 0;
3444 }
3445 EXPORT_SYMBOL_GPL(stmmac_suspend);
3446 
3447 /**
3448  * stmmac_resume - resume callback
3449  * @dev: device pointer
3450  * Description: when resuming, this function is invoked to set up the DMA
3451  * and CORE in a usable state.
3452  */
3453 int stmmac_resume(struct device *dev)
3454 {
3455 	struct net_device *ndev = dev_get_drvdata(dev);
3456 	struct stmmac_priv *priv = netdev_priv(ndev);
3457 	unsigned long flags;
3458 
3459 	if (!netif_running(ndev))
3460 		return 0;
3461 
3462 	/* The Power Down bit in the PMT register is cleared
3463 	 * automatically as soon as a magic packet or a Wake-up frame
3464 	 * is received. Nevertheless, it's better to clear this bit
3465 	 * manually because it can cause problems when resuming
3466 	 * from another device (e.g. a serial console).
3467 	 */
3468 	if (device_may_wakeup(priv->device)) {
3469 		spin_lock_irqsave(&priv->lock, flags);
3470 		priv->hw->mac->pmt(priv->hw, 0);
3471 		spin_unlock_irqrestore(&priv->lock, flags);
3472 		priv->irq_wake = 0;
3473 	} else {
3474 		pinctrl_pm_select_default_state(priv->device);
3475 		/* Re-enable the clocks disabled during suspend */
3476 		clk_enable(priv->plat->stmmac_clk);
3477 		clk_enable(priv->plat->pclk);
3478 		/* Reset the PHY so that it's ready */
3479 		if (priv->mii)
3480 			stmmac_mdio_reset(priv->mii);
3481 	}
3482 
3483 	netif_device_attach(ndev);
3484 
3485 	spin_lock_irqsave(&priv->lock, flags);
3486 
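	/* Restart from a clean ring state: reset the descriptor indexes
	 * before the descriptors themselves are cleared below.
	 */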
3487 	priv->cur_rx = 0;
3488 	priv->dirty_rx = 0;
3489 	priv->dirty_tx = 0;
3490 	priv->cur_tx = 0;
3491 	/* Reset the private MSS value to force the MSS context to be
3492 	 * reprogrammed at the next TSO xmit (only used by GMAC4).
3493 	 */
3494 	priv->mss = 0;
3495 
3496 	stmmac_clear_descriptors(priv);
3497 
3498 	stmmac_hw_setup(ndev, false);
3499 	stmmac_init_tx_coalesce(priv);
3500 	stmmac_set_rx_mode(ndev);
3501 
3502 	napi_enable(&priv->napi);
3503 
3504 	netif_start_queue(ndev);
3505 
3506 	spin_unlock_irqrestore(&priv->lock, flags);
3507 
3508 	if (ndev->phydev)
3509 		phy_start(ndev->phydev);
3510 
3511 	return 0;
3512 }
3513 EXPORT_SYMBOL_GPL(stmmac_resume);
3514 
3515 #ifndef MODULE
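/*
 * Parse the "stmmaceth=" kernel command line option when the driver is
 * built in. Options are given as a comma-separated list of key:value
 * pairs, for example (values are illustrative only):
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000
 */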
3516 static int __init stmmac_cmdline_opt(char *str)
3517 {
3518 	char *opt;
3519 
3520 	if (!str || !*str)
3521 		return -EINVAL;
3522 	while ((opt = strsep(&str, ",")) != NULL) {
3523 		if (!strncmp(opt, "debug:", 6)) {
3524 			if (kstrtoint(opt + 6, 0, &debug))
3525 				goto err;
3526 		} else if (!strncmp(opt, "phyaddr:", 8)) {
3527 			if (kstrtoint(opt + 8, 0, &phyaddr))
3528 				goto err;
3529 		} else if (!strncmp(opt, "buf_sz:", 7)) {
3530 			if (kstrtoint(opt + 7, 0, &buf_sz))
3531 				goto err;
3532 		} else if (!strncmp(opt, "tc:", 3)) {
3533 			if (kstrtoint(opt + 3, 0, &tc))
3534 				goto err;
3535 		} else if (!strncmp(opt, "watchdog:", 9)) {
3536 			if (kstrtoint(opt + 9, 0, &watchdog))
3537 				goto err;
3538 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
3539 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
3540 				goto err;
3541 		} else if (!strncmp(opt, "pause:", 6)) {
3542 			if (kstrtoint(opt + 6, 0, &pause))
3543 				goto err;
3544 		} else if (!strncmp(opt, "eee_timer:", 10)) {
3545 			if (kstrtoint(opt + 10, 0, &eee_timer))
3546 				goto err;
3547 		} else if (!strncmp(opt, "chain_mode:", 11)) {
3548 			if (kstrtoint(opt + 11, 0, &chain_mode))
3549 				goto err;
3550 		}
3551 	}
3552 	return 0;
3553 
3554 err:
3555 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
3556 	return -EINVAL;
3557 }
3558 
3559 __setup("stmmaceth=", stmmac_cmdline_opt);
3560 #endif /* MODULE */
3561 
3562 static int __init stmmac_init(void)
3563 {
3564 #ifdef CONFIG_DEBUG_FS
3565 	/* Create debugfs main directory if it doesn't exist yet */
3566 	if (!stmmac_fs_dir) {
3567 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3568 
3569 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3570 			pr_err("ERROR %s, debugfs create directory failed\n",
3571 			       STMMAC_RESOURCE_NAME);
3572 
3573 			return -ENOMEM;
3574 		}
3575 	}
3576 #endif
3577 
3578 	return 0;
3579 }
3580 
3581 static void __exit stmmac_exit(void)
3582 {
3583 #ifdef CONFIG_DEBUG_FS
3584 	debugfs_remove_recursive(stmmac_fs_dir);
3585 #endif
3586 }
3587 
3588 module_init(stmmac_init)
3589 module_exit(stmmac_exit)
3590 
3591 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3592 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3593 MODULE_LICENSE("GPL");
3594