xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56 
57 #define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
59 
60 /* Module parameters */
61 #define TX_TIMEO	5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65 
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69 
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73 
74 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
76 
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80 
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84 
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89 
90 #define	DEFAULT_BUFSIZE	1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94 
95 #define	STMMAC_RX_COPYBREAK	256
96 
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
99 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100 
101 #define STMMAC_DEFAULT_LPI_TIMER	1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106 
107 /* By default the driver will use the ring mode to manage tx and rx descriptors,
108  * but allow user to force to use the chain instead of the ring
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113 
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115 
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120 
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122 
123 /**
124  * stmmac_verify_args - verify the driver parameters.
125  * Description: it checks the driver parameters and set a default in case of
126  * errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130 	if (unlikely(watchdog < 0))
131 		watchdog = TX_TIMEO;
132 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133 		buf_sz = DEFAULT_BUFSIZE;
134 	if (unlikely(flow_ctrl > 1))
135 		flow_ctrl = FLOW_AUTO;
136 	else if (likely(flow_ctrl < 0))
137 		flow_ctrl = FLOW_OFF;
138 	if (unlikely((pause < 0) || (pause > 0xffff)))
139 		pause = PAUSE_TIME;
140 	if (eee_timer < 0)
141 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143 
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151 	u32 queue;
152 
153 	for (queue = 0; queue < rx_queues_cnt; queue++) {
154 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
155 
156 		napi_disable(&rx_q->napi);
157 	}
158 }
159 
160 /**
161  * stmmac_enable_all_queues - Enable all queues
162  * @priv: driver private structure
163  */
164 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
165 {
166 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
167 	u32 queue;
168 
169 	for (queue = 0; queue < rx_queues_cnt; queue++) {
170 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
171 
172 		napi_enable(&rx_q->napi);
173 	}
174 }
175 
176 /**
177  * stmmac_stop_all_queues - Stop all queues
178  * @priv: driver private structure
179  */
180 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
181 {
182 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
183 	u32 queue;
184 
185 	for (queue = 0; queue < tx_queues_cnt; queue++)
186 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
187 }
188 
189 /**
190  * stmmac_start_all_queues - Start all queues
191  * @priv: driver private structure
192  */
193 static void stmmac_start_all_queues(struct stmmac_priv *priv)
194 {
195 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
196 	u32 queue;
197 
198 	for (queue = 0; queue < tx_queues_cnt; queue++)
199 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
200 }
201 
202 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
203 {
204 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
205 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
206 		queue_work(priv->wq, &priv->service_task);
207 }
208 
209 static void stmmac_global_err(struct stmmac_priv *priv)
210 {
211 	netif_carrier_off(priv->dev);
212 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
213 	stmmac_service_event_schedule(priv);
214 }
215 
216 /**
217  * stmmac_clk_csr_set - dynamically set the MDC clock
218  * @priv: driver private structure
219  * Description: this is to dynamically set the MDC clock according to the csr
220  * clock input.
221  * Note:
222  *	If a specific clk_csr value is passed from the platform
223  *	this means that the CSR Clock Range selection cannot be
224  *	changed at run-time and it is fixed (as reported in the driver
225  *	documentation). Viceversa the driver will try to set the MDC
226  *	clock dynamically according to the actual clock input.
227  */
228 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
229 {
230 	u32 clk_rate;
231 
232 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
233 
234 	/* Platform provided default clk_csr would be assumed valid
235 	 * for all other cases except for the below mentioned ones.
236 	 * For values higher than the IEEE 802.3 specified frequency
237 	 * we can not estimate the proper divider as it is not known
238 	 * the frequency of clk_csr_i. So we do not change the default
239 	 * divider.
240 	 */
241 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
242 		if (clk_rate < CSR_F_35M)
243 			priv->clk_csr = STMMAC_CSR_20_35M;
244 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
245 			priv->clk_csr = STMMAC_CSR_35_60M;
246 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
247 			priv->clk_csr = STMMAC_CSR_60_100M;
248 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
249 			priv->clk_csr = STMMAC_CSR_100_150M;
250 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
251 			priv->clk_csr = STMMAC_CSR_150_250M;
252 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
253 			priv->clk_csr = STMMAC_CSR_250_300M;
254 	}
255 
256 	if (priv->plat->has_sun8i) {
257 		if (clk_rate > 160000000)
258 			priv->clk_csr = 0x03;
259 		else if (clk_rate > 80000000)
260 			priv->clk_csr = 0x02;
261 		else if (clk_rate > 40000000)
262 			priv->clk_csr = 0x01;
263 		else
264 			priv->clk_csr = 0;
265 	}
266 
267 	if (priv->plat->has_xgmac) {
268 		if (clk_rate > 400000000)
269 			priv->clk_csr = 0x5;
270 		else if (clk_rate > 350000000)
271 			priv->clk_csr = 0x4;
272 		else if (clk_rate > 300000000)
273 			priv->clk_csr = 0x3;
274 		else if (clk_rate > 250000000)
275 			priv->clk_csr = 0x2;
276 		else if (clk_rate > 150000000)
277 			priv->clk_csr = 0x1;
278 		else
279 			priv->clk_csr = 0x0;
280 	}
281 }
282 
283 static void print_pkt(unsigned char *buf, int len)
284 {
285 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
286 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
287 }
288 
289 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
290 {
291 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
292 	u32 avail;
293 
294 	if (tx_q->dirty_tx > tx_q->cur_tx)
295 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
296 	else
297 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
298 
299 	return avail;
300 }
301 
302 /**
303  * stmmac_rx_dirty - Get RX queue dirty
304  * @priv: driver private structure
305  * @queue: RX queue index
306  */
307 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
308 {
309 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
310 	u32 dirty;
311 
312 	if (rx_q->dirty_rx <= rx_q->cur_rx)
313 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
314 	else
315 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
316 
317 	return dirty;
318 }
319 
320 /**
321  * stmmac_hw_fix_mac_speed - callback for speed selection
322  * @priv: driver private structure
323  * Description: on some platforms (e.g. ST), some HW system configuration
324  * registers have to be set according to the link speed negotiated.
325  */
326 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
327 {
328 	struct net_device *ndev = priv->dev;
329 	struct phy_device *phydev = ndev->phydev;
330 
331 	if (likely(priv->plat->fix_mac_speed))
332 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
333 }
334 
335 /**
336  * stmmac_enable_eee_mode - check and enter in LPI mode
337  * @priv: driver private structure
338  * Description: this function is to verify and enter in LPI mode in case of
339  * EEE.
340  */
341 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
342 {
343 	u32 tx_cnt = priv->plat->tx_queues_to_use;
344 	u32 queue;
345 
346 	/* check if all TX queues have the work finished */
347 	for (queue = 0; queue < tx_cnt; queue++) {
348 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
349 
350 		if (tx_q->dirty_tx != tx_q->cur_tx)
351 			return; /* still unfinished work */
352 	}
353 
354 	/* Check and enter in LPI mode */
355 	if (!priv->tx_path_in_lpi_mode)
356 		stmmac_set_eee_mode(priv, priv->hw,
357 				priv->plat->en_tx_lpi_clockgating);
358 }
359 
360 /**
361  * stmmac_disable_eee_mode - disable and exit from LPI mode
362  * @priv: driver private structure
363  * Description: this function is to exit and disable EEE in case of
364  * LPI state is true. This is called by the xmit.
365  */
366 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
367 {
368 	stmmac_reset_eee_mode(priv, priv->hw);
369 	del_timer_sync(&priv->eee_ctrl_timer);
370 	priv->tx_path_in_lpi_mode = false;
371 }
372 
373 /**
374  * stmmac_eee_ctrl_timer - EEE TX SW timer.
375  * @arg : data hook
376  * Description:
377  *  if there is no data transfer and if we are not in LPI state,
378  *  then MAC Transmitter can be moved to LPI state.
379  */
380 static void stmmac_eee_ctrl_timer(struct timer_list *t)
381 {
382 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
383 
384 	stmmac_enable_eee_mode(priv);
385 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
386 }
387 
388 /**
389  * stmmac_eee_init - init EEE
390  * @priv: driver private structure
391  * Description:
392  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
393  *  can also manage EEE, this function enable the LPI state and start related
394  *  timer.
395  */
396 bool stmmac_eee_init(struct stmmac_priv *priv)
397 {
398 	struct net_device *ndev = priv->dev;
399 	int interface = priv->plat->interface;
400 	bool ret = false;
401 
402 	if ((interface != PHY_INTERFACE_MODE_MII) &&
403 	    (interface != PHY_INTERFACE_MODE_GMII) &&
404 	    !phy_interface_mode_is_rgmii(interface))
405 		goto out;
406 
407 	/* Using PCS we cannot dial with the phy registers at this stage
408 	 * so we do not support extra feature like EEE.
409 	 */
410 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
411 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
412 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
413 		goto out;
414 
415 	/* MAC core supports the EEE feature. */
416 	if (priv->dma_cap.eee) {
417 		int tx_lpi_timer = priv->tx_lpi_timer;
418 
419 		/* Check if the PHY supports EEE */
420 		if (phy_init_eee(ndev->phydev, 1)) {
421 			/* To manage at run-time if the EEE cannot be supported
422 			 * anymore (for example because the lp caps have been
423 			 * changed).
424 			 * In that case the driver disable own timers.
425 			 */
426 			mutex_lock(&priv->lock);
427 			if (priv->eee_active) {
428 				netdev_dbg(priv->dev, "disable EEE\n");
429 				del_timer_sync(&priv->eee_ctrl_timer);
430 				stmmac_set_eee_timer(priv, priv->hw, 0,
431 						tx_lpi_timer);
432 			}
433 			priv->eee_active = 0;
434 			mutex_unlock(&priv->lock);
435 			goto out;
436 		}
437 		/* Activate the EEE and start timers */
438 		mutex_lock(&priv->lock);
439 		if (!priv->eee_active) {
440 			priv->eee_active = 1;
441 			timer_setup(&priv->eee_ctrl_timer,
442 				    stmmac_eee_ctrl_timer, 0);
443 			mod_timer(&priv->eee_ctrl_timer,
444 				  STMMAC_LPI_T(eee_timer));
445 
446 			stmmac_set_eee_timer(priv, priv->hw,
447 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
448 		}
449 		/* Set HW EEE according to the speed */
450 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
451 
452 		ret = true;
453 		mutex_unlock(&priv->lock);
454 
455 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
456 	}
457 out:
458 	return ret;
459 }
460 
461 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
462  * @priv: driver private structure
463  * @p : descriptor pointer
464  * @skb : the socket buffer
465  * Description :
466  * This function will read timestamp from the descriptor & pass it to stack.
467  * and also perform some sanity checks.
468  */
469 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
470 				   struct dma_desc *p, struct sk_buff *skb)
471 {
472 	struct skb_shared_hwtstamps shhwtstamp;
473 	u64 ns;
474 
475 	if (!priv->hwts_tx_en)
476 		return;
477 
478 	/* exit if skb doesn't support hw tstamp */
479 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
480 		return;
481 
482 	/* check tx tstamp status */
483 	if (stmmac_get_tx_timestamp_status(priv, p)) {
484 		/* get the valid tstamp */
485 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
486 
487 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
488 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
489 
490 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
491 		/* pass tstamp to stack */
492 		skb_tstamp_tx(skb, &shhwtstamp);
493 	}
494 
495 	return;
496 }
497 
498 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
499  * @priv: driver private structure
500  * @p : descriptor pointer
501  * @np : next descriptor pointer
502  * @skb : the socket buffer
503  * Description :
504  * This function will read received packet's timestamp from the descriptor
505  * and pass it to stack. It also perform some sanity checks.
506  */
507 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
508 				   struct dma_desc *np, struct sk_buff *skb)
509 {
510 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
511 	struct dma_desc *desc = p;
512 	u64 ns;
513 
514 	if (!priv->hwts_rx_en)
515 		return;
516 	/* For GMAC4, the valid timestamp is from CTX next desc. */
517 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
518 		desc = np;
519 
520 	/* Check if timestamp is available */
521 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
522 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
523 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
524 		shhwtstamp = skb_hwtstamps(skb);
525 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
526 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
527 	} else  {
528 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
529 	}
530 }
531 
532 /**
533  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
534  *  @dev: device pointer.
535  *  @ifr: An IOCTL specific structure, that can contain a pointer to
536  *  a proprietary structure used to pass information to the driver.
537  *  Description:
538  *  This function configures the MAC to enable/disable both outgoing(TX)
539  *  and incoming(RX) packets time stamping based on user input.
540  *  Return Value:
541  *  0 on success and an appropriate -ve integer on failure.
542  */
543 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
544 {
545 	struct stmmac_priv *priv = netdev_priv(dev);
546 	struct hwtstamp_config config;
547 	struct timespec64 now;
548 	u64 temp = 0;
549 	u32 ptp_v2 = 0;
550 	u32 tstamp_all = 0;
551 	u32 ptp_over_ipv4_udp = 0;
552 	u32 ptp_over_ipv6_udp = 0;
553 	u32 ptp_over_ethernet = 0;
554 	u32 snap_type_sel = 0;
555 	u32 ts_master_en = 0;
556 	u32 ts_event_en = 0;
557 	u32 value = 0;
558 	u32 sec_inc;
559 	bool xmac;
560 
561 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
562 
563 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
564 		netdev_alert(priv->dev, "No support for HW time stamping\n");
565 		priv->hwts_tx_en = 0;
566 		priv->hwts_rx_en = 0;
567 
568 		return -EOPNOTSUPP;
569 	}
570 
571 	if (copy_from_user(&config, ifr->ifr_data,
572 			   sizeof(struct hwtstamp_config)))
573 		return -EFAULT;
574 
575 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
576 		   __func__, config.flags, config.tx_type, config.rx_filter);
577 
578 	/* reserved for future extensions */
579 	if (config.flags)
580 		return -EINVAL;
581 
582 	if (config.tx_type != HWTSTAMP_TX_OFF &&
583 	    config.tx_type != HWTSTAMP_TX_ON)
584 		return -ERANGE;
585 
586 	if (priv->adv_ts) {
587 		switch (config.rx_filter) {
588 		case HWTSTAMP_FILTER_NONE:
589 			/* time stamp no incoming packet at all */
590 			config.rx_filter = HWTSTAMP_FILTER_NONE;
591 			break;
592 
593 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
594 			/* PTP v1, UDP, any kind of event packet */
595 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
596 			/* take time stamp for all event messages */
597 			if (xmac)
598 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
599 			else
600 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
601 
602 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
603 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
604 			break;
605 
606 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
607 			/* PTP v1, UDP, Sync packet */
608 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
609 			/* take time stamp for SYNC messages only */
610 			ts_event_en = PTP_TCR_TSEVNTENA;
611 
612 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
614 			break;
615 
616 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
617 			/* PTP v1, UDP, Delay_req packet */
618 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
619 			/* take time stamp for Delay_Req messages only */
620 			ts_master_en = PTP_TCR_TSMSTRENA;
621 			ts_event_en = PTP_TCR_TSEVNTENA;
622 
623 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625 			break;
626 
627 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
628 			/* PTP v2, UDP, any kind of event packet */
629 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
630 			ptp_v2 = PTP_TCR_TSVER2ENA;
631 			/* take time stamp for all event messages */
632 			if (xmac)
633 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
634 			else
635 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
636 
637 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
638 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
639 			break;
640 
641 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
642 			/* PTP v2, UDP, Sync packet */
643 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
644 			ptp_v2 = PTP_TCR_TSVER2ENA;
645 			/* take time stamp for SYNC messages only */
646 			ts_event_en = PTP_TCR_TSEVNTENA;
647 
648 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
649 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
653 			/* PTP v2, UDP, Delay_req packet */
654 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
655 			ptp_v2 = PTP_TCR_TSVER2ENA;
656 			/* take time stamp for Delay_Req messages only */
657 			ts_master_en = PTP_TCR_TSMSTRENA;
658 			ts_event_en = PTP_TCR_TSEVNTENA;
659 
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
665 			/* PTP v2/802.AS1 any layer, any kind of event packet */
666 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
667 			ptp_v2 = PTP_TCR_TSVER2ENA;
668 			/* take time stamp for all event messages */
669 			if (xmac)
670 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
671 			else
672 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
673 
674 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
675 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
676 			ptp_over_ethernet = PTP_TCR_TSIPENA;
677 			break;
678 
679 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
680 			/* PTP v2/802.AS1, any layer, Sync packet */
681 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
682 			ptp_v2 = PTP_TCR_TSVER2ENA;
683 			/* take time stamp for SYNC messages only */
684 			ts_event_en = PTP_TCR_TSEVNTENA;
685 
686 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 			ptp_over_ethernet = PTP_TCR_TSIPENA;
689 			break;
690 
691 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
692 			/* PTP v2/802.AS1, any layer, Delay_req packet */
693 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
694 			ptp_v2 = PTP_TCR_TSVER2ENA;
695 			/* take time stamp for Delay_Req messages only */
696 			ts_master_en = PTP_TCR_TSMSTRENA;
697 			ts_event_en = PTP_TCR_TSEVNTENA;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			ptp_over_ethernet = PTP_TCR_TSIPENA;
702 			break;
703 
704 		case HWTSTAMP_FILTER_NTP_ALL:
705 		case HWTSTAMP_FILTER_ALL:
706 			/* time stamp any incoming packet */
707 			config.rx_filter = HWTSTAMP_FILTER_ALL;
708 			tstamp_all = PTP_TCR_TSENALL;
709 			break;
710 
711 		default:
712 			return -ERANGE;
713 		}
714 	} else {
715 		switch (config.rx_filter) {
716 		case HWTSTAMP_FILTER_NONE:
717 			config.rx_filter = HWTSTAMP_FILTER_NONE;
718 			break;
719 		default:
720 			/* PTP v1, UDP, any kind of event packet */
721 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
722 			break;
723 		}
724 	}
725 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
726 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
727 
728 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
729 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
730 	else {
731 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
732 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
733 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
734 			 ts_master_en | snap_type_sel);
735 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
736 
737 		/* program Sub Second Increment reg */
738 		stmmac_config_sub_second_increment(priv,
739 				priv->ptpaddr, priv->plat->clk_ptp_rate,
740 				xmac, &sec_inc);
741 		temp = div_u64(1000000000ULL, sec_inc);
742 
743 		/* Store sub second increment and flags for later use */
744 		priv->sub_second_inc = sec_inc;
745 		priv->systime_flags = value;
746 
747 		/* calculate default added value:
748 		 * formula is :
749 		 * addend = (2^32)/freq_div_ratio;
750 		 * where, freq_div_ratio = 1e9ns/sec_inc
751 		 */
752 		temp = (u64)(temp << 32);
753 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
754 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
755 
756 		/* initialize system time */
757 		ktime_get_real_ts64(&now);
758 
759 		/* lower 32 bits of tv_sec are safe until y2106 */
760 		stmmac_init_systime(priv, priv->ptpaddr,
761 				(u32)now.tv_sec, now.tv_nsec);
762 	}
763 
764 	return copy_to_user(ifr->ifr_data, &config,
765 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
766 }
767 
768 /**
769  * stmmac_init_ptp - init PTP
770  * @priv: driver private structure
771  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
772  * This is done by looking at the HW cap. register.
773  * This function also registers the ptp driver.
774  */
775 static int stmmac_init_ptp(struct stmmac_priv *priv)
776 {
777 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
778 
779 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
780 		return -EOPNOTSUPP;
781 
782 	priv->adv_ts = 0;
783 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
784 	if (xmac && priv->dma_cap.atime_stamp)
785 		priv->adv_ts = 1;
786 	/* Dwmac 3.x core with extend_desc can support adv_ts */
787 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
788 		priv->adv_ts = 1;
789 
790 	if (priv->dma_cap.time_stamp)
791 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
792 
793 	if (priv->adv_ts)
794 		netdev_info(priv->dev,
795 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
796 
797 	priv->hwts_tx_en = 0;
798 	priv->hwts_rx_en = 0;
799 
800 	stmmac_ptp_register(priv);
801 
802 	return 0;
803 }
804 
805 static void stmmac_release_ptp(struct stmmac_priv *priv)
806 {
807 	if (priv->plat->clk_ptp_ref)
808 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
809 	stmmac_ptp_unregister(priv);
810 }
811 
812 /**
813  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
814  *  @priv: driver private structure
815  *  Description: It is used for configuring the flow control in all queues
816  */
817 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
818 {
819 	u32 tx_cnt = priv->plat->tx_queues_to_use;
820 
821 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
822 			priv->pause, tx_cnt);
823 }
824 
825 /**
826  * stmmac_adjust_link - adjusts the link parameters
827  * @dev: net device structure
828  * Description: this is the helper called by the physical abstraction layer
829  * drivers to communicate the phy link status. According the speed and duplex
830  * this driver can invoke registered glue-logic as well.
831  * It also invoke the eee initialization because it could happen when switch
832  * on different networks (that are eee capable).
833  */
834 static void stmmac_adjust_link(struct net_device *dev)
835 {
836 	struct stmmac_priv *priv = netdev_priv(dev);
837 	struct phy_device *phydev = dev->phydev;
838 	bool new_state = false;
839 
840 	if (!phydev)
841 		return;
842 
843 	mutex_lock(&priv->lock);
844 
845 	if (phydev->link) {
846 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
847 
848 		/* Now we make sure that we can be in full duplex mode.
849 		 * If not, we operate in half-duplex mode. */
850 		if (phydev->duplex != priv->oldduplex) {
851 			new_state = true;
852 			if (!phydev->duplex)
853 				ctrl &= ~priv->hw->link.duplex;
854 			else
855 				ctrl |= priv->hw->link.duplex;
856 			priv->oldduplex = phydev->duplex;
857 		}
858 		/* Flow Control operation */
859 		if (phydev->pause)
860 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
861 
862 		if (phydev->speed != priv->speed) {
863 			new_state = true;
864 			ctrl &= ~priv->hw->link.speed_mask;
865 			switch (phydev->speed) {
866 			case SPEED_1000:
867 				ctrl |= priv->hw->link.speed1000;
868 				break;
869 			case SPEED_100:
870 				ctrl |= priv->hw->link.speed100;
871 				break;
872 			case SPEED_10:
873 				ctrl |= priv->hw->link.speed10;
874 				break;
875 			default:
876 				netif_warn(priv, link, priv->dev,
877 					   "broken speed: %d\n", phydev->speed);
878 				phydev->speed = SPEED_UNKNOWN;
879 				break;
880 			}
881 			if (phydev->speed != SPEED_UNKNOWN)
882 				stmmac_hw_fix_mac_speed(priv);
883 			priv->speed = phydev->speed;
884 		}
885 
886 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
887 
888 		if (!priv->oldlink) {
889 			new_state = true;
890 			priv->oldlink = true;
891 		}
892 	} else if (priv->oldlink) {
893 		new_state = true;
894 		priv->oldlink = false;
895 		priv->speed = SPEED_UNKNOWN;
896 		priv->oldduplex = DUPLEX_UNKNOWN;
897 	}
898 
899 	if (new_state && netif_msg_link(priv))
900 		phy_print_status(phydev);
901 
902 	mutex_unlock(&priv->lock);
903 
904 	if (phydev->is_pseudo_fixed_link)
905 		/* Stop PHY layer to call the hook to adjust the link in case
906 		 * of a switch is attached to the stmmac driver.
907 		 */
908 		phydev->irq = PHY_IGNORE_INTERRUPT;
909 	else
910 		/* At this stage, init the EEE if supported.
911 		 * Never called in case of fixed_link.
912 		 */
913 		priv->eee_enabled = stmmac_eee_init(priv);
914 }
915 
916 /**
917  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
918  * @priv: driver private structure
919  * Description: this is to verify if the HW supports the PCS.
920  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
921  * configured for the TBI, RTBI, or SGMII PHY interface.
922  */
923 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
924 {
925 	int interface = priv->plat->interface;
926 
927 	if (priv->dma_cap.pcs) {
928 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
929 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
930 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
931 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
932 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
933 			priv->hw->pcs = STMMAC_PCS_RGMII;
934 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
935 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
936 			priv->hw->pcs = STMMAC_PCS_SGMII;
937 		}
938 	}
939 }
940 
941 /**
942  * stmmac_init_phy - PHY initialization
943  * @dev: net device structure
944  * Description: it initializes the driver's PHY state, and attaches the PHY
945  * to the mac driver.
946  *  Return value:
947  *  0 on success
948  */
949 static int stmmac_init_phy(struct net_device *dev)
950 {
951 	struct stmmac_priv *priv = netdev_priv(dev);
952 	u32 tx_cnt = priv->plat->tx_queues_to_use;
953 	struct phy_device *phydev;
954 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
955 	char bus_id[MII_BUS_ID_SIZE];
956 	int interface = priv->plat->interface;
957 	int max_speed = priv->plat->max_speed;
958 	priv->oldlink = false;
959 	priv->speed = SPEED_UNKNOWN;
960 	priv->oldduplex = DUPLEX_UNKNOWN;
961 
962 	if (priv->plat->phy_node) {
963 		phydev = of_phy_connect(dev, priv->plat->phy_node,
964 					&stmmac_adjust_link, 0, interface);
965 	} else {
966 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
967 			 priv->plat->bus_id);
968 
969 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
970 			 priv->plat->phy_addr);
971 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
972 			   phy_id_fmt);
973 
974 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
975 				     interface);
976 	}
977 
978 	if (IS_ERR_OR_NULL(phydev)) {
979 		netdev_err(priv->dev, "Could not attach to PHY\n");
980 		if (!phydev)
981 			return -ENODEV;
982 
983 		return PTR_ERR(phydev);
984 	}
985 
986 	/* Stop Advertising 1000BASE Capability if interface is not GMII */
987 	if ((interface == PHY_INTERFACE_MODE_MII) ||
988 	    (interface == PHY_INTERFACE_MODE_RMII) ||
989 		(max_speed < 1000 && max_speed > 0))
990 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
991 					 SUPPORTED_1000baseT_Full);
992 
993 	/*
994 	 * Half-duplex mode not supported with multiqueue
995 	 * half-duplex can only works with single queue
996 	 */
997 	if (tx_cnt > 1)
998 		phydev->supported &= ~(SUPPORTED_1000baseT_Half |
999 				       SUPPORTED_100baseT_Half |
1000 				       SUPPORTED_10baseT_Half);
1001 
1002 	/*
1003 	 * Broken HW is sometimes missing the pull-up resistor on the
1004 	 * MDIO line, which results in reads to non-existent devices returning
1005 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1006 	 * device as well.
1007 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
1008 	 */
1009 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
1010 		phy_disconnect(phydev);
1011 		return -ENODEV;
1012 	}
1013 
1014 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1015 	 * subsequent PHY polling, make sure we force a link transition if
1016 	 * we have a UP/DOWN/UP transition
1017 	 */
1018 	if (phydev->is_pseudo_fixed_link)
1019 		phydev->irq = PHY_POLL;
1020 
1021 	phy_attached_info(phydev);
1022 	return 0;
1023 }
1024 
1025 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1026 {
1027 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1028 	void *head_rx;
1029 	u32 queue;
1030 
1031 	/* Display RX rings */
1032 	for (queue = 0; queue < rx_cnt; queue++) {
1033 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1034 
1035 		pr_info("\tRX Queue %u rings\n", queue);
1036 
1037 		if (priv->extend_desc)
1038 			head_rx = (void *)rx_q->dma_erx;
1039 		else
1040 			head_rx = (void *)rx_q->dma_rx;
1041 
1042 		/* Display RX ring */
1043 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1044 	}
1045 }
1046 
1047 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1048 {
1049 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1050 	void *head_tx;
1051 	u32 queue;
1052 
1053 	/* Display TX rings */
1054 	for (queue = 0; queue < tx_cnt; queue++) {
1055 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1056 
1057 		pr_info("\tTX Queue %d rings\n", queue);
1058 
1059 		if (priv->extend_desc)
1060 			head_tx = (void *)tx_q->dma_etx;
1061 		else
1062 			head_tx = (void *)tx_q->dma_tx;
1063 
1064 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1065 	}
1066 }
1067 
1068 static void stmmac_display_rings(struct stmmac_priv *priv)
1069 {
1070 	/* Display RX ring */
1071 	stmmac_display_rx_rings(priv);
1072 
1073 	/* Display TX ring */
1074 	stmmac_display_tx_rings(priv);
1075 }
1076 
1077 static int stmmac_set_bfsize(int mtu, int bufsize)
1078 {
1079 	int ret = bufsize;
1080 
1081 	if (mtu >= BUF_SIZE_4KiB)
1082 		ret = BUF_SIZE_8KiB;
1083 	else if (mtu >= BUF_SIZE_2KiB)
1084 		ret = BUF_SIZE_4KiB;
1085 	else if (mtu > DEFAULT_BUFSIZE)
1086 		ret = BUF_SIZE_2KiB;
1087 	else
1088 		ret = DEFAULT_BUFSIZE;
1089 
1090 	return ret;
1091 }
1092 
1093 /**
1094  * stmmac_clear_rx_descriptors - clear RX descriptors
1095  * @priv: driver private structure
1096  * @queue: RX queue index
1097  * Description: this function is called to clear the RX descriptors
1098  * in case of both basic and extended descriptors are used.
1099  */
1100 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1101 {
1102 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1103 	int i;
1104 
1105 	/* Clear the RX descriptors */
1106 	for (i = 0; i < DMA_RX_SIZE; i++)
1107 		if (priv->extend_desc)
1108 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1109 					priv->use_riwt, priv->mode,
1110 					(i == DMA_RX_SIZE - 1));
1111 		else
1112 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1113 					priv->use_riwt, priv->mode,
1114 					(i == DMA_RX_SIZE - 1));
1115 }
1116 
1117 /**
1118  * stmmac_clear_tx_descriptors - clear tx descriptors
1119  * @priv: driver private structure
1120  * @queue: TX queue index.
1121  * Description: this function is called to clear the TX descriptors
1122  * in case of both basic and extended descriptors are used.
1123  */
1124 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1125 {
1126 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1127 	int i;
1128 
1129 	/* Clear the TX descriptors */
1130 	for (i = 0; i < DMA_TX_SIZE; i++)
1131 		if (priv->extend_desc)
1132 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1133 					priv->mode, (i == DMA_TX_SIZE - 1));
1134 		else
1135 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1136 					priv->mode, (i == DMA_TX_SIZE - 1));
1137 }
1138 
1139 /**
1140  * stmmac_clear_descriptors - clear descriptors
1141  * @priv: driver private structure
1142  * Description: this function is called to clear the TX and RX descriptors
1143  * in case of both basic and extended descriptors are used.
1144  */
1145 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1146 {
1147 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1148 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1149 	u32 queue;
1150 
1151 	/* Clear the RX descriptors */
1152 	for (queue = 0; queue < rx_queue_cnt; queue++)
1153 		stmmac_clear_rx_descriptors(priv, queue);
1154 
1155 	/* Clear the TX descriptors */
1156 	for (queue = 0; queue < tx_queue_cnt; queue++)
1157 		stmmac_clear_tx_descriptors(priv, queue);
1158 }
1159 
1160 /**
1161  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1162  * @priv: driver private structure
1163  * @p: descriptor pointer
1164  * @i: descriptor index
1165  * @flags: gfp flag
1166  * @queue: RX queue index
1167  * Description: this function is called to allocate a receive buffer, perform
1168  * the DMA mapping and init the descriptor.
1169  */
1170 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1171 				  int i, gfp_t flags, u32 queue)
1172 {
1173 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1174 	struct sk_buff *skb;
1175 
1176 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1177 	if (!skb) {
1178 		netdev_err(priv->dev,
1179 			   "%s: Rx init fails; skb is NULL\n", __func__);
1180 		return -ENOMEM;
1181 	}
1182 	rx_q->rx_skbuff[i] = skb;
1183 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1184 						priv->dma_buf_sz,
1185 						DMA_FROM_DEVICE);
1186 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1187 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1188 		dev_kfree_skb_any(skb);
1189 		return -EINVAL;
1190 	}
1191 
1192 	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1193 
1194 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1195 		stmmac_init_desc3(priv, p);
1196 
1197 	return 0;
1198 }
1199 
1200 /**
1201  * stmmac_free_rx_buffer - free RX dma buffers
1202  * @priv: private structure
1203  * @queue: RX queue index
1204  * @i: buffer index.
1205  */
1206 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1207 {
1208 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1209 
1210 	if (rx_q->rx_skbuff[i]) {
1211 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1212 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1213 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1214 	}
1215 	rx_q->rx_skbuff[i] = NULL;
1216 }
1217 
1218 /**
1219  * stmmac_free_tx_buffer - free RX dma buffers
1220  * @priv: private structure
1221  * @queue: RX queue index
1222  * @i: buffer index.
1223  */
1224 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1225 {
1226 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1227 
1228 	if (tx_q->tx_skbuff_dma[i].buf) {
1229 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1230 			dma_unmap_page(priv->device,
1231 				       tx_q->tx_skbuff_dma[i].buf,
1232 				       tx_q->tx_skbuff_dma[i].len,
1233 				       DMA_TO_DEVICE);
1234 		else
1235 			dma_unmap_single(priv->device,
1236 					 tx_q->tx_skbuff_dma[i].buf,
1237 					 tx_q->tx_skbuff_dma[i].len,
1238 					 DMA_TO_DEVICE);
1239 	}
1240 
1241 	if (tx_q->tx_skbuff[i]) {
1242 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1243 		tx_q->tx_skbuff[i] = NULL;
1244 		tx_q->tx_skbuff_dma[i].buf = 0;
1245 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1246 	}
1247 }
1248 
1249 /**
1250  * init_dma_rx_desc_rings - init the RX descriptor rings
1251  * @dev: net device structure
1252  * @flags: gfp flag.
1253  * Description: this function initializes the DMA RX descriptors
1254  * and allocates the socket buffers. It supports the chained and ring
1255  * modes.
1256  */
1257 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1258 {
1259 	struct stmmac_priv *priv = netdev_priv(dev);
1260 	u32 rx_count = priv->plat->rx_queues_to_use;
1261 	int ret = -ENOMEM;
1262 	int bfsize = 0;
1263 	int queue;
1264 	int i;
1265 
1266 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1267 	if (bfsize < 0)
1268 		bfsize = 0;
1269 
1270 	if (bfsize < BUF_SIZE_16KiB)
1271 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1272 
1273 	priv->dma_buf_sz = bfsize;
1274 
1275 	/* RX INITIALIZATION */
1276 	netif_dbg(priv, probe, priv->dev,
1277 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1278 
1279 	for (queue = 0; queue < rx_count; queue++) {
1280 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1281 
1282 		netif_dbg(priv, probe, priv->dev,
1283 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1284 			  (u32)rx_q->dma_rx_phy);
1285 
1286 		for (i = 0; i < DMA_RX_SIZE; i++) {
1287 			struct dma_desc *p;
1288 
1289 			if (priv->extend_desc)
1290 				p = &((rx_q->dma_erx + i)->basic);
1291 			else
1292 				p = rx_q->dma_rx + i;
1293 
1294 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1295 						     queue);
1296 			if (ret)
1297 				goto err_init_rx_buffers;
1298 
1299 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1300 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1301 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1302 		}
1303 
1304 		rx_q->cur_rx = 0;
1305 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1306 
1307 		stmmac_clear_rx_descriptors(priv, queue);
1308 
1309 		/* Setup the chained descriptor addresses */
1310 		if (priv->mode == STMMAC_CHAIN_MODE) {
1311 			if (priv->extend_desc)
1312 				stmmac_mode_init(priv, rx_q->dma_erx,
1313 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1314 			else
1315 				stmmac_mode_init(priv, rx_q->dma_rx,
1316 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1317 		}
1318 	}
1319 
1320 	buf_sz = bfsize;
1321 
1322 	return 0;
1323 
1324 err_init_rx_buffers:
1325 	while (queue >= 0) {
1326 		while (--i >= 0)
1327 			stmmac_free_rx_buffer(priv, queue, i);
1328 
1329 		if (queue == 0)
1330 			break;
1331 
1332 		i = DMA_RX_SIZE;
1333 		queue--;
1334 	}
1335 
1336 	return ret;
1337 }
1338 
1339 /**
1340  * init_dma_tx_desc_rings - init the TX descriptor rings
1341  * @dev: net device structure.
1342  * Description: this function initializes the DMA TX descriptors
1343  * and allocates the socket buffers. It supports the chained and ring
1344  * modes.
1345  */
1346 static int init_dma_tx_desc_rings(struct net_device *dev)
1347 {
1348 	struct stmmac_priv *priv = netdev_priv(dev);
1349 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1350 	u32 queue;
1351 	int i;
1352 
1353 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1354 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1355 
1356 		netif_dbg(priv, probe, priv->dev,
1357 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1358 			 (u32)tx_q->dma_tx_phy);
1359 
1360 		/* Setup the chained descriptor addresses */
1361 		if (priv->mode == STMMAC_CHAIN_MODE) {
1362 			if (priv->extend_desc)
1363 				stmmac_mode_init(priv, tx_q->dma_etx,
1364 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1365 			else
1366 				stmmac_mode_init(priv, tx_q->dma_tx,
1367 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1368 		}
1369 
1370 		for (i = 0; i < DMA_TX_SIZE; i++) {
1371 			struct dma_desc *p;
1372 			if (priv->extend_desc)
1373 				p = &((tx_q->dma_etx + i)->basic);
1374 			else
1375 				p = tx_q->dma_tx + i;
1376 
1377 			stmmac_clear_desc(priv, p);
1378 
1379 			tx_q->tx_skbuff_dma[i].buf = 0;
1380 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1381 			tx_q->tx_skbuff_dma[i].len = 0;
1382 			tx_q->tx_skbuff_dma[i].last_segment = false;
1383 			tx_q->tx_skbuff[i] = NULL;
1384 		}
1385 
1386 		tx_q->dirty_tx = 0;
1387 		tx_q->cur_tx = 0;
1388 		tx_q->mss = 0;
1389 
1390 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1391 	}
1392 
1393 	return 0;
1394 }
1395 
1396 /**
1397  * init_dma_desc_rings - init the RX/TX descriptor rings
1398  * @dev: net device structure
1399  * @flags: gfp flag.
1400  * Description: this function initializes the DMA RX/TX descriptors
1401  * and allocates the socket buffers. It supports the chained and ring
1402  * modes.
1403  */
1404 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1405 {
1406 	struct stmmac_priv *priv = netdev_priv(dev);
1407 	int ret;
1408 
1409 	ret = init_dma_rx_desc_rings(dev, flags);
1410 	if (ret)
1411 		return ret;
1412 
1413 	ret = init_dma_tx_desc_rings(dev);
1414 
1415 	stmmac_clear_descriptors(priv);
1416 
1417 	if (netif_msg_hw(priv))
1418 		stmmac_display_rings(priv);
1419 
1420 	return ret;
1421 }
1422 
1423 /**
1424  * dma_free_rx_skbufs - free RX dma buffers
1425  * @priv: private structure
1426  * @queue: RX queue index
1427  */
1428 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1429 {
1430 	int i;
1431 
1432 	for (i = 0; i < DMA_RX_SIZE; i++)
1433 		stmmac_free_rx_buffer(priv, queue, i);
1434 }
1435 
1436 /**
1437  * dma_free_tx_skbufs - free TX dma buffers
1438  * @priv: private structure
1439  * @queue: TX queue index
1440  */
1441 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1442 {
1443 	int i;
1444 
1445 	for (i = 0; i < DMA_TX_SIZE; i++)
1446 		stmmac_free_tx_buffer(priv, queue, i);
1447 }
1448 
1449 /**
1450  * free_dma_rx_desc_resources - free RX dma desc resources
1451  * @priv: private structure
1452  */
1453 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1454 {
1455 	u32 rx_count = priv->plat->rx_queues_to_use;
1456 	u32 queue;
1457 
1458 	/* Free RX queue resources */
1459 	for (queue = 0; queue < rx_count; queue++) {
1460 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1461 
1462 		/* Release the DMA RX socket buffers */
1463 		dma_free_rx_skbufs(priv, queue);
1464 
1465 		/* Free DMA regions of consistent memory previously allocated */
1466 		if (!priv->extend_desc)
1467 			dma_free_coherent(priv->device,
1468 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1469 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1470 		else
1471 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1472 					  sizeof(struct dma_extended_desc),
1473 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1474 
1475 		kfree(rx_q->rx_skbuff_dma);
1476 		kfree(rx_q->rx_skbuff);
1477 	}
1478 }
1479 
1480 /**
1481  * free_dma_tx_desc_resources - free TX dma desc resources
1482  * @priv: private structure
1483  */
1484 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1485 {
1486 	u32 tx_count = priv->plat->tx_queues_to_use;
1487 	u32 queue;
1488 
1489 	/* Free TX queue resources */
1490 	for (queue = 0; queue < tx_count; queue++) {
1491 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1492 
1493 		/* Release the DMA TX socket buffers */
1494 		dma_free_tx_skbufs(priv, queue);
1495 
1496 		/* Free DMA regions of consistent memory previously allocated */
1497 		if (!priv->extend_desc)
1498 			dma_free_coherent(priv->device,
1499 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1500 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1501 		else
1502 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1503 					  sizeof(struct dma_extended_desc),
1504 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1505 
1506 		kfree(tx_q->tx_skbuff_dma);
1507 		kfree(tx_q->tx_skbuff);
1508 	}
1509 }
1510 
1511 /**
1512  * alloc_dma_rx_desc_resources - alloc RX resources.
1513  * @priv: private structure
1514  * Description: according to which descriptor can be used (extend or basic)
1515  * this function allocates the resources for TX and RX paths. In case of
1516  * reception, for example, it pre-allocated the RX socket buffer in order to
1517  * allow zero-copy mechanism.
1518  */
1519 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1520 {
1521 	u32 rx_count = priv->plat->rx_queues_to_use;
1522 	int ret = -ENOMEM;
1523 	u32 queue;
1524 
1525 	/* RX queues buffers and DMA */
1526 	for (queue = 0; queue < rx_count; queue++) {
1527 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1528 
1529 		rx_q->queue_index = queue;
1530 		rx_q->priv_data = priv;
1531 
1532 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1533 						    sizeof(dma_addr_t),
1534 						    GFP_KERNEL);
1535 		if (!rx_q->rx_skbuff_dma)
1536 			goto err_dma;
1537 
1538 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1539 						sizeof(struct sk_buff *),
1540 						GFP_KERNEL);
1541 		if (!rx_q->rx_skbuff)
1542 			goto err_dma;
1543 
1544 		if (priv->extend_desc) {
1545 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1546 							    DMA_RX_SIZE *
1547 							    sizeof(struct
1548 							    dma_extended_desc),
1549 							    &rx_q->dma_rx_phy,
1550 							    GFP_KERNEL);
1551 			if (!rx_q->dma_erx)
1552 				goto err_dma;
1553 
1554 		} else {
1555 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1556 							   DMA_RX_SIZE *
1557 							   sizeof(struct
1558 							   dma_desc),
1559 							   &rx_q->dma_rx_phy,
1560 							   GFP_KERNEL);
1561 			if (!rx_q->dma_rx)
1562 				goto err_dma;
1563 		}
1564 	}
1565 
1566 	return 0;
1567 
1568 err_dma:
1569 	free_dma_rx_desc_resources(priv);
1570 
1571 	return ret;
1572 }
1573 
1574 /**
1575  * alloc_dma_tx_desc_resources - alloc TX resources.
1576  * @priv: private structure
1577  * Description: according to which descriptor can be used (extend or basic)
1578  * this function allocates the resources for TX and RX paths. In case of
1579  * reception, for example, it pre-allocated the RX socket buffer in order to
1580  * allow zero-copy mechanism.
1581  */
1582 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1583 {
1584 	u32 tx_count = priv->plat->tx_queues_to_use;
1585 	int ret = -ENOMEM;
1586 	u32 queue;
1587 
1588 	/* TX queues buffers and DMA */
1589 	for (queue = 0; queue < tx_count; queue++) {
1590 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1591 
1592 		tx_q->queue_index = queue;
1593 		tx_q->priv_data = priv;
1594 
1595 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1596 						    sizeof(*tx_q->tx_skbuff_dma),
1597 						    GFP_KERNEL);
1598 		if (!tx_q->tx_skbuff_dma)
1599 			goto err_dma;
1600 
1601 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1602 						sizeof(struct sk_buff *),
1603 						GFP_KERNEL);
1604 		if (!tx_q->tx_skbuff)
1605 			goto err_dma;
1606 
1607 		if (priv->extend_desc) {
1608 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1609 							    DMA_TX_SIZE *
1610 							    sizeof(struct
1611 							    dma_extended_desc),
1612 							    &tx_q->dma_tx_phy,
1613 							    GFP_KERNEL);
1614 			if (!tx_q->dma_etx)
1615 				goto err_dma;
1616 		} else {
1617 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1618 							   DMA_TX_SIZE *
1619 							   sizeof(struct
1620 								  dma_desc),
1621 							   &tx_q->dma_tx_phy,
1622 							   GFP_KERNEL);
1623 			if (!tx_q->dma_tx)
1624 				goto err_dma;
1625 		}
1626 	}
1627 
1628 	return 0;
1629 
1630 err_dma:
1631 	free_dma_tx_desc_resources(priv);
1632 
1633 	return ret;
1634 }
1635 
1636 /**
1637  * alloc_dma_desc_resources - alloc TX/RX resources.
1638  * @priv: private structure
1639  * Description: according to which descriptor can be used (extend or basic)
1640  * this function allocates the resources for TX and RX paths. In case of
1641  * reception, for example, it pre-allocated the RX socket buffer in order to
1642  * allow zero-copy mechanism.
1643  */
1644 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1645 {
1646 	/* RX Allocation */
1647 	int ret = alloc_dma_rx_desc_resources(priv);
1648 
1649 	if (ret)
1650 		return ret;
1651 
1652 	ret = alloc_dma_tx_desc_resources(priv);
1653 
1654 	return ret;
1655 }
1656 
1657 /**
1658  * free_dma_desc_resources - free dma desc resources
1659  * @priv: private structure
1660  */
1661 static void free_dma_desc_resources(struct stmmac_priv *priv)
1662 {
1663 	/* Release the DMA RX socket buffers */
1664 	free_dma_rx_desc_resources(priv);
1665 
1666 	/* Release the DMA TX socket buffers */
1667 	free_dma_tx_desc_resources(priv);
1668 }
1669 
1670 /**
1671  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1672  *  @priv: driver private structure
1673  *  Description: It is used for enabling the rx queues in the MAC
1674  */
1675 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1676 {
1677 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1678 	int queue;
1679 	u8 mode;
1680 
1681 	for (queue = 0; queue < rx_queues_count; queue++) {
1682 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1683 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1684 	}
1685 }
1686 
1687 /**
1688  * stmmac_start_rx_dma - start RX DMA channel
1689  * @priv: driver private structure
1690  * @chan: RX channel index
1691  * Description:
1692  * This starts a RX DMA channel
1693  */
1694 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1695 {
1696 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1697 	stmmac_start_rx(priv, priv->ioaddr, chan);
1698 }
1699 
1700 /**
1701  * stmmac_start_tx_dma - start TX DMA channel
1702  * @priv: driver private structure
1703  * @chan: TX channel index
1704  * Description:
1705  * This starts a TX DMA channel
1706  */
1707 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1708 {
1709 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1710 	stmmac_start_tx(priv, priv->ioaddr, chan);
1711 }
1712 
1713 /**
1714  * stmmac_stop_rx_dma - stop RX DMA channel
1715  * @priv: driver private structure
1716  * @chan: RX channel index
1717  * Description:
1718  * This stops a RX DMA channel
1719  */
1720 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1721 {
1722 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1723 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1724 }
1725 
1726 /**
1727  * stmmac_stop_tx_dma - stop TX DMA channel
1728  * @priv: driver private structure
1729  * @chan: TX channel index
1730  * Description:
1731  * This stops a TX DMA channel
1732  */
1733 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1734 {
1735 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1736 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1737 }
1738 
1739 /**
1740  * stmmac_start_all_dma - start all RX and TX DMA channels
1741  * @priv: driver private structure
1742  * Description:
1743  * This starts all the RX and TX DMA channels
1744  */
1745 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1746 {
1747 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1748 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1749 	u32 chan = 0;
1750 
1751 	for (chan = 0; chan < rx_channels_count; chan++)
1752 		stmmac_start_rx_dma(priv, chan);
1753 
1754 	for (chan = 0; chan < tx_channels_count; chan++)
1755 		stmmac_start_tx_dma(priv, chan);
1756 }
1757 
1758 /**
1759  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1760  * @priv: driver private structure
1761  * Description:
1762  * This stops the RX and TX DMA channels
1763  */
1764 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1765 {
1766 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1767 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1768 	u32 chan = 0;
1769 
1770 	for (chan = 0; chan < rx_channels_count; chan++)
1771 		stmmac_stop_rx_dma(priv, chan);
1772 
1773 	for (chan = 0; chan < tx_channels_count; chan++)
1774 		stmmac_stop_tx_dma(priv, chan);
1775 }
1776 
1777 /**
1778  *  stmmac_dma_operation_mode - HW DMA operation mode
1779  *  @priv: driver private structure
1780  *  Description: it is used for configuring the DMA operation mode register in
1781  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1782  */
1783 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1784 {
1785 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1786 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1787 	int rxfifosz = priv->plat->rx_fifo_size;
1788 	int txfifosz = priv->plat->tx_fifo_size;
1789 	u32 txmode = 0;
1790 	u32 rxmode = 0;
1791 	u32 chan = 0;
1792 	u8 qmode = 0;
1793 
1794 	if (rxfifosz == 0)
1795 		rxfifosz = priv->dma_cap.rx_fifo_size;
1796 	if (txfifosz == 0)
1797 		txfifosz = priv->dma_cap.tx_fifo_size;
1798 
1799 	/* Adjust for real per queue fifo size */
1800 	rxfifosz /= rx_channels_count;
1801 	txfifosz /= tx_channels_count;
1802 
1803 	if (priv->plat->force_thresh_dma_mode) {
1804 		txmode = tc;
1805 		rxmode = tc;
1806 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1807 		/*
1808 		 * In case of GMAC, SF mode can be enabled
1809 		 * to perform the TX COE in HW. This depends on:
1810 		 * 1) TX COE if actually supported
1811 		 * 2) There is no bugged Jumbo frame support
1812 		 *    that needs to not insert csum in the TDES.
1813 		 */
1814 		txmode = SF_DMA_MODE;
1815 		rxmode = SF_DMA_MODE;
1816 		priv->xstats.threshold = SF_DMA_MODE;
1817 	} else {
1818 		txmode = tc;
1819 		rxmode = SF_DMA_MODE;
1820 	}
1821 
1822 	/* configure all channels */
1823 	for (chan = 0; chan < rx_channels_count; chan++) {
1824 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1825 
1826 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1827 				rxfifosz, qmode);
1828 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1829 				chan);
1830 	}
1831 
1832 	for (chan = 0; chan < tx_channels_count; chan++) {
1833 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1834 
1835 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1836 				txfifosz, qmode);
1837 	}
1838 }
1839 
1840 /**
1841  * stmmac_tx_clean - to manage the transmission completion
1842  * @priv: driver private structure
1843  * @queue: TX queue index
1844  * Description: it reclaims the transmit resources after transmission completes.
1845  */
1846 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1847 {
1848 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1849 	unsigned int bytes_compl = 0, pkts_compl = 0;
1850 	unsigned int entry;
1851 
1852 	netif_tx_lock(priv->dev);
1853 
1854 	priv->xstats.tx_clean++;
1855 
1856 	entry = tx_q->dirty_tx;
1857 	while (entry != tx_q->cur_tx) {
1858 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1859 		struct dma_desc *p;
1860 		int status;
1861 
1862 		if (priv->extend_desc)
1863 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1864 		else
1865 			p = tx_q->dma_tx + entry;
1866 
1867 		status = stmmac_tx_status(priv, &priv->dev->stats,
1868 				&priv->xstats, p, priv->ioaddr);
1869 		/* Check if the descriptor is owned by the DMA */
1870 		if (unlikely(status & tx_dma_own))
1871 			break;
1872 
1873 		/* Make sure descriptor fields are read after reading
1874 		 * the own bit.
1875 		 */
1876 		dma_rmb();
1877 
1878 		/* Just consider the last segment and ...*/
1879 		if (likely(!(status & tx_not_ls))) {
1880 			/* ... verify the status error condition */
1881 			if (unlikely(status & tx_err)) {
1882 				priv->dev->stats.tx_errors++;
1883 			} else {
1884 				priv->dev->stats.tx_packets++;
1885 				priv->xstats.tx_pkt_n++;
1886 			}
1887 			stmmac_get_tx_hwtstamp(priv, p, skb);
1888 		}
1889 
1890 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1891 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1892 				dma_unmap_page(priv->device,
1893 					       tx_q->tx_skbuff_dma[entry].buf,
1894 					       tx_q->tx_skbuff_dma[entry].len,
1895 					       DMA_TO_DEVICE);
1896 			else
1897 				dma_unmap_single(priv->device,
1898 						 tx_q->tx_skbuff_dma[entry].buf,
1899 						 tx_q->tx_skbuff_dma[entry].len,
1900 						 DMA_TO_DEVICE);
1901 			tx_q->tx_skbuff_dma[entry].buf = 0;
1902 			tx_q->tx_skbuff_dma[entry].len = 0;
1903 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1904 		}
1905 
1906 		stmmac_clean_desc3(priv, tx_q, p);
1907 
1908 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1909 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1910 
1911 		if (likely(skb != NULL)) {
1912 			pkts_compl++;
1913 			bytes_compl += skb->len;
1914 			dev_consume_skb_any(skb);
1915 			tx_q->tx_skbuff[entry] = NULL;
1916 		}
1917 
1918 		stmmac_release_tx_desc(priv, p, priv->mode);
1919 
1920 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1921 	}
1922 	tx_q->dirty_tx = entry;
1923 
1924 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1925 				  pkts_compl, bytes_compl);
1926 
1927 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1928 								queue))) &&
1929 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1930 
1931 		netif_dbg(priv, tx_done, priv->dev,
1932 			  "%s: restart transmit\n", __func__);
1933 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1934 	}
1935 
1936 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1937 		stmmac_enable_eee_mode(priv);
1938 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1939 	}
1940 	netif_tx_unlock(priv->dev);
1941 }
1942 
1943 /**
1944  * stmmac_tx_err - to manage the tx error
1945  * @priv: driver private structure
1946  * @chan: channel index
1947  * Description: it cleans the descriptors and restarts the transmission
1948  * in case of transmission errors.
1949  */
1950 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1951 {
1952 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1953 	int i;
1954 
1955 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1956 
1957 	stmmac_stop_tx_dma(priv, chan);
1958 	dma_free_tx_skbufs(priv, chan);
1959 	for (i = 0; i < DMA_TX_SIZE; i++)
1960 		if (priv->extend_desc)
1961 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1962 					priv->mode, (i == DMA_TX_SIZE - 1));
1963 		else
1964 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1965 					priv->mode, (i == DMA_TX_SIZE - 1));
1966 	tx_q->dirty_tx = 0;
1967 	tx_q->cur_tx = 0;
1968 	tx_q->mss = 0;
1969 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1970 	stmmac_start_tx_dma(priv, chan);
1971 
1972 	priv->dev->stats.tx_errors++;
1973 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1974 }
1975 
1976 /**
1977  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1978  *  @priv: driver private structure
1979  *  @txmode: TX operating mode
1980  *  @rxmode: RX operating mode
1981  *  @chan: channel index
1982  *  Description: it is used for configuring of the DMA operation mode in
1983  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1984  *  mode.
1985  */
1986 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1987 					  u32 rxmode, u32 chan)
1988 {
1989 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1990 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1991 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1992 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1993 	int rxfifosz = priv->plat->rx_fifo_size;
1994 	int txfifosz = priv->plat->tx_fifo_size;
1995 
1996 	if (rxfifosz == 0)
1997 		rxfifosz = priv->dma_cap.rx_fifo_size;
1998 	if (txfifosz == 0)
1999 		txfifosz = priv->dma_cap.tx_fifo_size;
2000 
2001 	/* Adjust for real per queue fifo size */
2002 	rxfifosz /= rx_channels_count;
2003 	txfifosz /= tx_channels_count;
2004 
2005 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2006 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2007 }
2008 
2009 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2010 {
2011 	int ret;
2012 
2013 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2014 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2015 	if (ret && (ret != -EINVAL)) {
2016 		stmmac_global_err(priv);
2017 		return true;
2018 	}
2019 
2020 	return false;
2021 }
2022 
2023 /**
2024  * stmmac_dma_interrupt - DMA ISR
2025  * @priv: driver private structure
2026  * Description: this is the DMA ISR. It is called by the main ISR.
2027  * It calls the dwmac dma routine and schedule poll method in case of some
2028  * work can be done.
2029  */
2030 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2031 {
2032 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2033 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2034 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2035 				tx_channel_count : rx_channel_count;
2036 	u32 chan;
2037 	bool poll_scheduled = false;
2038 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2039 
2040 	/* Make sure we never check beyond our status buffer. */
2041 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2042 		channels_to_check = ARRAY_SIZE(status);
2043 
2044 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2045 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2046 	 * stmmac_channel struct.
2047 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2048 	 * all tx queues rather than just a single tx queue.
2049 	 */
2050 	for (chan = 0; chan < channels_to_check; chan++)
2051 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2052 				&priv->xstats, chan);
2053 
2054 	for (chan = 0; chan < rx_channel_count; chan++) {
2055 		if (likely(status[chan] & handle_rx)) {
2056 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2057 
2058 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2059 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2060 				__napi_schedule(&rx_q->napi);
2061 				poll_scheduled = true;
2062 			}
2063 		}
2064 	}
2065 
2066 	/* If we scheduled poll, we already know that tx queues will be checked.
2067 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2068 	 * completed transmission, if so, call stmmac_poll (once).
2069 	 */
2070 	if (!poll_scheduled) {
2071 		for (chan = 0; chan < tx_channel_count; chan++) {
2072 			if (status[chan] & handle_tx) {
2073 				/* It doesn't matter what rx queue we choose
2074 				 * here. We use 0 since it always exists.
2075 				 */
2076 				struct stmmac_rx_queue *rx_q =
2077 					&priv->rx_queue[0];
2078 
2079 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2080 					stmmac_disable_dma_irq(priv,
2081 							priv->ioaddr, chan);
2082 					__napi_schedule(&rx_q->napi);
2083 				}
2084 				break;
2085 			}
2086 		}
2087 	}
2088 
2089 	for (chan = 0; chan < tx_channel_count; chan++) {
2090 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2091 			/* Try to bump up the dma threshold on this failure */
2092 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2093 			    (tc <= 256)) {
2094 				tc += 64;
2095 				if (priv->plat->force_thresh_dma_mode)
2096 					stmmac_set_dma_operation_mode(priv,
2097 								      tc,
2098 								      tc,
2099 								      chan);
2100 				else
2101 					stmmac_set_dma_operation_mode(priv,
2102 								    tc,
2103 								    SF_DMA_MODE,
2104 								    chan);
2105 				priv->xstats.threshold = tc;
2106 			}
2107 		} else if (unlikely(status[chan] == tx_hard_error)) {
2108 			stmmac_tx_err(priv, chan);
2109 		}
2110 	}
2111 }
2112 
2113 /**
2114  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2115  * @priv: driver private structure
2116  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2117  */
2118 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2119 {
2120 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2121 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2122 
2123 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2124 
2125 	if (priv->dma_cap.rmon) {
2126 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2127 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2128 	} else
2129 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2130 }
2131 
2132 /**
2133  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2134  * @priv: driver private structure
2135  * Description:
2136  *  new GMAC chip generations have a new register to indicate the
2137  *  presence of the optional feature/functions.
2138  *  This can be also used to override the value passed through the
2139  *  platform and necessary for old MAC10/100 and GMAC chips.
2140  */
2141 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2142 {
2143 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2144 }
2145 
2146 /**
2147  * stmmac_check_ether_addr - check if the MAC addr is valid
2148  * @priv: driver private structure
2149  * Description:
2150  * it is to verify if the MAC address is valid, in case of failures it
2151  * generates a random MAC address
2152  */
2153 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2154 {
2155 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2156 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2157 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2158 			eth_hw_addr_random(priv->dev);
2159 		netdev_info(priv->dev, "device MAC address %pM\n",
2160 			    priv->dev->dev_addr);
2161 	}
2162 }
2163 
2164 /**
2165  * stmmac_init_dma_engine - DMA init.
2166  * @priv: driver private structure
2167  * Description:
2168  * It inits the DMA invoking the specific MAC/GMAC callback.
2169  * Some DMA parameters can be passed from the platform;
2170  * in case of these are not passed a default is kept for the MAC or GMAC.
2171  */
2172 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2173 {
2174 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2175 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2176 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2177 	struct stmmac_rx_queue *rx_q;
2178 	struct stmmac_tx_queue *tx_q;
2179 	u32 chan = 0;
2180 	int atds = 0;
2181 	int ret = 0;
2182 
2183 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2184 		dev_err(priv->device, "Invalid DMA configuration\n");
2185 		return -EINVAL;
2186 	}
2187 
2188 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2189 		atds = 1;
2190 
2191 	ret = stmmac_reset(priv, priv->ioaddr);
2192 	if (ret) {
2193 		dev_err(priv->device, "Failed to reset the dma\n");
2194 		return ret;
2195 	}
2196 
2197 	/* DMA Configuration */
2198 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2199 
2200 	if (priv->plat->axi)
2201 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2202 
2203 	/* DMA RX Channel Configuration */
2204 	for (chan = 0; chan < rx_channels_count; chan++) {
2205 		rx_q = &priv->rx_queue[chan];
2206 
2207 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2208 				    rx_q->dma_rx_phy, chan);
2209 
2210 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2211 			    (DMA_RX_SIZE * sizeof(struct dma_desc));
2212 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2213 				       rx_q->rx_tail_addr, chan);
2214 	}
2215 
2216 	/* DMA TX Channel Configuration */
2217 	for (chan = 0; chan < tx_channels_count; chan++) {
2218 		tx_q = &priv->tx_queue[chan];
2219 
2220 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2221 				    tx_q->dma_tx_phy, chan);
2222 
2223 		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2224 			    (DMA_TX_SIZE * sizeof(struct dma_desc));
2225 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2226 				       tx_q->tx_tail_addr, chan);
2227 	}
2228 
2229 	/* DMA CSR Channel configuration */
2230 	for (chan = 0; chan < dma_csr_ch; chan++)
2231 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2232 
2233 	return ret;
2234 }
2235 
2236 /**
2237  * stmmac_tx_timer - mitigation sw timer for tx.
2238  * @data: data pointer
2239  * Description:
2240  * This is the timer handler to directly invoke the stmmac_tx_clean.
2241  */
2242 static void stmmac_tx_timer(struct timer_list *t)
2243 {
2244 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2245 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2246 	u32 queue;
2247 
2248 	/* let's scan all the tx queues */
2249 	for (queue = 0; queue < tx_queues_count; queue++)
2250 		stmmac_tx_clean(priv, queue);
2251 }
2252 
2253 /**
2254  * stmmac_init_tx_coalesce - init tx mitigation options.
2255  * @priv: driver private structure
2256  * Description:
2257  * This inits the transmit coalesce parameters: i.e. timer rate,
2258  * timer handler and default threshold used for enabling the
2259  * interrupt on completion bit.
2260  */
2261 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2262 {
2263 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2264 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2265 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2266 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2267 	add_timer(&priv->txtimer);
2268 }
2269 
2270 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2271 {
2272 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2273 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2274 	u32 chan;
2275 
2276 	/* set TX ring length */
2277 	for (chan = 0; chan < tx_channels_count; chan++)
2278 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2279 				(DMA_TX_SIZE - 1), chan);
2280 
2281 	/* set RX ring length */
2282 	for (chan = 0; chan < rx_channels_count; chan++)
2283 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2284 				(DMA_RX_SIZE - 1), chan);
2285 }
2286 
2287 /**
2288  *  stmmac_set_tx_queue_weight - Set TX queue weight
2289  *  @priv: driver private structure
2290  *  Description: It is used for setting TX queues weight
2291  */
2292 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2293 {
2294 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2295 	u32 weight;
2296 	u32 queue;
2297 
2298 	for (queue = 0; queue < tx_queues_count; queue++) {
2299 		weight = priv->plat->tx_queues_cfg[queue].weight;
2300 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2301 	}
2302 }
2303 
2304 /**
2305  *  stmmac_configure_cbs - Configure CBS in TX queue
2306  *  @priv: driver private structure
2307  *  Description: It is used for configuring CBS in AVB TX queues
2308  */
2309 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2310 {
2311 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2312 	u32 mode_to_use;
2313 	u32 queue;
2314 
2315 	/* queue 0 is reserved for legacy traffic */
2316 	for (queue = 1; queue < tx_queues_count; queue++) {
2317 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2318 		if (mode_to_use == MTL_QUEUE_DCB)
2319 			continue;
2320 
2321 		stmmac_config_cbs(priv, priv->hw,
2322 				priv->plat->tx_queues_cfg[queue].send_slope,
2323 				priv->plat->tx_queues_cfg[queue].idle_slope,
2324 				priv->plat->tx_queues_cfg[queue].high_credit,
2325 				priv->plat->tx_queues_cfg[queue].low_credit,
2326 				queue);
2327 	}
2328 }
2329 
2330 /**
2331  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2332  *  @priv: driver private structure
2333  *  Description: It is used for mapping RX queues to RX dma channels
2334  */
2335 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2336 {
2337 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2338 	u32 queue;
2339 	u32 chan;
2340 
2341 	for (queue = 0; queue < rx_queues_count; queue++) {
2342 		chan = priv->plat->rx_queues_cfg[queue].chan;
2343 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2344 	}
2345 }
2346 
2347 /**
2348  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2349  *  @priv: driver private structure
2350  *  Description: It is used for configuring the RX Queue Priority
2351  */
2352 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2353 {
2354 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2355 	u32 queue;
2356 	u32 prio;
2357 
2358 	for (queue = 0; queue < rx_queues_count; queue++) {
2359 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2360 			continue;
2361 
2362 		prio = priv->plat->rx_queues_cfg[queue].prio;
2363 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2364 	}
2365 }
2366 
2367 /**
2368  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2369  *  @priv: driver private structure
2370  *  Description: It is used for configuring the TX Queue Priority
2371  */
2372 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2373 {
2374 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2375 	u32 queue;
2376 	u32 prio;
2377 
2378 	for (queue = 0; queue < tx_queues_count; queue++) {
2379 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2380 			continue;
2381 
2382 		prio = priv->plat->tx_queues_cfg[queue].prio;
2383 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2384 	}
2385 }
2386 
2387 /**
2388  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2389  *  @priv: driver private structure
2390  *  Description: It is used for configuring the RX queue routing
2391  */
2392 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2393 {
2394 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2395 	u32 queue;
2396 	u8 packet;
2397 
2398 	for (queue = 0; queue < rx_queues_count; queue++) {
2399 		/* no specific packet type routing specified for the queue */
2400 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2401 			continue;
2402 
2403 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2404 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2405 	}
2406 }
2407 
2408 /**
2409  *  stmmac_mtl_configuration - Configure MTL
2410  *  @priv: driver private structure
2411  *  Description: It is used for configurring MTL
2412  */
2413 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2414 {
2415 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2416 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2417 
2418 	if (tx_queues_count > 1)
2419 		stmmac_set_tx_queue_weight(priv);
2420 
2421 	/* Configure MTL RX algorithms */
2422 	if (rx_queues_count > 1)
2423 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2424 				priv->plat->rx_sched_algorithm);
2425 
2426 	/* Configure MTL TX algorithms */
2427 	if (tx_queues_count > 1)
2428 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2429 				priv->plat->tx_sched_algorithm);
2430 
2431 	/* Configure CBS in AVB TX queues */
2432 	if (tx_queues_count > 1)
2433 		stmmac_configure_cbs(priv);
2434 
2435 	/* Map RX MTL to DMA channels */
2436 	stmmac_rx_queue_dma_chan_map(priv);
2437 
2438 	/* Enable MAC RX Queues */
2439 	stmmac_mac_enable_rx_queues(priv);
2440 
2441 	/* Set RX priorities */
2442 	if (rx_queues_count > 1)
2443 		stmmac_mac_config_rx_queues_prio(priv);
2444 
2445 	/* Set TX priorities */
2446 	if (tx_queues_count > 1)
2447 		stmmac_mac_config_tx_queues_prio(priv);
2448 
2449 	/* Set RX routing */
2450 	if (rx_queues_count > 1)
2451 		stmmac_mac_config_rx_queues_routing(priv);
2452 }
2453 
2454 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2455 {
2456 	if (priv->dma_cap.asp) {
2457 		netdev_info(priv->dev, "Enabling Safety Features\n");
2458 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2459 	} else {
2460 		netdev_info(priv->dev, "No Safety Features support found\n");
2461 	}
2462 }
2463 
2464 /**
2465  * stmmac_hw_setup - setup mac in a usable state.
2466  *  @dev : pointer to the device structure.
2467  *  Description:
2468  *  this is the main function to setup the HW in a usable state because the
2469  *  dma engine is reset, the core registers are configured (e.g. AXI,
2470  *  Checksum features, timers). The DMA is ready to start receiving and
2471  *  transmitting.
2472  *  Return value:
2473  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2474  *  file on failure.
2475  */
2476 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2477 {
2478 	struct stmmac_priv *priv = netdev_priv(dev);
2479 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2480 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2481 	u32 chan;
2482 	int ret;
2483 
2484 	/* DMA initialization and SW reset */
2485 	ret = stmmac_init_dma_engine(priv);
2486 	if (ret < 0) {
2487 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2488 			   __func__);
2489 		return ret;
2490 	}
2491 
2492 	/* Copy the MAC addr into the HW  */
2493 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2494 
2495 	/* PS and related bits will be programmed according to the speed */
2496 	if (priv->hw->pcs) {
2497 		int speed = priv->plat->mac_port_sel_speed;
2498 
2499 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2500 		    (speed == SPEED_1000)) {
2501 			priv->hw->ps = speed;
2502 		} else {
2503 			dev_warn(priv->device, "invalid port speed\n");
2504 			priv->hw->ps = 0;
2505 		}
2506 	}
2507 
2508 	/* Initialize the MAC Core */
2509 	stmmac_core_init(priv, priv->hw, dev);
2510 
2511 	/* Initialize MTL*/
2512 	stmmac_mtl_configuration(priv);
2513 
2514 	/* Initialize Safety Features */
2515 	stmmac_safety_feat_configuration(priv);
2516 
2517 	ret = stmmac_rx_ipc(priv, priv->hw);
2518 	if (!ret) {
2519 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2520 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2521 		priv->hw->rx_csum = 0;
2522 	}
2523 
2524 	/* Enable the MAC Rx/Tx */
2525 	stmmac_mac_set(priv, priv->ioaddr, true);
2526 
2527 	/* Set the HW DMA mode and the COE */
2528 	stmmac_dma_operation_mode(priv);
2529 
2530 	stmmac_mmc_setup(priv);
2531 
2532 	if (init_ptp) {
2533 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2534 		if (ret < 0)
2535 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2536 
2537 		ret = stmmac_init_ptp(priv);
2538 		if (ret == -EOPNOTSUPP)
2539 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2540 		else if (ret)
2541 			netdev_warn(priv->dev, "PTP init failed\n");
2542 	}
2543 
2544 #ifdef CONFIG_DEBUG_FS
2545 	ret = stmmac_init_fs(dev);
2546 	if (ret < 0)
2547 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2548 			    __func__);
2549 #endif
2550 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2551 
2552 	if (priv->use_riwt) {
2553 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2554 		if (!ret)
2555 			priv->rx_riwt = MAX_DMA_RIWT;
2556 	}
2557 
2558 	if (priv->hw->pcs)
2559 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2560 
2561 	/* set TX and RX rings length */
2562 	stmmac_set_rings_length(priv);
2563 
2564 	/* Enable TSO */
2565 	if (priv->tso) {
2566 		for (chan = 0; chan < tx_cnt; chan++)
2567 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2568 	}
2569 
2570 	/* Start the ball rolling... */
2571 	stmmac_start_all_dma(priv);
2572 
2573 	return 0;
2574 }
2575 
2576 static void stmmac_hw_teardown(struct net_device *dev)
2577 {
2578 	struct stmmac_priv *priv = netdev_priv(dev);
2579 
2580 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2581 }
2582 
2583 /**
2584  *  stmmac_open - open entry point of the driver
2585  *  @dev : pointer to the device structure.
2586  *  Description:
2587  *  This function is the open entry point of the driver.
2588  *  Return value:
2589  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2590  *  file on failure.
2591  */
2592 static int stmmac_open(struct net_device *dev)
2593 {
2594 	struct stmmac_priv *priv = netdev_priv(dev);
2595 	int ret;
2596 
2597 	stmmac_check_ether_addr(priv);
2598 
2599 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2600 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2601 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2602 		ret = stmmac_init_phy(dev);
2603 		if (ret) {
2604 			netdev_err(priv->dev,
2605 				   "%s: Cannot attach to PHY (error: %d)\n",
2606 				   __func__, ret);
2607 			return ret;
2608 		}
2609 	}
2610 
2611 	/* Extra statistics */
2612 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2613 	priv->xstats.threshold = tc;
2614 
2615 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2616 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2617 
2618 	ret = alloc_dma_desc_resources(priv);
2619 	if (ret < 0) {
2620 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2621 			   __func__);
2622 		goto dma_desc_error;
2623 	}
2624 
2625 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2626 	if (ret < 0) {
2627 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2628 			   __func__);
2629 		goto init_error;
2630 	}
2631 
2632 	ret = stmmac_hw_setup(dev, true);
2633 	if (ret < 0) {
2634 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2635 		goto init_error;
2636 	}
2637 
2638 	stmmac_init_tx_coalesce(priv);
2639 
2640 	if (dev->phydev)
2641 		phy_start(dev->phydev);
2642 
2643 	/* Request the IRQ lines */
2644 	ret = request_irq(dev->irq, stmmac_interrupt,
2645 			  IRQF_SHARED, dev->name, dev);
2646 	if (unlikely(ret < 0)) {
2647 		netdev_err(priv->dev,
2648 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2649 			   __func__, dev->irq, ret);
2650 		goto irq_error;
2651 	}
2652 
2653 	/* Request the Wake IRQ in case of another line is used for WoL */
2654 	if (priv->wol_irq != dev->irq) {
2655 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2656 				  IRQF_SHARED, dev->name, dev);
2657 		if (unlikely(ret < 0)) {
2658 			netdev_err(priv->dev,
2659 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2660 				   __func__, priv->wol_irq, ret);
2661 			goto wolirq_error;
2662 		}
2663 	}
2664 
2665 	/* Request the IRQ lines */
2666 	if (priv->lpi_irq > 0) {
2667 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2668 				  dev->name, dev);
2669 		if (unlikely(ret < 0)) {
2670 			netdev_err(priv->dev,
2671 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2672 				   __func__, priv->lpi_irq, ret);
2673 			goto lpiirq_error;
2674 		}
2675 	}
2676 
2677 	stmmac_enable_all_queues(priv);
2678 	stmmac_start_all_queues(priv);
2679 
2680 	return 0;
2681 
2682 lpiirq_error:
2683 	if (priv->wol_irq != dev->irq)
2684 		free_irq(priv->wol_irq, dev);
2685 wolirq_error:
2686 	free_irq(dev->irq, dev);
2687 irq_error:
2688 	if (dev->phydev)
2689 		phy_stop(dev->phydev);
2690 
2691 	del_timer_sync(&priv->txtimer);
2692 	stmmac_hw_teardown(dev);
2693 init_error:
2694 	free_dma_desc_resources(priv);
2695 dma_desc_error:
2696 	if (dev->phydev)
2697 		phy_disconnect(dev->phydev);
2698 
2699 	return ret;
2700 }
2701 
2702 /**
2703  *  stmmac_release - close entry point of the driver
2704  *  @dev : device pointer.
2705  *  Description:
2706  *  This is the stop entry point of the driver.
2707  */
2708 static int stmmac_release(struct net_device *dev)
2709 {
2710 	struct stmmac_priv *priv = netdev_priv(dev);
2711 
2712 	if (priv->eee_enabled)
2713 		del_timer_sync(&priv->eee_ctrl_timer);
2714 
2715 	/* Stop and disconnect the PHY */
2716 	if (dev->phydev) {
2717 		phy_stop(dev->phydev);
2718 		phy_disconnect(dev->phydev);
2719 	}
2720 
2721 	stmmac_stop_all_queues(priv);
2722 
2723 	stmmac_disable_all_queues(priv);
2724 
2725 	del_timer_sync(&priv->txtimer);
2726 
2727 	/* Free the IRQ lines */
2728 	free_irq(dev->irq, dev);
2729 	if (priv->wol_irq != dev->irq)
2730 		free_irq(priv->wol_irq, dev);
2731 	if (priv->lpi_irq > 0)
2732 		free_irq(priv->lpi_irq, dev);
2733 
2734 	/* Stop TX/RX DMA and clear the descriptors */
2735 	stmmac_stop_all_dma(priv);
2736 
2737 	/* Release and free the Rx/Tx resources */
2738 	free_dma_desc_resources(priv);
2739 
2740 	/* Disable the MAC Rx/Tx */
2741 	stmmac_mac_set(priv, priv->ioaddr, false);
2742 
2743 	netif_carrier_off(dev);
2744 
2745 #ifdef CONFIG_DEBUG_FS
2746 	stmmac_exit_fs(dev);
2747 #endif
2748 
2749 	stmmac_release_ptp(priv);
2750 
2751 	return 0;
2752 }
2753 
2754 /**
2755  *  stmmac_tso_allocator - close entry point of the driver
2756  *  @priv: driver private structure
2757  *  @des: buffer start address
2758  *  @total_len: total length to fill in descriptors
2759  *  @last_segmant: condition for the last descriptor
2760  *  @queue: TX queue index
2761  *  Description:
2762  *  This function fills descriptor and request new descriptors according to
2763  *  buffer length to fill
2764  */
2765 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2766 				 int total_len, bool last_segment, u32 queue)
2767 {
2768 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2769 	struct dma_desc *desc;
2770 	u32 buff_size;
2771 	int tmp_len;
2772 
2773 	tmp_len = total_len;
2774 
2775 	while (tmp_len > 0) {
2776 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2777 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2778 		desc = tx_q->dma_tx + tx_q->cur_tx;
2779 
2780 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2781 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2782 			    TSO_MAX_BUFF_SIZE : tmp_len;
2783 
2784 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2785 				0, 1,
2786 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2787 				0, 0);
2788 
2789 		tmp_len -= TSO_MAX_BUFF_SIZE;
2790 	}
2791 }
2792 
2793 /**
2794  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2795  *  @skb : the socket buffer
2796  *  @dev : device pointer
2797  *  Description: this is the transmit function that is called on TSO frames
2798  *  (support available on GMAC4 and newer chips).
2799  *  Diagram below show the ring programming in case of TSO frames:
2800  *
2801  *  First Descriptor
2802  *   --------
2803  *   | DES0 |---> buffer1 = L2/L3/L4 header
2804  *   | DES1 |---> TCP Payload (can continue on next descr...)
2805  *   | DES2 |---> buffer 1 and 2 len
2806  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2807  *   --------
2808  *	|
2809  *     ...
2810  *	|
2811  *   --------
2812  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2813  *   | DES1 | --|
2814  *   | DES2 | --> buffer 1 and 2 len
2815  *   | DES3 |
2816  *   --------
2817  *
2818  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2819  */
2820 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2821 {
2822 	struct dma_desc *desc, *first, *mss_desc = NULL;
2823 	struct stmmac_priv *priv = netdev_priv(dev);
2824 	int nfrags = skb_shinfo(skb)->nr_frags;
2825 	u32 queue = skb_get_queue_mapping(skb);
2826 	unsigned int first_entry, des;
2827 	struct stmmac_tx_queue *tx_q;
2828 	int tmp_pay_len = 0;
2829 	u32 pay_len, mss;
2830 	u8 proto_hdr_len;
2831 	int i;
2832 
2833 	tx_q = &priv->tx_queue[queue];
2834 
2835 	/* Compute header lengths */
2836 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2837 
2838 	/* Desc availability based on threshold should be enough safe */
2839 	if (unlikely(stmmac_tx_avail(priv, queue) <
2840 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2841 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2842 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2843 								queue));
2844 			/* This is a hard error, log it. */
2845 			netdev_err(priv->dev,
2846 				   "%s: Tx Ring full when queue awake\n",
2847 				   __func__);
2848 		}
2849 		return NETDEV_TX_BUSY;
2850 	}
2851 
2852 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2853 
2854 	mss = skb_shinfo(skb)->gso_size;
2855 
2856 	/* set new MSS value if needed */
2857 	if (mss != tx_q->mss) {
2858 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2859 		stmmac_set_mss(priv, mss_desc, mss);
2860 		tx_q->mss = mss;
2861 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2862 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2863 	}
2864 
2865 	if (netif_msg_tx_queued(priv)) {
2866 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2867 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2868 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2869 			skb->data_len);
2870 	}
2871 
2872 	first_entry = tx_q->cur_tx;
2873 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2874 
2875 	desc = tx_q->dma_tx + first_entry;
2876 	first = desc;
2877 
2878 	/* first descriptor: fill Headers on Buf1 */
2879 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2880 			     DMA_TO_DEVICE);
2881 	if (dma_mapping_error(priv->device, des))
2882 		goto dma_map_err;
2883 
2884 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2885 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2886 
2887 	first->des0 = cpu_to_le32(des);
2888 
2889 	/* Fill start of payload in buff2 of first descriptor */
2890 	if (pay_len)
2891 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2892 
2893 	/* If needed take extra descriptors to fill the remaining payload */
2894 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2895 
2896 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2897 
2898 	/* Prepare fragments */
2899 	for (i = 0; i < nfrags; i++) {
2900 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2901 
2902 		des = skb_frag_dma_map(priv->device, frag, 0,
2903 				       skb_frag_size(frag),
2904 				       DMA_TO_DEVICE);
2905 		if (dma_mapping_error(priv->device, des))
2906 			goto dma_map_err;
2907 
2908 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2909 				     (i == nfrags - 1), queue);
2910 
2911 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2912 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2913 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2914 	}
2915 
2916 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2917 
2918 	/* Only the last descriptor gets to point to the skb. */
2919 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2920 
2921 	/* We've used all descriptors we need for this skb, however,
2922 	 * advance cur_tx so that it references a fresh descriptor.
2923 	 * ndo_start_xmit will fill this descriptor the next time it's
2924 	 * called and stmmac_tx_clean may clean up to this descriptor.
2925 	 */
2926 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2927 
2928 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2929 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2930 			  __func__);
2931 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2932 	}
2933 
2934 	dev->stats.tx_bytes += skb->len;
2935 	priv->xstats.tx_tso_frames++;
2936 	priv->xstats.tx_tso_nfrags += nfrags;
2937 
2938 	/* Manage tx mitigation */
2939 	priv->tx_count_frames += nfrags + 1;
2940 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2941 		mod_timer(&priv->txtimer,
2942 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2943 	} else {
2944 		priv->tx_count_frames = 0;
2945 		stmmac_set_tx_ic(priv, desc);
2946 		priv->xstats.tx_set_ic_bit++;
2947 	}
2948 
2949 	skb_tx_timestamp(skb);
2950 
2951 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2952 		     priv->hwts_tx_en)) {
2953 		/* declare that device is doing timestamping */
2954 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2955 		stmmac_enable_tx_timestamp(priv, first);
2956 	}
2957 
2958 	/* Complete the first descriptor before granting the DMA */
2959 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2960 			proto_hdr_len,
2961 			pay_len,
2962 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2963 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2964 
2965 	/* If context desc is used to change MSS */
2966 	if (mss_desc) {
2967 		/* Make sure that first descriptor has been completely
2968 		 * written, including its own bit. This is because MSS is
2969 		 * actually before first descriptor, so we need to make
2970 		 * sure that MSS's own bit is the last thing written.
2971 		 */
2972 		dma_wmb();
2973 		stmmac_set_tx_owner(priv, mss_desc);
2974 	}
2975 
2976 	/* The own bit must be the latest setting done when prepare the
2977 	 * descriptor and then barrier is needed to make sure that
2978 	 * all is coherent before granting the DMA engine.
2979 	 */
2980 	wmb();
2981 
2982 	if (netif_msg_pktdata(priv)) {
2983 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2984 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2985 			tx_q->cur_tx, first, nfrags);
2986 
2987 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2988 
2989 		pr_info(">>> frame to be transmitted: ");
2990 		print_pkt(skb->data, skb_headlen(skb));
2991 	}
2992 
2993 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2994 
2995 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2996 
2997 	return NETDEV_TX_OK;
2998 
2999 dma_map_err:
3000 	dev_err(priv->device, "Tx dma map failed\n");
3001 	dev_kfree_skb(skb);
3002 	priv->dev->stats.tx_dropped++;
3003 	return NETDEV_TX_OK;
3004 }
3005 
3006 /**
3007  *  stmmac_xmit - Tx entry point of the driver
3008  *  @skb : the socket buffer
3009  *  @dev : device pointer
3010  *  Description : this is the tx entry point of the driver.
3011  *  It programs the chain or the ring and supports oversized frames
3012  *  and SG feature.
3013  */
3014 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3015 {
3016 	struct stmmac_priv *priv = netdev_priv(dev);
3017 	unsigned int nopaged_len = skb_headlen(skb);
3018 	int i, csum_insertion = 0, is_jumbo = 0;
3019 	u32 queue = skb_get_queue_mapping(skb);
3020 	int nfrags = skb_shinfo(skb)->nr_frags;
3021 	int entry;
3022 	unsigned int first_entry;
3023 	struct dma_desc *desc, *first;
3024 	struct stmmac_tx_queue *tx_q;
3025 	unsigned int enh_desc;
3026 	unsigned int des;
3027 
3028 	tx_q = &priv->tx_queue[queue];
3029 
3030 	/* Manage oversized TCP frames for GMAC4 device */
3031 	if (skb_is_gso(skb) && priv->tso) {
3032 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3033 			return stmmac_tso_xmit(skb, dev);
3034 	}
3035 
3036 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3037 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3038 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3039 								queue));
3040 			/* This is a hard error, log it. */
3041 			netdev_err(priv->dev,
3042 				   "%s: Tx Ring full when queue awake\n",
3043 				   __func__);
3044 		}
3045 		return NETDEV_TX_BUSY;
3046 	}
3047 
3048 	if (priv->tx_path_in_lpi_mode)
3049 		stmmac_disable_eee_mode(priv);
3050 
3051 	entry = tx_q->cur_tx;
3052 	first_entry = entry;
3053 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3054 
3055 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3056 
3057 	if (likely(priv->extend_desc))
3058 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3059 	else
3060 		desc = tx_q->dma_tx + entry;
3061 
3062 	first = desc;
3063 
3064 	enh_desc = priv->plat->enh_desc;
3065 	/* To program the descriptors according to the size of the frame */
3066 	if (enh_desc)
3067 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3068 
3069 	if (unlikely(is_jumbo)) {
3070 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3071 		if (unlikely(entry < 0) && (entry != -EINVAL))
3072 			goto dma_map_err;
3073 	}
3074 
3075 	for (i = 0; i < nfrags; i++) {
3076 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3077 		int len = skb_frag_size(frag);
3078 		bool last_segment = (i == (nfrags - 1));
3079 
3080 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3081 		WARN_ON(tx_q->tx_skbuff[entry]);
3082 
3083 		if (likely(priv->extend_desc))
3084 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3085 		else
3086 			desc = tx_q->dma_tx + entry;
3087 
3088 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3089 				       DMA_TO_DEVICE);
3090 		if (dma_mapping_error(priv->device, des))
3091 			goto dma_map_err; /* should reuse desc w/o issues */
3092 
3093 		tx_q->tx_skbuff_dma[entry].buf = des;
3094 
3095 		stmmac_set_desc_addr(priv, desc, des);
3096 
3097 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3098 		tx_q->tx_skbuff_dma[entry].len = len;
3099 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3100 
3101 		/* Prepare the descriptor and set the own bit too */
3102 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3103 				priv->mode, 1, last_segment, skb->len);
3104 	}
3105 
3106 	/* Only the last descriptor gets to point to the skb. */
3107 	tx_q->tx_skbuff[entry] = skb;
3108 
3109 	/* We've used all descriptors we need for this skb, however,
3110 	 * advance cur_tx so that it references a fresh descriptor.
3111 	 * ndo_start_xmit will fill this descriptor the next time it's
3112 	 * called and stmmac_tx_clean may clean up to this descriptor.
3113 	 */
3114 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3115 	tx_q->cur_tx = entry;
3116 
3117 	if (netif_msg_pktdata(priv)) {
3118 		void *tx_head;
3119 
3120 		netdev_dbg(priv->dev,
3121 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3122 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3123 			   entry, first, nfrags);
3124 
3125 		if (priv->extend_desc)
3126 			tx_head = (void *)tx_q->dma_etx;
3127 		else
3128 			tx_head = (void *)tx_q->dma_tx;
3129 
3130 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3131 
3132 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3133 		print_pkt(skb->data, skb->len);
3134 	}
3135 
3136 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3137 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3138 			  __func__);
3139 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3140 	}
3141 
3142 	dev->stats.tx_bytes += skb->len;
3143 
3144 	/* According to the coalesce parameter the IC bit for the latest
3145 	 * segment is reset and the timer re-started to clean the tx status.
3146 	 * This approach takes care about the fragments: desc is the first
3147 	 * element in case of no SG.
3148 	 */
3149 	priv->tx_count_frames += nfrags + 1;
3150 	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3151 	    !priv->tx_timer_armed) {
3152 		mod_timer(&priv->txtimer,
3153 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3154 		priv->tx_timer_armed = true;
3155 	} else {
3156 		priv->tx_count_frames = 0;
3157 		stmmac_set_tx_ic(priv, desc);
3158 		priv->xstats.tx_set_ic_bit++;
3159 		priv->tx_timer_armed = false;
3160 	}
3161 
3162 	skb_tx_timestamp(skb);
3163 
3164 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3165 	 * problems because all the descriptors are actually ready to be
3166 	 * passed to the DMA engine.
3167 	 */
3168 	if (likely(!is_jumbo)) {
3169 		bool last_segment = (nfrags == 0);
3170 
3171 		des = dma_map_single(priv->device, skb->data,
3172 				     nopaged_len, DMA_TO_DEVICE);
3173 		if (dma_mapping_error(priv->device, des))
3174 			goto dma_map_err;
3175 
3176 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3177 
3178 		stmmac_set_desc_addr(priv, first, des);
3179 
3180 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3181 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3182 
3183 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3184 			     priv->hwts_tx_en)) {
3185 			/* declare that device is doing timestamping */
3186 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3187 			stmmac_enable_tx_timestamp(priv, first);
3188 		}
3189 
3190 		/* Prepare the first descriptor setting the OWN bit too */
3191 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3192 				csum_insertion, priv->mode, 1, last_segment,
3193 				skb->len);
3194 
3195 		/* The own bit must be the latest setting done when prepare the
3196 		 * descriptor and then barrier is needed to make sure that
3197 		 * all is coherent before granting the DMA engine.
3198 		 */
3199 		wmb();
3200 	}
3201 
3202 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3203 
3204 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3205 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3206 
3207 	return NETDEV_TX_OK;
3208 
3209 dma_map_err:
3210 	netdev_err(priv->dev, "Tx DMA map failed\n");
3211 	dev_kfree_skb(skb);
3212 	priv->dev->stats.tx_dropped++;
3213 	return NETDEV_TX_OK;
3214 }
3215 
3216 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3217 {
3218 	struct vlan_ethhdr *veth;
3219 	__be16 vlan_proto;
3220 	u16 vlanid;
3221 
3222 	veth = (struct vlan_ethhdr *)skb->data;
3223 	vlan_proto = veth->h_vlan_proto;
3224 
3225 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3226 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3227 	    (vlan_proto == htons(ETH_P_8021AD) &&
3228 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3229 		/* pop the vlan tag */
3230 		vlanid = ntohs(veth->h_vlan_TCI);
3231 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3232 		skb_pull(skb, VLAN_HLEN);
3233 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3234 	}
3235 }
3236 
3237 
3238 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3239 {
3240 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3241 		return 0;
3242 
3243 	return 1;
3244 }
3245 
3246 /**
3247  * stmmac_rx_refill - refill used skb preallocated buffers
3248  * @priv: driver private structure
3249  * @queue: RX queue index
3250  * Description : this is to reallocate the skb for the reception process
3251  * that is based on zero-copy.
3252  */
3253 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3254 {
3255 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3256 	int dirty = stmmac_rx_dirty(priv, queue);
3257 	unsigned int entry = rx_q->dirty_rx;
3258 
3259 	int bfsize = priv->dma_buf_sz;
3260 
3261 	while (dirty-- > 0) {
3262 		struct dma_desc *p;
3263 
3264 		if (priv->extend_desc)
3265 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3266 		else
3267 			p = rx_q->dma_rx + entry;
3268 
3269 		if (likely(!rx_q->rx_skbuff[entry])) {
3270 			struct sk_buff *skb;
3271 
3272 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3273 			if (unlikely(!skb)) {
3274 				/* so for a while no zero-copy! */
3275 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3276 				if (unlikely(net_ratelimit()))
3277 					dev_err(priv->device,
3278 						"fail to alloc skb entry %d\n",
3279 						entry);
3280 				break;
3281 			}
3282 
3283 			rx_q->rx_skbuff[entry] = skb;
3284 			rx_q->rx_skbuff_dma[entry] =
3285 			    dma_map_single(priv->device, skb->data, bfsize,
3286 					   DMA_FROM_DEVICE);
3287 			if (dma_mapping_error(priv->device,
3288 					      rx_q->rx_skbuff_dma[entry])) {
3289 				netdev_err(priv->dev, "Rx DMA map failed\n");
3290 				dev_kfree_skb(skb);
3291 				break;
3292 			}
3293 
3294 			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3295 			stmmac_refill_desc3(priv, rx_q, p);
3296 
3297 			if (rx_q->rx_zeroc_thresh > 0)
3298 				rx_q->rx_zeroc_thresh--;
3299 
3300 			netif_dbg(priv, rx_status, priv->dev,
3301 				  "refill entry #%d\n", entry);
3302 		}
3303 		dma_wmb();
3304 
3305 		stmmac_set_rx_owner(priv, p, priv->use_riwt);
3306 
3307 		dma_wmb();
3308 
3309 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3310 	}
3311 	rx_q->dirty_rx = entry;
3312 }
3313 
3314 /**
3315  * stmmac_rx - manage the receive process
3316  * @priv: driver private structure
3317  * @limit: napi bugget
3318  * @queue: RX queue index.
3319  * Description :  this the function called by the napi poll method.
3320  * It gets all the frames inside the ring.
3321  */
3322 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3323 {
3324 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3325 	unsigned int entry = rx_q->cur_rx;
3326 	int coe = priv->hw->rx_csum;
3327 	unsigned int next_entry;
3328 	unsigned int count = 0;
3329 	bool xmac;
3330 
3331 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3332 
3333 	if (netif_msg_rx_status(priv)) {
3334 		void *rx_head;
3335 
3336 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3337 		if (priv->extend_desc)
3338 			rx_head = (void *)rx_q->dma_erx;
3339 		else
3340 			rx_head = (void *)rx_q->dma_rx;
3341 
3342 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3343 	}
3344 	while (count < limit) {
3345 		int status;
3346 		struct dma_desc *p;
3347 		struct dma_desc *np;
3348 
3349 		if (priv->extend_desc)
3350 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3351 		else
3352 			p = rx_q->dma_rx + entry;
3353 
3354 		/* read the status of the incoming frame */
3355 		status = stmmac_rx_status(priv, &priv->dev->stats,
3356 				&priv->xstats, p);
3357 		/* check if managed by the DMA otherwise go ahead */
3358 		if (unlikely(status & dma_own))
3359 			break;
3360 
3361 		count++;
3362 
3363 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3364 		next_entry = rx_q->cur_rx;
3365 
3366 		if (priv->extend_desc)
3367 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3368 		else
3369 			np = rx_q->dma_rx + next_entry;
3370 
3371 		prefetch(np);
3372 
3373 		if (priv->extend_desc)
3374 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3375 					&priv->xstats, rx_q->dma_erx + entry);
3376 		if (unlikely(status == discard_frame)) {
3377 			priv->dev->stats.rx_errors++;
3378 			if (priv->hwts_rx_en && !priv->extend_desc) {
3379 				/* DESC2 & DESC3 will be overwritten by device
3380 				 * with timestamp value, hence reinitialize
3381 				 * them in stmmac_rx_refill() function so that
3382 				 * device can reuse it.
3383 				 */
3384 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3385 				rx_q->rx_skbuff[entry] = NULL;
3386 				dma_unmap_single(priv->device,
3387 						 rx_q->rx_skbuff_dma[entry],
3388 						 priv->dma_buf_sz,
3389 						 DMA_FROM_DEVICE);
3390 			}
3391 		} else {
3392 			struct sk_buff *skb;
3393 			int frame_len;
3394 			unsigned int des;
3395 
3396 			stmmac_get_desc_addr(priv, p, &des);
3397 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3398 
3399 			/*  If frame length is greater than skb buffer size
3400 			 *  (preallocated during init) then the packet is
3401 			 *  ignored
3402 			 */
3403 			if (frame_len > priv->dma_buf_sz) {
3404 				netdev_err(priv->dev,
3405 					   "len %d larger than size (%d)\n",
3406 					   frame_len, priv->dma_buf_sz);
3407 				priv->dev->stats.rx_length_errors++;
3408 				break;
3409 			}
3410 
3411 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3412 			 * Type frames (LLC/LLC-SNAP)
3413 			 *
3414 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3415 			 * feature is always disabled and packets need to be
3416 			 * stripped manually.
3417 			 */
3418 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3419 			    unlikely(status != llc_snap))
3420 				frame_len -= ETH_FCS_LEN;
3421 
3422 			if (netif_msg_rx_status(priv)) {
3423 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3424 					   p, entry, des);
3425 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3426 					   frame_len, status);
3427 			}
3428 
3429 			/* The zero-copy is always used for all the sizes
3430 			 * in case of GMAC4 because it needs
3431 			 * to refill the used descriptors, always.
3432 			 */
3433 			if (unlikely(!xmac &&
3434 				     ((frame_len < priv->rx_copybreak) ||
3435 				     stmmac_rx_threshold_count(rx_q)))) {
3436 				skb = netdev_alloc_skb_ip_align(priv->dev,
3437 								frame_len);
3438 				if (unlikely(!skb)) {
3439 					if (net_ratelimit())
3440 						dev_warn(priv->device,
3441 							 "packet dropped\n");
3442 					priv->dev->stats.rx_dropped++;
3443 					break;
3444 				}
3445 
3446 				dma_sync_single_for_cpu(priv->device,
3447 							rx_q->rx_skbuff_dma
3448 							[entry], frame_len,
3449 							DMA_FROM_DEVICE);
3450 				skb_copy_to_linear_data(skb,
3451 							rx_q->
3452 							rx_skbuff[entry]->data,
3453 							frame_len);
3454 
3455 				skb_put(skb, frame_len);
3456 				dma_sync_single_for_device(priv->device,
3457 							   rx_q->rx_skbuff_dma
3458 							   [entry], frame_len,
3459 							   DMA_FROM_DEVICE);
3460 			} else {
3461 				skb = rx_q->rx_skbuff[entry];
3462 				if (unlikely(!skb)) {
3463 					netdev_err(priv->dev,
3464 						   "%s: Inconsistent Rx chain\n",
3465 						   priv->dev->name);
3466 					priv->dev->stats.rx_dropped++;
3467 					break;
3468 				}
3469 				prefetch(skb->data - NET_IP_ALIGN);
3470 				rx_q->rx_skbuff[entry] = NULL;
3471 				rx_q->rx_zeroc_thresh++;
3472 
3473 				skb_put(skb, frame_len);
3474 				dma_unmap_single(priv->device,
3475 						 rx_q->rx_skbuff_dma[entry],
3476 						 priv->dma_buf_sz,
3477 						 DMA_FROM_DEVICE);
3478 			}
3479 
3480 			if (netif_msg_pktdata(priv)) {
3481 				netdev_dbg(priv->dev, "frame received (%dbytes)",
3482 					   frame_len);
3483 				print_pkt(skb->data, frame_len);
3484 			}
3485 
3486 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3487 
3488 			stmmac_rx_vlan(priv->dev, skb);
3489 
3490 			skb->protocol = eth_type_trans(skb, priv->dev);
3491 
3492 			if (unlikely(!coe))
3493 				skb_checksum_none_assert(skb);
3494 			else
3495 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3496 
3497 			napi_gro_receive(&rx_q->napi, skb);
3498 
3499 			priv->dev->stats.rx_packets++;
3500 			priv->dev->stats.rx_bytes += frame_len;
3501 		}
3502 		entry = next_entry;
3503 	}
3504 
3505 	stmmac_rx_refill(priv, queue);
3506 
3507 	priv->xstats.rx_pkt_n += count;
3508 
3509 	return count;
3510 }
3511 
3512 /**
3513  *  stmmac_poll - stmmac poll method (NAPI)
3514  *  @napi : pointer to the napi structure.
3515  *  @budget : maximum number of packets that the current CPU can receive from
3516  *	      all interfaces.
3517  *  Description :
3518  *  To look at the incoming frames and clear the tx resources.
3519  */
3520 static int stmmac_poll(struct napi_struct *napi, int budget)
3521 {
3522 	struct stmmac_rx_queue *rx_q =
3523 		container_of(napi, struct stmmac_rx_queue, napi);
3524 	struct stmmac_priv *priv = rx_q->priv_data;
3525 	u32 tx_count = priv->plat->tx_queues_to_use;
3526 	u32 chan = rx_q->queue_index;
3527 	int work_done = 0;
3528 	u32 queue;
3529 
3530 	priv->xstats.napi_poll++;
3531 
3532 	/* check all the queues */
3533 	for (queue = 0; queue < tx_count; queue++)
3534 		stmmac_tx_clean(priv, queue);
3535 
3536 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3537 	if (work_done < budget) {
3538 		napi_complete_done(napi, work_done);
3539 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3540 	}
3541 	return work_done;
3542 }
3543 
3544 /**
3545  *  stmmac_tx_timeout
3546  *  @dev : Pointer to net device structure
3547  *  Description: this function is called when a packet transmission fails to
3548  *   complete within a reasonable time. The driver will mark the error in the
3549  *   netdev structure and arrange for the device to be reset to a sane state
3550  *   in order to transmit a new packet.
3551  */
3552 static void stmmac_tx_timeout(struct net_device *dev)
3553 {
3554 	struct stmmac_priv *priv = netdev_priv(dev);
3555 
3556 	stmmac_global_err(priv);
3557 }
3558 
3559 /**
3560  *  stmmac_set_rx_mode - entry point for multicast addressing
3561  *  @dev : pointer to the device structure
3562  *  Description:
3563  *  This function is a driver entry point which gets called by the kernel
3564  *  whenever multicast addresses must be enabled/disabled.
3565  *  Return value:
3566  *  void.
3567  */
3568 static void stmmac_set_rx_mode(struct net_device *dev)
3569 {
3570 	struct stmmac_priv *priv = netdev_priv(dev);
3571 
3572 	stmmac_set_filter(priv, priv->hw, dev);
3573 }
3574 
3575 /**
3576  *  stmmac_change_mtu - entry point to change MTU size for the device.
3577  *  @dev : device pointer.
3578  *  @new_mtu : the new MTU size for the device.
3579  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3580  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3581  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3582  *  Return value:
3583  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3584  *  file on failure.
3585  */
3586 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3587 {
3588 	struct stmmac_priv *priv = netdev_priv(dev);
3589 
3590 	if (netif_running(dev)) {
3591 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3592 		return -EBUSY;
3593 	}
3594 
3595 	dev->mtu = new_mtu;
3596 
3597 	netdev_update_features(dev);
3598 
3599 	return 0;
3600 }
3601 
3602 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3603 					     netdev_features_t features)
3604 {
3605 	struct stmmac_priv *priv = netdev_priv(dev);
3606 
3607 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3608 		features &= ~NETIF_F_RXCSUM;
3609 
3610 	if (!priv->plat->tx_coe)
3611 		features &= ~NETIF_F_CSUM_MASK;
3612 
3613 	/* Some GMAC devices have a bugged Jumbo frame support that
3614 	 * needs to have the Tx COE disabled for oversized frames
3615 	 * (due to limited buffer sizes). In this case we disable
3616 	 * the TX csum insertion in the TDES and not use SF.
3617 	 */
3618 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3619 		features &= ~NETIF_F_CSUM_MASK;
3620 
3621 	/* Disable tso if asked by ethtool */
3622 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3623 		if (features & NETIF_F_TSO)
3624 			priv->tso = true;
3625 		else
3626 			priv->tso = false;
3627 	}
3628 
3629 	return features;
3630 }
3631 
3632 static int stmmac_set_features(struct net_device *netdev,
3633 			       netdev_features_t features)
3634 {
3635 	struct stmmac_priv *priv = netdev_priv(netdev);
3636 
3637 	/* Keep the COE Type in case of csum is supporting */
3638 	if (features & NETIF_F_RXCSUM)
3639 		priv->hw->rx_csum = priv->plat->rx_coe;
3640 	else
3641 		priv->hw->rx_csum = 0;
3642 	/* No check needed because rx_coe has been set before and it will be
3643 	 * fixed in case of issue.
3644 	 */
3645 	stmmac_rx_ipc(priv, priv->hw);
3646 
3647 	return 0;
3648 }
3649 
3650 /**
3651  *  stmmac_interrupt - main ISR
3652  *  @irq: interrupt number.
3653  *  @dev_id: to pass the net device pointer.
3654  *  Description: this is the main driver interrupt service routine.
3655  *  It can call:
3656  *  o DMA service routine (to manage incoming frame reception and transmission
3657  *    status)
3658  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3659  *    interrupts.
3660  */
3661 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3662 {
3663 	struct net_device *dev = (struct net_device *)dev_id;
3664 	struct stmmac_priv *priv = netdev_priv(dev);
3665 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3666 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3667 	u32 queues_count;
3668 	u32 queue;
3669 	bool xmac;
3670 
3671 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3672 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3673 
3674 	if (priv->irq_wake)
3675 		pm_wakeup_event(priv->device, 0);
3676 
3677 	if (unlikely(!dev)) {
3678 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3679 		return IRQ_NONE;
3680 	}
3681 
3682 	/* Check if adapter is up */
3683 	if (test_bit(STMMAC_DOWN, &priv->state))
3684 		return IRQ_HANDLED;
3685 	/* Check if a fatal error happened */
3686 	if (stmmac_safety_feat_interrupt(priv))
3687 		return IRQ_HANDLED;
3688 
3689 	/* To handle GMAC own interrupts */
3690 	if ((priv->plat->has_gmac) || xmac) {
3691 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3692 		int mtl_status;
3693 
3694 		if (unlikely(status)) {
3695 			/* For LPI we need to save the tx status */
3696 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3697 				priv->tx_path_in_lpi_mode = true;
3698 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3699 				priv->tx_path_in_lpi_mode = false;
3700 		}
3701 
3702 		for (queue = 0; queue < queues_count; queue++) {
3703 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3704 
3705 			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3706 								queue);
3707 			if (mtl_status != -EINVAL)
3708 				status |= mtl_status;
3709 
3710 			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3711 				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3712 						       rx_q->rx_tail_addr,
3713 						       queue);
3714 		}
3715 
3716 		/* PCS link status */
3717 		if (priv->hw->pcs) {
3718 			if (priv->xstats.pcs_link)
3719 				netif_carrier_on(dev);
3720 			else
3721 				netif_carrier_off(dev);
3722 		}
3723 	}
3724 
3725 	/* To handle DMA interrupts */
3726 	stmmac_dma_interrupt(priv);
3727 
3728 	return IRQ_HANDLED;
3729 }
3730 
3731 #ifdef CONFIG_NET_POLL_CONTROLLER
3732 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3733  * to allow network I/O with interrupts disabled.
3734  */
3735 static void stmmac_poll_controller(struct net_device *dev)
3736 {
3737 	disable_irq(dev->irq);
3738 	stmmac_interrupt(dev->irq, dev);
3739 	enable_irq(dev->irq);
3740 }
3741 #endif
3742 
3743 /**
3744  *  stmmac_ioctl - Entry point for the Ioctl
3745  *  @dev: Device pointer.
3746  *  @rq: An IOCTL specefic structure, that can contain a pointer to
3747  *  a proprietary structure used to pass information to the driver.
3748  *  @cmd: IOCTL command
3749  *  Description:
3750  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3751  */
3752 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3753 {
3754 	int ret = -EOPNOTSUPP;
3755 
3756 	if (!netif_running(dev))
3757 		return -EINVAL;
3758 
3759 	switch (cmd) {
3760 	case SIOCGMIIPHY:
3761 	case SIOCGMIIREG:
3762 	case SIOCSMIIREG:
3763 		if (!dev->phydev)
3764 			return -EINVAL;
3765 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3766 		break;
3767 	case SIOCSHWTSTAMP:
3768 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3769 		break;
3770 	default:
3771 		break;
3772 	}
3773 
3774 	return ret;
3775 }
3776 
3777 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3778 				    void *cb_priv)
3779 {
3780 	struct stmmac_priv *priv = cb_priv;
3781 	int ret = -EOPNOTSUPP;
3782 
3783 	stmmac_disable_all_queues(priv);
3784 
3785 	switch (type) {
3786 	case TC_SETUP_CLSU32:
3787 		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3788 			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3789 		break;
3790 	default:
3791 		break;
3792 	}
3793 
3794 	stmmac_enable_all_queues(priv);
3795 	return ret;
3796 }
3797 
3798 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3799 				 struct tc_block_offload *f)
3800 {
3801 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3802 		return -EOPNOTSUPP;
3803 
3804 	switch (f->command) {
3805 	case TC_BLOCK_BIND:
3806 		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3807 				priv, priv, f->extack);
3808 	case TC_BLOCK_UNBIND:
3809 		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3810 		return 0;
3811 	default:
3812 		return -EOPNOTSUPP;
3813 	}
3814 }
3815 
3816 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3817 			   void *type_data)
3818 {
3819 	struct stmmac_priv *priv = netdev_priv(ndev);
3820 
3821 	switch (type) {
3822 	case TC_SETUP_BLOCK:
3823 		return stmmac_setup_tc_block(priv, type_data);
3824 	case TC_SETUP_QDISC_CBS:
3825 		return stmmac_tc_setup_cbs(priv, priv, type_data);
3826 	default:
3827 		return -EOPNOTSUPP;
3828 	}
3829 }
3830 
3831 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3832 {
3833 	struct stmmac_priv *priv = netdev_priv(ndev);
3834 	int ret = 0;
3835 
3836 	ret = eth_mac_addr(ndev, addr);
3837 	if (ret)
3838 		return ret;
3839 
3840 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3841 
3842 	return ret;
3843 }
3844 
3845 #ifdef CONFIG_DEBUG_FS
3846 static struct dentry *stmmac_fs_dir;
3847 
3848 static void sysfs_display_ring(void *head, int size, int extend_desc,
3849 			       struct seq_file *seq)
3850 {
3851 	int i;
3852 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3853 	struct dma_desc *p = (struct dma_desc *)head;
3854 
3855 	for (i = 0; i < size; i++) {
3856 		if (extend_desc) {
3857 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3858 				   i, (unsigned int)virt_to_phys(ep),
3859 				   le32_to_cpu(ep->basic.des0),
3860 				   le32_to_cpu(ep->basic.des1),
3861 				   le32_to_cpu(ep->basic.des2),
3862 				   le32_to_cpu(ep->basic.des3));
3863 			ep++;
3864 		} else {
3865 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3866 				   i, (unsigned int)virt_to_phys(p),
3867 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3868 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3869 			p++;
3870 		}
3871 		seq_printf(seq, "\n");
3872 	}
3873 }
3874 
3875 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3876 {
3877 	struct net_device *dev = seq->private;
3878 	struct stmmac_priv *priv = netdev_priv(dev);
3879 	u32 rx_count = priv->plat->rx_queues_to_use;
3880 	u32 tx_count = priv->plat->tx_queues_to_use;
3881 	u32 queue;
3882 
3883 	for (queue = 0; queue < rx_count; queue++) {
3884 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3885 
3886 		seq_printf(seq, "RX Queue %d:\n", queue);
3887 
3888 		if (priv->extend_desc) {
3889 			seq_printf(seq, "Extended descriptor ring:\n");
3890 			sysfs_display_ring((void *)rx_q->dma_erx,
3891 					   DMA_RX_SIZE, 1, seq);
3892 		} else {
3893 			seq_printf(seq, "Descriptor ring:\n");
3894 			sysfs_display_ring((void *)rx_q->dma_rx,
3895 					   DMA_RX_SIZE, 0, seq);
3896 		}
3897 	}
3898 
3899 	for (queue = 0; queue < tx_count; queue++) {
3900 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3901 
3902 		seq_printf(seq, "TX Queue %d:\n", queue);
3903 
3904 		if (priv->extend_desc) {
3905 			seq_printf(seq, "Extended descriptor ring:\n");
3906 			sysfs_display_ring((void *)tx_q->dma_etx,
3907 					   DMA_TX_SIZE, 1, seq);
3908 		} else {
3909 			seq_printf(seq, "Descriptor ring:\n");
3910 			sysfs_display_ring((void *)tx_q->dma_tx,
3911 					   DMA_TX_SIZE, 0, seq);
3912 		}
3913 	}
3914 
3915 	return 0;
3916 }
3917 
3918 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3919 {
3920 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3921 }
3922 
3923 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3924 
3925 static const struct file_operations stmmac_rings_status_fops = {
3926 	.owner = THIS_MODULE,
3927 	.open = stmmac_sysfs_ring_open,
3928 	.read = seq_read,
3929 	.llseek = seq_lseek,
3930 	.release = single_release,
3931 };
3932 
3933 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3934 {
3935 	struct net_device *dev = seq->private;
3936 	struct stmmac_priv *priv = netdev_priv(dev);
3937 
3938 	if (!priv->hw_cap_support) {
3939 		seq_printf(seq, "DMA HW features not supported\n");
3940 		return 0;
3941 	}
3942 
3943 	seq_printf(seq, "==============================\n");
3944 	seq_printf(seq, "\tDMA HW features\n");
3945 	seq_printf(seq, "==============================\n");
3946 
3947 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3948 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3949 	seq_printf(seq, "\t1000 Mbps: %s\n",
3950 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3951 	seq_printf(seq, "\tHalf duplex: %s\n",
3952 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3953 	seq_printf(seq, "\tHash Filter: %s\n",
3954 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3955 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3956 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3957 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3958 		   (priv->dma_cap.pcs) ? "Y" : "N");
3959 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3960 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3961 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3962 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3963 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3964 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3965 	seq_printf(seq, "\tRMON module: %s\n",
3966 		   (priv->dma_cap.rmon) ? "Y" : "N");
3967 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3968 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3969 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3970 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3971 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3972 		   (priv->dma_cap.eee) ? "Y" : "N");
3973 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3974 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3975 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3976 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3977 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3978 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3979 	} else {
3980 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3981 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3982 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3983 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3984 	}
3985 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3986 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3987 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3988 		   priv->dma_cap.number_rx_channel);
3989 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3990 		   priv->dma_cap.number_tx_channel);
3991 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3992 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3993 
3994 	return 0;
3995 }
3996 
3997 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3998 {
3999 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4000 }
4001 
4002 static const struct file_operations stmmac_dma_cap_fops = {
4003 	.owner = THIS_MODULE,
4004 	.open = stmmac_sysfs_dma_cap_open,
4005 	.read = seq_read,
4006 	.llseek = seq_lseek,
4007 	.release = single_release,
4008 };
4009 
4010 static int stmmac_init_fs(struct net_device *dev)
4011 {
4012 	struct stmmac_priv *priv = netdev_priv(dev);
4013 
4014 	/* Create per netdev entries */
4015 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4016 
4017 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4018 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4019 
4020 		return -ENOMEM;
4021 	}
4022 
4023 	/* Entry to report DMA RX/TX rings */
4024 	priv->dbgfs_rings_status =
4025 		debugfs_create_file("descriptors_status", 0444,
4026 				    priv->dbgfs_dir, dev,
4027 				    &stmmac_rings_status_fops);
4028 
4029 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4030 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4031 		debugfs_remove_recursive(priv->dbgfs_dir);
4032 
4033 		return -ENOMEM;
4034 	}
4035 
4036 	/* Entry to report the DMA HW features */
4037 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4038 						  priv->dbgfs_dir,
4039 						  dev, &stmmac_dma_cap_fops);
4040 
4041 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4042 		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4043 		debugfs_remove_recursive(priv->dbgfs_dir);
4044 
4045 		return -ENOMEM;
4046 	}
4047 
4048 	return 0;
4049 }
4050 
4051 static void stmmac_exit_fs(struct net_device *dev)
4052 {
4053 	struct stmmac_priv *priv = netdev_priv(dev);
4054 
4055 	debugfs_remove_recursive(priv->dbgfs_dir);
4056 }
4057 #endif /* CONFIG_DEBUG_FS */
4058 
4059 static const struct net_device_ops stmmac_netdev_ops = {
4060 	.ndo_open = stmmac_open,
4061 	.ndo_start_xmit = stmmac_xmit,
4062 	.ndo_stop = stmmac_release,
4063 	.ndo_change_mtu = stmmac_change_mtu,
4064 	.ndo_fix_features = stmmac_fix_features,
4065 	.ndo_set_features = stmmac_set_features,
4066 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4067 	.ndo_tx_timeout = stmmac_tx_timeout,
4068 	.ndo_do_ioctl = stmmac_ioctl,
4069 	.ndo_setup_tc = stmmac_setup_tc,
4070 #ifdef CONFIG_NET_POLL_CONTROLLER
4071 	.ndo_poll_controller = stmmac_poll_controller,
4072 #endif
4073 	.ndo_set_mac_address = stmmac_set_mac_address,
4074 };
4075 
4076 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4077 {
4078 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4079 		return;
4080 	if (test_bit(STMMAC_DOWN, &priv->state))
4081 		return;
4082 
4083 	netdev_err(priv->dev, "Reset adapter.\n");
4084 
4085 	rtnl_lock();
4086 	netif_trans_update(priv->dev);
4087 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4088 		usleep_range(1000, 2000);
4089 
4090 	set_bit(STMMAC_DOWN, &priv->state);
4091 	dev_close(priv->dev);
4092 	dev_open(priv->dev);
4093 	clear_bit(STMMAC_DOWN, &priv->state);
4094 	clear_bit(STMMAC_RESETING, &priv->state);
4095 	rtnl_unlock();
4096 }
4097 
4098 static void stmmac_service_task(struct work_struct *work)
4099 {
4100 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4101 			service_task);
4102 
4103 	stmmac_reset_subtask(priv);
4104 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4105 }
4106 
4107 /**
4108  *  stmmac_hw_init - Init the MAC device
4109  *  @priv: driver private structure
4110  *  Description: this function is to configure the MAC device according to
4111  *  some platform parameters or the HW capability register. It prepares the
4112  *  driver to use either ring or chain modes and to setup either enhanced or
4113  *  normal descriptors.
4114  */
4115 static int stmmac_hw_init(struct stmmac_priv *priv)
4116 {
4117 	int ret;
4118 
4119 	/* dwmac-sun8i only work in chain mode */
4120 	if (priv->plat->has_sun8i)
4121 		chain_mode = 1;
4122 	priv->chain_mode = chain_mode;
4123 
4124 	/* Initialize HW Interface */
4125 	ret = stmmac_hwif_init(priv);
4126 	if (ret)
4127 		return ret;
4128 
4129 	/* Get the HW capability (new GMAC newer than 3.50a) */
4130 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4131 	if (priv->hw_cap_support) {
4132 		dev_info(priv->device, "DMA HW capability register supported\n");
4133 
4134 		/* We can override some gmac/dma configuration fields: e.g.
4135 		 * enh_desc, tx_coe (e.g. that are passed through the
4136 		 * platform) with the values from the HW capability
4137 		 * register (if supported).
4138 		 */
4139 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4140 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4141 		priv->hw->pmt = priv->plat->pmt;
4142 
4143 		/* TXCOE doesn't work in thresh DMA mode */
4144 		if (priv->plat->force_thresh_dma_mode)
4145 			priv->plat->tx_coe = 0;
4146 		else
4147 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4148 
4149 		/* In case of GMAC4 rx_coe is from HW cap register. */
4150 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4151 
4152 		if (priv->dma_cap.rx_coe_type2)
4153 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4154 		else if (priv->dma_cap.rx_coe_type1)
4155 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4156 
4157 	} else {
4158 		dev_info(priv->device, "No HW DMA feature register supported\n");
4159 	}
4160 
4161 	if (priv->plat->rx_coe) {
4162 		priv->hw->rx_csum = priv->plat->rx_coe;
4163 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4164 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4165 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4166 	}
4167 	if (priv->plat->tx_coe)
4168 		dev_info(priv->device, "TX Checksum insertion supported\n");
4169 
4170 	if (priv->plat->pmt) {
4171 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4172 		device_set_wakeup_capable(priv->device, 1);
4173 	}
4174 
4175 	if (priv->dma_cap.tsoen)
4176 		dev_info(priv->device, "TSO supported\n");
4177 
4178 	/* Run HW quirks, if any */
4179 	if (priv->hwif_quirks) {
4180 		ret = priv->hwif_quirks(priv);
4181 		if (ret)
4182 			return ret;
4183 	}
4184 
4185 	return 0;
4186 }
4187 
4188 /**
4189  * stmmac_dvr_probe
4190  * @device: device pointer
4191  * @plat_dat: platform data pointer
4192  * @res: stmmac resource pointer
4193  * Description: this is the main probe function used to
4194  * call the alloc_etherdev, allocate the priv structure.
4195  * Return:
4196  * returns 0 on success, otherwise errno.
4197  */
4198 int stmmac_dvr_probe(struct device *device,
4199 		     struct plat_stmmacenet_data *plat_dat,
4200 		     struct stmmac_resources *res)
4201 {
4202 	struct net_device *ndev = NULL;
4203 	struct stmmac_priv *priv;
4204 	int ret = 0;
4205 	u32 queue;
4206 
4207 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4208 				  MTL_MAX_TX_QUEUES,
4209 				  MTL_MAX_RX_QUEUES);
4210 	if (!ndev)
4211 		return -ENOMEM;
4212 
4213 	SET_NETDEV_DEV(ndev, device);
4214 
4215 	priv = netdev_priv(ndev);
4216 	priv->device = device;
4217 	priv->dev = ndev;
4218 
4219 	stmmac_set_ethtool_ops(ndev);
4220 	priv->pause = pause;
4221 	priv->plat = plat_dat;
4222 	priv->ioaddr = res->addr;
4223 	priv->dev->base_addr = (unsigned long)res->addr;
4224 
4225 	priv->dev->irq = res->irq;
4226 	priv->wol_irq = res->wol_irq;
4227 	priv->lpi_irq = res->lpi_irq;
4228 
4229 	if (res->mac)
4230 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4231 
4232 	dev_set_drvdata(device, priv->dev);
4233 
4234 	/* Verify driver arguments */
4235 	stmmac_verify_args();
4236 
4237 	/* Allocate workqueue */
4238 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4239 	if (!priv->wq) {
4240 		dev_err(priv->device, "failed to create workqueue\n");
4241 		goto error_wq;
4242 	}
4243 
4244 	INIT_WORK(&priv->service_task, stmmac_service_task);
4245 
4246 	/* Override with kernel parameters if supplied XXX CRS XXX
4247 	 * this needs to have multiple instances
4248 	 */
4249 	if ((phyaddr >= 0) && (phyaddr <= 31))
4250 		priv->plat->phy_addr = phyaddr;
4251 
4252 	if (priv->plat->stmmac_rst) {
4253 		ret = reset_control_assert(priv->plat->stmmac_rst);
4254 		reset_control_deassert(priv->plat->stmmac_rst);
4255 		/* Some reset controllers have only reset callback instead of
4256 		 * assert + deassert callbacks pair.
4257 		 */
4258 		if (ret == -ENOTSUPP)
4259 			reset_control_reset(priv->plat->stmmac_rst);
4260 	}
4261 
4262 	/* Init MAC and get the capabilities */
4263 	ret = stmmac_hw_init(priv);
4264 	if (ret)
4265 		goto error_hw_init;
4266 
4267 	/* Configure real RX and TX queues */
4268 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4269 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4270 
4271 	ndev->netdev_ops = &stmmac_netdev_ops;
4272 
4273 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4274 			    NETIF_F_RXCSUM;
4275 
4276 	ret = stmmac_tc_init(priv, priv);
4277 	if (!ret) {
4278 		ndev->hw_features |= NETIF_F_HW_TC;
4279 	}
4280 
4281 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4282 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4283 		priv->tso = true;
4284 		dev_info(priv->device, "TSO feature enabled\n");
4285 	}
4286 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4287 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4288 #ifdef STMMAC_VLAN_TAG_USED
4289 	/* Both mac100 and gmac support receive VLAN tag detection */
4290 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4291 #endif
4292 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4293 
4294 	/* MTU range: 46 - hw-specific max */
4295 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4296 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4297 		ndev->max_mtu = JUMBO_LEN;
4298 	else if (priv->plat->has_xgmac)
4299 		ndev->max_mtu = XGMAC_JUMBO_LEN;
4300 	else
4301 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4302 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4303 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
4304 	 */
4305 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4306 	    (priv->plat->maxmtu >= ndev->min_mtu))
4307 		ndev->max_mtu = priv->plat->maxmtu;
4308 	else if (priv->plat->maxmtu < ndev->min_mtu)
4309 		dev_warn(priv->device,
4310 			 "%s: warning: maxmtu having invalid value (%d)\n",
4311 			 __func__, priv->plat->maxmtu);
4312 
4313 	if (flow_ctrl)
4314 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4315 
4316 	/* Rx Watchdog is available in the COREs newer than the 3.40.
4317 	 * In some case, for example on bugged HW this feature
4318 	 * has to be disable and this can be done by passing the
4319 	 * riwt_off field from the platform.
4320 	 */
4321 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4322 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4323 		priv->use_riwt = 1;
4324 		dev_info(priv->device,
4325 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4326 	}
4327 
4328 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4329 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4330 
4331 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4332 			       (8 * priv->plat->rx_queues_to_use));
4333 	}
4334 
4335 	mutex_init(&priv->lock);
4336 
4337 	/* If a specific clk_csr value is passed from the platform
4338 	 * this means that the CSR Clock Range selection cannot be
4339 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
4340 	 * set the MDC clock dynamically according to the csr actual
4341 	 * clock input.
4342 	 */
4343 	if (!priv->plat->clk_csr)
4344 		stmmac_clk_csr_set(priv);
4345 	else
4346 		priv->clk_csr = priv->plat->clk_csr;
4347 
4348 	stmmac_check_pcs_mode(priv);
4349 
4350 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4351 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4352 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4353 		/* MDIO bus Registration */
4354 		ret = stmmac_mdio_register(ndev);
4355 		if (ret < 0) {
4356 			dev_err(priv->device,
4357 				"%s: MDIO bus (id: %d) registration failed",
4358 				__func__, priv->plat->bus_id);
4359 			goto error_mdio_register;
4360 		}
4361 	}
4362 
4363 	ret = register_netdev(ndev);
4364 	if (ret) {
4365 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4366 			__func__, ret);
4367 		goto error_netdev_register;
4368 	}
4369 
4370 	return ret;
4371 
4372 error_netdev_register:
4373 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4374 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4375 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4376 		stmmac_mdio_unregister(ndev);
4377 error_mdio_register:
4378 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4379 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4380 
4381 		netif_napi_del(&rx_q->napi);
4382 	}
4383 error_hw_init:
4384 	destroy_workqueue(priv->wq);
4385 error_wq:
4386 	free_netdev(ndev);
4387 
4388 	return ret;
4389 }
4390 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4391 
4392 /**
4393  * stmmac_dvr_remove
4394  * @dev: device pointer
4395  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4396  * changes the link status, releases the DMA descriptor rings.
4397  */
4398 int stmmac_dvr_remove(struct device *dev)
4399 {
4400 	struct net_device *ndev = dev_get_drvdata(dev);
4401 	struct stmmac_priv *priv = netdev_priv(ndev);
4402 
4403 	netdev_info(priv->dev, "%s: removing driver", __func__);
4404 
4405 	stmmac_stop_all_dma(priv);
4406 
4407 	stmmac_mac_set(priv, priv->ioaddr, false);
4408 	netif_carrier_off(ndev);
4409 	unregister_netdev(ndev);
4410 	if (priv->plat->stmmac_rst)
4411 		reset_control_assert(priv->plat->stmmac_rst);
4412 	clk_disable_unprepare(priv->plat->pclk);
4413 	clk_disable_unprepare(priv->plat->stmmac_clk);
4414 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4415 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4416 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4417 		stmmac_mdio_unregister(ndev);
4418 	destroy_workqueue(priv->wq);
4419 	mutex_destroy(&priv->lock);
4420 	free_netdev(ndev);
4421 
4422 	return 0;
4423 }
4424 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4425 
4426 /**
4427  * stmmac_suspend - suspend callback
4428  * @dev: device pointer
4429  * Description: this is the function to suspend the device and it is called
4430  * by the platform driver to stop the network queue, release the resources,
4431  * program the PMT register (for WoL), clean and release driver resources.
4432  */
4433 int stmmac_suspend(struct device *dev)
4434 {
4435 	struct net_device *ndev = dev_get_drvdata(dev);
4436 	struct stmmac_priv *priv = netdev_priv(ndev);
4437 
4438 	if (!ndev || !netif_running(ndev))
4439 		return 0;
4440 
4441 	if (ndev->phydev)
4442 		phy_stop(ndev->phydev);
4443 
4444 	mutex_lock(&priv->lock);
4445 
4446 	netif_device_detach(ndev);
4447 	stmmac_stop_all_queues(priv);
4448 
4449 	stmmac_disable_all_queues(priv);
4450 
4451 	/* Stop TX/RX DMA */
4452 	stmmac_stop_all_dma(priv);
4453 
4454 	/* Enable Power down mode by programming the PMT regs */
4455 	if (device_may_wakeup(priv->device)) {
4456 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4457 		priv->irq_wake = 1;
4458 	} else {
4459 		stmmac_mac_set(priv, priv->ioaddr, false);
4460 		pinctrl_pm_select_sleep_state(priv->device);
4461 		/* Disable clock in case of PWM is off */
4462 		clk_disable(priv->plat->pclk);
4463 		clk_disable(priv->plat->stmmac_clk);
4464 	}
4465 	mutex_unlock(&priv->lock);
4466 
4467 	priv->oldlink = false;
4468 	priv->speed = SPEED_UNKNOWN;
4469 	priv->oldduplex = DUPLEX_UNKNOWN;
4470 	return 0;
4471 }
4472 EXPORT_SYMBOL_GPL(stmmac_suspend);
4473 
4474 /**
4475  * stmmac_reset_queues_param - reset queue parameters
4476  * @dev: device pointer
4477  */
4478 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4479 {
4480 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4481 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4482 	u32 queue;
4483 
4484 	for (queue = 0; queue < rx_cnt; queue++) {
4485 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4486 
4487 		rx_q->cur_rx = 0;
4488 		rx_q->dirty_rx = 0;
4489 	}
4490 
4491 	for (queue = 0; queue < tx_cnt; queue++) {
4492 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4493 
4494 		tx_q->cur_tx = 0;
4495 		tx_q->dirty_tx = 0;
4496 		tx_q->mss = 0;
4497 	}
4498 }
4499 
4500 /**
4501  * stmmac_resume - resume callback
4502  * @dev: device pointer
4503  * Description: when resume this function is invoked to setup the DMA and CORE
4504  * in a usable state.
4505  */
4506 int stmmac_resume(struct device *dev)
4507 {
4508 	struct net_device *ndev = dev_get_drvdata(dev);
4509 	struct stmmac_priv *priv = netdev_priv(ndev);
4510 
4511 	if (!netif_running(ndev))
4512 		return 0;
4513 
4514 	/* Power Down bit, into the PM register, is cleared
4515 	 * automatically as soon as a magic packet or a Wake-up frame
4516 	 * is received. Anyway, it's better to manually clear
4517 	 * this bit because it can generate problems while resuming
4518 	 * from another devices (e.g. serial console).
4519 	 */
4520 	if (device_may_wakeup(priv->device)) {
4521 		mutex_lock(&priv->lock);
4522 		stmmac_pmt(priv, priv->hw, 0);
4523 		mutex_unlock(&priv->lock);
4524 		priv->irq_wake = 0;
4525 	} else {
4526 		pinctrl_pm_select_default_state(priv->device);
4527 		/* enable the clk previously disabled */
4528 		clk_enable(priv->plat->stmmac_clk);
4529 		clk_enable(priv->plat->pclk);
4530 		/* reset the phy so that it's ready */
4531 		if (priv->mii)
4532 			stmmac_mdio_reset(priv->mii);
4533 	}
4534 
4535 	netif_device_attach(ndev);
4536 
4537 	mutex_lock(&priv->lock);
4538 
4539 	stmmac_reset_queues_param(priv);
4540 
4541 	stmmac_clear_descriptors(priv);
4542 
4543 	stmmac_hw_setup(ndev, false);
4544 	stmmac_init_tx_coalesce(priv);
4545 	stmmac_set_rx_mode(ndev);
4546 
4547 	stmmac_enable_all_queues(priv);
4548 
4549 	stmmac_start_all_queues(priv);
4550 
4551 	mutex_unlock(&priv->lock);
4552 
4553 	if (ndev->phydev)
4554 		phy_start(ndev->phydev);
4555 
4556 	return 0;
4557 }
4558 EXPORT_SYMBOL_GPL(stmmac_resume);
4559 
4560 #ifndef MODULE
4561 static int __init stmmac_cmdline_opt(char *str)
4562 {
4563 	char *opt;
4564 
4565 	if (!str || !*str)
4566 		return -EINVAL;
4567 	while ((opt = strsep(&str, ",")) != NULL) {
4568 		if (!strncmp(opt, "debug:", 6)) {
4569 			if (kstrtoint(opt + 6, 0, &debug))
4570 				goto err;
4571 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4572 			if (kstrtoint(opt + 8, 0, &phyaddr))
4573 				goto err;
4574 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4575 			if (kstrtoint(opt + 7, 0, &buf_sz))
4576 				goto err;
4577 		} else if (!strncmp(opt, "tc:", 3)) {
4578 			if (kstrtoint(opt + 3, 0, &tc))
4579 				goto err;
4580 		} else if (!strncmp(opt, "watchdog:", 9)) {
4581 			if (kstrtoint(opt + 9, 0, &watchdog))
4582 				goto err;
4583 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4584 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4585 				goto err;
4586 		} else if (!strncmp(opt, "pause:", 6)) {
4587 			if (kstrtoint(opt + 6, 0, &pause))
4588 				goto err;
4589 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4590 			if (kstrtoint(opt + 10, 0, &eee_timer))
4591 				goto err;
4592 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4593 			if (kstrtoint(opt + 11, 0, &chain_mode))
4594 				goto err;
4595 		}
4596 	}
4597 	return 0;
4598 
4599 err:
4600 	pr_err("%s: ERROR broken module parameter conversion", __func__);
4601 	return -EINVAL;
4602 }
4603 
4604 __setup("stmmaceth=", stmmac_cmdline_opt);
4605 #endif /* MODULE */
4606 
4607 static int __init stmmac_init(void)
4608 {
4609 #ifdef CONFIG_DEBUG_FS
4610 	/* Create debugfs main directory if it doesn't exist yet */
4611 	if (!stmmac_fs_dir) {
4612 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4613 
4614 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4615 			pr_err("ERROR %s, debugfs create directory failed\n",
4616 			       STMMAC_RESOURCE_NAME);
4617 
4618 			return -ENOMEM;
4619 		}
4620 	}
4621 #endif
4622 
4623 	return 0;
4624 }
4625 
4626 static void __exit stmmac_exit(void)
4627 {
4628 #ifdef CONFIG_DEBUG_FS
4629 	debugfs_remove_recursive(stmmac_fs_dir);
4630 #endif
4631 }
4632 
4633 module_init(stmmac_init)
4634 module_exit(stmmac_exit)
4635 
4636 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4637 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4638 MODULE_LICENSE("GPL");
4639