xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision f412eed9dfdeeb6becd7de2ffe8b5d0a8b3f81ca)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4 
5 	Copyright(C) 2007-2011 STMicroelectronics Ltd
6 
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10 
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15 
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18 
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20 
21   Documentation available at:
22 	http://www.stlinux.com
23   Support available at:
24 	https://bugzilla.stlinux.com/
25 *******************************************************************************/
26 
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 #include "hwif.h"
54 
55 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
56 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
57 
58 /* Module parameters */
59 #define TX_TIMEO	5000
60 static int watchdog = TX_TIMEO;
61 module_param(watchdog, int, 0644);
62 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
63 
64 static int debug = -1;
65 module_param(debug, int, 0644);
66 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
67 
68 static int phyaddr = -1;
69 module_param(phyaddr, int, 0444);
70 MODULE_PARM_DESC(phyaddr, "Physical device address");
71 
72 #define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
73 #define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
74 
75 static int flow_ctrl = FLOW_OFF;
76 module_param(flow_ctrl, int, 0644);
77 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
78 
79 static int pause = PAUSE_TIME;
80 module_param(pause, int, 0644);
81 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
82 
83 #define TC_DEFAULT 64
84 static int tc = TC_DEFAULT;
85 module_param(tc, int, 0644);
86 MODULE_PARM_DESC(tc, "DMA threshold control value");
87 
88 #define	DEFAULT_BUFSIZE	1536
89 static int buf_sz = DEFAULT_BUFSIZE;
90 module_param(buf_sz, int, 0644);
91 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
92 
93 #define	STMMAC_RX_COPYBREAK	256
94 
95 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
96 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
97 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
98 
99 #define STMMAC_DEFAULT_LPI_TIMER	1000
100 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
101 module_param(eee_timer, int, 0644);
102 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
103 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
104 
105 /* By default the driver uses ring mode to manage the tx and rx descriptors,
106  * but the user can force the use of chain mode instead of ring mode
107  */
108 static unsigned int chain_mode;
109 module_param(chain_mode, int, 0444);
110 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
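/* Illustrative usage (assuming a modular build named "stmmac"; writable
 * parameters can also be changed via /sys/module/stmmac/parameters/):
 *
 *   modprobe stmmac buf_sz=4096 eee_timer=2000 chain_mode=1
 */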
111 
112 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
113 
114 #ifdef CONFIG_DEBUG_FS
115 static int stmmac_init_fs(struct net_device *dev);
116 static void stmmac_exit_fs(struct net_device *dev);
117 #endif
118 
119 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
120 
121 /**
122  * stmmac_verify_args - verify the driver parameters.
123  * Description: it checks the driver parameters and sets default values in
124  * case of errors.
125  */
126 static void stmmac_verify_args(void)
127 {
128 	if (unlikely(watchdog < 0))
129 		watchdog = TX_TIMEO;
130 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
131 		buf_sz = DEFAULT_BUFSIZE;
132 	if (unlikely(flow_ctrl > 1))
133 		flow_ctrl = FLOW_AUTO;
134 	else if (likely(flow_ctrl < 0))
135 		flow_ctrl = FLOW_OFF;
136 	if (unlikely((pause < 0) || (pause > 0xffff)))
137 		pause = PAUSE_TIME;
138 	if (eee_timer < 0)
139 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
140 }
141 
142 /**
143  * stmmac_disable_all_queues - Disable all queues
144  * @priv: driver private structure
145  */
146 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
147 {
148 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
149 	u32 queue;
150 
151 	for (queue = 0; queue < rx_queues_cnt; queue++) {
152 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
153 
154 		napi_disable(&rx_q->napi);
155 	}
156 }
157 
158 /**
159  * stmmac_enable_all_queues - Enable all queues
160  * @priv: driver private structure
161  */
162 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
163 {
164 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
165 	u32 queue;
166 
167 	for (queue = 0; queue < rx_queues_cnt; queue++) {
168 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
169 
170 		napi_enable(&rx_q->napi);
171 	}
172 }
173 
174 /**
175  * stmmac_stop_all_queues - Stop all queues
176  * @priv: driver private structure
177  */
178 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
179 {
180 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
181 	u32 queue;
182 
183 	for (queue = 0; queue < tx_queues_cnt; queue++)
184 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
185 }
186 
187 /**
188  * stmmac_start_all_queues - Start all queues
189  * @priv: driver private structure
190  */
191 static void stmmac_start_all_queues(struct stmmac_priv *priv)
192 {
193 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
194 	u32 queue;
195 
196 	for (queue = 0; queue < tx_queues_cnt; queue++)
197 		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
198 }
199 
200 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
201 {
202 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
203 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
204 		queue_work(priv->wq, &priv->service_task);
205 }
206 
207 static void stmmac_global_err(struct stmmac_priv *priv)
208 {
209 	netif_carrier_off(priv->dev);
210 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
211 	stmmac_service_event_schedule(priv);
212 }
213 
214 /**
215  * stmmac_clk_csr_set - dynamically set the MDC clock
216  * @priv: driver private structure
217  * Description: this is to dynamically set the MDC clock according to the csr
218  * clock input.
219  * Note:
220  *	If a specific clk_csr value is passed from the platform
221  *	this means that the CSR Clock Range selection cannot be
222  *	changed at run-time and it is fixed (as reported in the driver
223  *	documentation). Otherwise the driver will try to set the MDC
224  *	clock dynamically according to the actual clock input.
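 *	Example (illustrative): a csr clock of 75 MHz falls into the
 *	60-100 MHz range, so clk_csr would be set to STMMAC_CSR_60_100M.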
225  */
226 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
227 {
228 	u32 clk_rate;
229 
230 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
231 
232 	/* The platform-provided default clk_csr is assumed valid for
233 	 * all cases except the ones mentioned below.
234 	 * For values higher than the IEEE 802.3 specified frequency
235 	 * we cannot estimate the proper divider because the frequency
236 	 * of clk_csr_i is not known, so we do not change the default
237 	 * divider.
238 	 */
239 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
240 		if (clk_rate < CSR_F_35M)
241 			priv->clk_csr = STMMAC_CSR_20_35M;
242 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
243 			priv->clk_csr = STMMAC_CSR_35_60M;
244 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
245 			priv->clk_csr = STMMAC_CSR_60_100M;
246 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
247 			priv->clk_csr = STMMAC_CSR_100_150M;
248 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
249 			priv->clk_csr = STMMAC_CSR_150_250M;
250 		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
251 			priv->clk_csr = STMMAC_CSR_250_300M;
252 	}
253 
254 	if (priv->plat->has_sun8i) {
255 		if (clk_rate > 160000000)
256 			priv->clk_csr = 0x03;
257 		else if (clk_rate > 80000000)
258 			priv->clk_csr = 0x02;
259 		else if (clk_rate > 40000000)
260 			priv->clk_csr = 0x01;
261 		else
262 			priv->clk_csr = 0;
263 	}
264 }
265 
266 static void print_pkt(unsigned char *buf, int len)
267 {
268 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
269 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
270 }
271 
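/**
 * stmmac_tx_avail - get the number of free descriptors in a TX ring
 * @priv: driver private structure
 * @queue: TX queue index
 * Worked example (illustrative values): with DMA_TX_SIZE = 256,
 * cur_tx = 250 and dirty_tx = 10 the ring has wrapped around, so
 * avail = 256 - 250 + 10 - 1 = 15. The "- 1" keeps one entry unused
 * so that a completely full ring can be told apart from an empty one.
 */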
272 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
273 {
274 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
275 	u32 avail;
276 
277 	if (tx_q->dirty_tx > tx_q->cur_tx)
278 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
279 	else
280 		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
281 
282 	return avail;
283 }
284 
285 /**
286  * stmmac_rx_dirty - Get RX queue dirty
287  * @priv: driver private structure
288  * @queue: RX queue index
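 * Worked example (illustrative values): with DMA_RX_SIZE = 256,
 * dirty_rx = 250 and cur_rx = 5, dirty = 256 - 250 + 5 = 11 entries
 * are waiting to be refilled.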
289  */
290 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
291 {
292 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
293 	u32 dirty;
294 
295 	if (rx_q->dirty_rx <= rx_q->cur_rx)
296 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
297 	else
298 		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
299 
300 	return dirty;
301 }
302 
303 /**
304  * stmmac_hw_fix_mac_speed - callback for speed selection
305  * @priv: driver private structure
306  * Description: on some platforms (e.g. ST), some HW system configuration
307  * registers have to be set according to the link speed negotiated.
308  */
309 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
310 {
311 	struct net_device *ndev = priv->dev;
312 	struct phy_device *phydev = ndev->phydev;
313 
314 	if (likely(priv->plat->fix_mac_speed))
315 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
316 }
317 
318 /**
319  * stmmac_enable_eee_mode - check and enter in LPI mode
320  * @priv: driver private structure
321  * Description: this function checks whether all TX queues are idle and, if
322  * so, enters the LPI mode (EEE).
323  */
324 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
325 {
326 	u32 tx_cnt = priv->plat->tx_queues_to_use;
327 	u32 queue;
328 
329 	/* check if all TX queues have the work finished */
330 	for (queue = 0; queue < tx_cnt; queue++) {
331 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
332 
333 		if (tx_q->dirty_tx != tx_q->cur_tx)
334 			return; /* still unfinished work */
335 	}
336 
337 	/* Check and enter in LPI mode */
338 	if (!priv->tx_path_in_lpi_mode)
339 		stmmac_set_eee_mode(priv, priv->hw,
340 				priv->plat->en_tx_lpi_clockgating);
341 }
342 
343 /**
344  * stmmac_disable_eee_mode - disable and exit from LPI mode
345  * @priv: driver private structure
346  * Description: this function exits and disables EEE when the TX path is in
347  * the LPI state. It is called from the xmit path.
348  */
349 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
350 {
351 	stmmac_reset_eee_mode(priv, priv->hw);
352 	del_timer_sync(&priv->eee_ctrl_timer);
353 	priv->tx_path_in_lpi_mode = false;
354 }
355 
356 /**
357  * stmmac_eee_ctrl_timer - EEE TX SW timer.
358  * @t: timer_list hook
359  * Description:
360  *  if there is no data transfer and we are not in the LPI state,
361  *  then the MAC transmitter can be moved to the LPI state.
362  */
363 static void stmmac_eee_ctrl_timer(struct timer_list *t)
364 {
365 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
366 
367 	stmmac_enable_eee_mode(priv);
368 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
369 }
370 
371 /**
372  * stmmac_eee_init - init EEE
373  * @priv: driver private structure
374  * Description:
375  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
376  *  can also manage EEE, this function enables the LPI state and starts the
377  *  related timer.
378  */
379 bool stmmac_eee_init(struct stmmac_priv *priv)
380 {
381 	struct net_device *ndev = priv->dev;
382 	int interface = priv->plat->interface;
383 	unsigned long flags;
384 	bool ret = false;
385 
386 	if ((interface != PHY_INTERFACE_MODE_MII) &&
387 	    (interface != PHY_INTERFACE_MODE_GMII) &&
388 	    !phy_interface_mode_is_rgmii(interface))
389 		goto out;
390 
391 	/* When using the PCS we cannot access the phy registers at this stage,
392 	 * so extra features like EEE are not supported.
393 	 */
394 	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
395 	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
396 	    (priv->hw->pcs == STMMAC_PCS_RTBI))
397 		goto out;
398 
399 	/* MAC core supports the EEE feature. */
400 	if (priv->dma_cap.eee) {
401 		int tx_lpi_timer = priv->tx_lpi_timer;
402 
403 		/* Check if the PHY supports EEE */
404 		if (phy_init_eee(ndev->phydev, 1)) {
405 			/* Manage the case where EEE can no longer be supported
406 			 * at run-time (for example because the link partner
407 			 * capabilities have changed).
408 			 * In that case the driver disables its own timers.
409 			 */
410 			spin_lock_irqsave(&priv->lock, flags);
411 			if (priv->eee_active) {
412 				netdev_dbg(priv->dev, "disable EEE\n");
413 				del_timer_sync(&priv->eee_ctrl_timer);
414 				stmmac_set_eee_timer(priv, priv->hw, 0,
415 						tx_lpi_timer);
416 			}
417 			priv->eee_active = 0;
418 			spin_unlock_irqrestore(&priv->lock, flags);
419 			goto out;
420 		}
421 		/* Activate the EEE and start timers */
422 		spin_lock_irqsave(&priv->lock, flags);
423 		if (!priv->eee_active) {
424 			priv->eee_active = 1;
425 			timer_setup(&priv->eee_ctrl_timer,
426 				    stmmac_eee_ctrl_timer, 0);
427 			mod_timer(&priv->eee_ctrl_timer,
428 				  STMMAC_LPI_T(eee_timer));
429 
430 			stmmac_set_eee_timer(priv, priv->hw,
431 					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
432 		}
433 		/* Set HW EEE according to the speed */
434 		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
435 
436 		ret = true;
437 		spin_unlock_irqrestore(&priv->lock, flags);
438 
439 		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440 	}
441 out:
442 	return ret;
443 }
444 
445 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
446  * @priv: driver private structure
447  * @p : descriptor pointer
448  * @skb : the socket buffer
449  * Description :
450  * This function reads the timestamp from the descriptor, performs some
451  * sanity checks and passes it to the stack.
452  */
453 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454 				   struct dma_desc *p, struct sk_buff *skb)
455 {
456 	struct skb_shared_hwtstamps shhwtstamp;
457 	u64 ns;
458 
459 	if (!priv->hwts_tx_en)
460 		return;
461 
462 	/* exit if skb doesn't support hw tstamp */
463 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464 		return;
465 
466 	/* check tx tstamp status */
467 	if (stmmac_get_tx_timestamp_status(priv, p)) {
468 		/* get the valid tstamp */
469 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
470 
471 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
473 
474 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475 		/* pass tstamp to stack */
476 		skb_tstamp_tx(skb, &shhwtstamp);
477 	}
478 
479 	return;
480 }
481 
482 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
483  * @priv: driver private structure
484  * @p : descriptor pointer
485  * @np : next descriptor pointer
486  * @skb : the socket buffer
487  * Description :
488  * This function reads the received packet's timestamp from the descriptor
489  * and passes it to the stack. It also performs some sanity checks.
490  */
491 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492 				   struct dma_desc *np, struct sk_buff *skb)
493 {
494 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
495 	struct dma_desc *desc = p;
496 	u64 ns;
497 
498 	if (!priv->hwts_rx_en)
499 		return;
500 	/* For GMAC4, the valid timestamp is from CTX next desc. */
501 	if (priv->plat->has_gmac4)
502 		desc = np;
503 
504 	/* Check if timestamp is available */
505 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
506 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
507 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508 		shhwtstamp = skb_hwtstamps(skb);
509 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
511 	} else  {
512 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513 	}
514 }
515 
516 /**
517  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
518  *  @dev: device pointer.
519  *  @ifr: An IOCTL-specific structure that can contain a pointer to
520  *  a proprietary structure used to pass information to the driver.
521  *  Description:
522  *  This function configures the MAC to enable/disable hardware timestamping
523  *  of both outgoing (TX) and incoming (RX) packets based on user input.
524  *  Return Value:
525  *  0 on success and an appropriate negative integer on failure.
526  */
527 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
528 {
529 	struct stmmac_priv *priv = netdev_priv(dev);
530 	struct hwtstamp_config config;
531 	struct timespec64 now;
532 	u64 temp = 0;
533 	u32 ptp_v2 = 0;
534 	u32 tstamp_all = 0;
535 	u32 ptp_over_ipv4_udp = 0;
536 	u32 ptp_over_ipv6_udp = 0;
537 	u32 ptp_over_ethernet = 0;
538 	u32 snap_type_sel = 0;
539 	u32 ts_master_en = 0;
540 	u32 ts_event_en = 0;
541 	u32 value = 0;
542 	u32 sec_inc;
543 
544 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
545 		netdev_alert(priv->dev, "No support for HW time stamping\n");
546 		priv->hwts_tx_en = 0;
547 		priv->hwts_rx_en = 0;
548 
549 		return -EOPNOTSUPP;
550 	}
551 
552 	if (copy_from_user(&config, ifr->ifr_data,
553 			   sizeof(struct hwtstamp_config)))
554 		return -EFAULT;
555 
556 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
557 		   __func__, config.flags, config.tx_type, config.rx_filter);
558 
559 	/* reserved for future extensions */
560 	if (config.flags)
561 		return -EINVAL;
562 
563 	if (config.tx_type != HWTSTAMP_TX_OFF &&
564 	    config.tx_type != HWTSTAMP_TX_ON)
565 		return -ERANGE;
566 
567 	if (priv->adv_ts) {
568 		switch (config.rx_filter) {
569 		case HWTSTAMP_FILTER_NONE:
570 			/* do not time stamp any incoming packet */
571 			config.rx_filter = HWTSTAMP_FILTER_NONE;
572 			break;
573 
574 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
575 			/* PTP v1, UDP, any kind of event packet */
576 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577 			/* take time stamp for all event messages */
578 			if (priv->plat->has_gmac4)
579 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
580 			else
581 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582 
583 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 			break;
586 
587 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
588 			/* PTP v1, UDP, Sync packet */
589 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
590 			/* take time stamp for SYNC messages only */
591 			ts_event_en = PTP_TCR_TSEVNTENA;
592 
593 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
594 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
595 			break;
596 
597 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
598 			/* PTP v1, UDP, Delay_req packet */
599 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
600 			/* take time stamp for Delay_Req messages only */
601 			ts_master_en = PTP_TCR_TSMSTRENA;
602 			ts_event_en = PTP_TCR_TSEVNTENA;
603 
604 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 			break;
607 
608 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
609 			/* PTP v2, UDP, any kind of event packet */
610 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
611 			ptp_v2 = PTP_TCR_TSVER2ENA;
612 			/* take time stamp for all event messages */
613 			if (priv->plat->has_gmac4)
614 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
615 			else
616 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
617 
618 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
619 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
620 			break;
621 
622 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
623 			/* PTP v2, UDP, Sync packet */
624 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
625 			ptp_v2 = PTP_TCR_TSVER2ENA;
626 			/* take time stamp for SYNC messages only */
627 			ts_event_en = PTP_TCR_TSEVNTENA;
628 
629 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
634 			/* PTP v2, UDP, Delay_req packet */
635 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
636 			ptp_v2 = PTP_TCR_TSVER2ENA;
637 			/* take time stamp for Delay_Req messages only */
638 			ts_master_en = PTP_TCR_TSMSTRENA;
639 			ts_event_en = PTP_TCR_TSEVNTENA;
640 
641 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 			break;
644 
645 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
646 			/* PTP v2/802.1AS, any layer, any kind of event packet */
647 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
648 			ptp_v2 = PTP_TCR_TSVER2ENA;
649 			/* take time stamp for all event messages */
650 			if (priv->plat->has_gmac4)
651 				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
652 			else
653 				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654 
655 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
656 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
657 			ptp_over_ethernet = PTP_TCR_TSIPENA;
658 			break;
659 
660 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
661 			/* PTP v2/802.1AS, any layer, Sync packet */
662 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
663 			ptp_v2 = PTP_TCR_TSVER2ENA;
664 			/* take time stamp for SYNC messages only */
665 			ts_event_en = PTP_TCR_TSEVNTENA;
666 
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			ptp_over_ethernet = PTP_TCR_TSIPENA;
670 			break;
671 
672 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
673 			/* PTP v2/802.1AS, any layer, Delay_req packet */
674 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
675 			ptp_v2 = PTP_TCR_TSVER2ENA;
676 			/* take time stamp for Delay_Req messages only */
677 			ts_master_en = PTP_TCR_TSMSTRENA;
678 			ts_event_en = PTP_TCR_TSEVNTENA;
679 
680 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
681 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
682 			ptp_over_ethernet = PTP_TCR_TSIPENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_NTP_ALL:
686 		case HWTSTAMP_FILTER_ALL:
687 			/* time stamp any incoming packet */
688 			config.rx_filter = HWTSTAMP_FILTER_ALL;
689 			tstamp_all = PTP_TCR_TSENALL;
690 			break;
691 
692 		default:
693 			return -ERANGE;
694 		}
695 	} else {
696 		switch (config.rx_filter) {
697 		case HWTSTAMP_FILTER_NONE:
698 			config.rx_filter = HWTSTAMP_FILTER_NONE;
699 			break;
700 		default:
701 			/* PTP v1, UDP, any kind of event packet */
702 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
703 			break;
704 		}
705 	}
706 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
707 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
708 
709 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
710 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
711 	else {
712 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
713 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
714 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
715 			 ts_master_en | snap_type_sel);
716 		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
717 
718 		/* program Sub Second Increment reg */
719 		stmmac_config_sub_second_increment(priv,
720 				priv->ptpaddr, priv->plat->clk_ptp_rate,
721 				priv->plat->has_gmac4, &sec_inc);
722 		temp = div_u64(1000000000ULL, sec_inc);
723 
724 		/* calculate the default addend value:
725 		 * formula is:
726 		 * addend = (2^32)/freq_div_ratio;
727 		 * where freq_div_ratio = clk_ptp_rate/(1e9/sec_inc)
728 		 */
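		/* Illustrative worked example (hypothetical values, not tied
		 * to a specific platform): with clk_ptp_rate = 62.5 MHz and
		 * sec_inc = 20 ns, 1e9/sec_inc = 50e6, so
		 * addend = (50e6 << 32) / 62.5e6 = 0xCCCCCCCC. The 32-bit
		 * accumulator then overflows 50e6 times per second, each
		 * overflow bumping the subsecond counter by sec_inc, i.e.
		 * exactly one second of time per second of the ptp clock.
		 */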
729 		temp = (u64)(temp << 32);
730 		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
731 		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
732 
733 		/* initialize system time */
734 		ktime_get_real_ts64(&now);
735 
736 		/* lower 32 bits of tv_sec are safe until y2106 */
737 		stmmac_init_systime(priv, priv->ptpaddr,
738 				(u32)now.tv_sec, now.tv_nsec);
739 	}
740 
741 	return copy_to_user(ifr->ifr_data, &config,
742 			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
743 }
744 
745 /**
746  * stmmac_init_ptp - init PTP
747  * @priv: driver private structure
748  * Description: this is to verify if the HW supports PTPv1 or PTPv2 by
749  * looking at the HW capability register.
750  * This function also registers the ptp driver.
751  */
752 static int stmmac_init_ptp(struct stmmac_priv *priv)
753 {
754 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
755 		return -EOPNOTSUPP;
756 
757 	priv->adv_ts = 0;
758 	/* Check if adv_ts can be enabled for dwmac 4.x core */
759 	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
760 		priv->adv_ts = 1;
761 	/* Dwmac 3.x core with extend_desc can support adv_ts */
762 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
763 		priv->adv_ts = 1;
764 
765 	if (priv->dma_cap.time_stamp)
766 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
767 
768 	if (priv->adv_ts)
769 		netdev_info(priv->dev,
770 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
771 
772 	priv->hwts_tx_en = 0;
773 	priv->hwts_rx_en = 0;
774 
775 	stmmac_ptp_register(priv);
776 
777 	return 0;
778 }
779 
780 static void stmmac_release_ptp(struct stmmac_priv *priv)
781 {
782 	if (priv->plat->clk_ptp_ref)
783 		clk_disable_unprepare(priv->plat->clk_ptp_ref);
784 	stmmac_ptp_unregister(priv);
785 }
786 
787 /**
788  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
789  *  @priv: driver private structure
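 *  @duplex: duplex mode reported by the PHY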
790  *  Description: It is used for configuring the flow control in all queues
791  */
792 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
793 {
794 	u32 tx_cnt = priv->plat->tx_queues_to_use;
795 
796 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
797 			priv->pause, tx_cnt);
798 }
799 
800 /**
801  * stmmac_adjust_link - adjusts the link parameters
802  * @dev: net device structure
803  * Description: this is the helper called by the physical abstraction layer
804  * drivers to communicate the phy link status. According to the speed and
805  * duplex this driver can invoke registered glue-logic as well.
806  * It also invokes the eee initialization because the interface may be
807  * attached at different times to different networks (that are eee capable).
808  */
809 static void stmmac_adjust_link(struct net_device *dev)
810 {
811 	struct stmmac_priv *priv = netdev_priv(dev);
812 	struct phy_device *phydev = dev->phydev;
813 	unsigned long flags;
814 	bool new_state = false;
815 
816 	if (!phydev)
817 		return;
818 
819 	spin_lock_irqsave(&priv->lock, flags);
820 
821 	if (phydev->link) {
822 		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
823 
824 		/* Now we make sure that we can be in full duplex mode.
825 		 * If not, we operate in half-duplex mode. */
826 		if (phydev->duplex != priv->oldduplex) {
827 			new_state = true;
828 			if (!phydev->duplex)
829 				ctrl &= ~priv->hw->link.duplex;
830 			else
831 				ctrl |= priv->hw->link.duplex;
832 			priv->oldduplex = phydev->duplex;
833 		}
834 		/* Flow Control operation */
835 		if (phydev->pause)
836 			stmmac_mac_flow_ctrl(priv, phydev->duplex);
837 
838 		if (phydev->speed != priv->speed) {
839 			new_state = true;
840 			ctrl &= ~priv->hw->link.speed_mask;
841 			switch (phydev->speed) {
842 			case SPEED_1000:
843 				ctrl |= priv->hw->link.speed1000;
844 				break;
845 			case SPEED_100:
846 				ctrl |= priv->hw->link.speed100;
847 				break;
848 			case SPEED_10:
849 				ctrl |= priv->hw->link.speed10;
850 				break;
851 			default:
852 				netif_warn(priv, link, priv->dev,
853 					   "broken speed: %d\n", phydev->speed);
854 				phydev->speed = SPEED_UNKNOWN;
855 				break;
856 			}
857 			if (phydev->speed != SPEED_UNKNOWN)
858 				stmmac_hw_fix_mac_speed(priv);
859 			priv->speed = phydev->speed;
860 		}
861 
862 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
863 
864 		if (!priv->oldlink) {
865 			new_state = true;
866 			priv->oldlink = true;
867 		}
868 	} else if (priv->oldlink) {
869 		new_state = true;
870 		priv->oldlink = false;
871 		priv->speed = SPEED_UNKNOWN;
872 		priv->oldduplex = DUPLEX_UNKNOWN;
873 	}
874 
875 	if (new_state && netif_msg_link(priv))
876 		phy_print_status(phydev);
877 
878 	spin_unlock_irqrestore(&priv->lock, flags);
879 
880 	if (phydev->is_pseudo_fixed_link)
881 		/* Stop the PHY layer from calling the adjust_link hook when
882 		 * a switch is attached to the stmmac driver.
883 		 */
884 		phydev->irq = PHY_IGNORE_INTERRUPT;
885 	else
886 		/* At this stage, init the EEE if supported.
887 		 * Never called in case of fixed_link.
888 		 */
889 		priv->eee_enabled = stmmac_eee_init(priv);
890 }
891 
892 /**
893  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
894  * @priv: driver private structure
895  * Description: this is to verify if the HW supports the Physical Coding
896  * Sublayer (PCS), an interface that can be used when the MAC is
897  * configured for the TBI, RTBI, or SGMII PHY interface.
898  */
899 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
900 {
901 	int interface = priv->plat->interface;
902 
903 	if (priv->dma_cap.pcs) {
904 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
905 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
906 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
907 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
908 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
909 			priv->hw->pcs = STMMAC_PCS_RGMII;
910 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
911 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
912 			priv->hw->pcs = STMMAC_PCS_SGMII;
913 		}
914 	}
915 }
916 
917 /**
918  * stmmac_init_phy - PHY initialization
919  * @dev: net device structure
920  * Description: it initializes the driver's PHY state, and attaches the PHY
921  * to the mac driver.
922  *  Return value:
923  *  0 on success
924  */
925 static int stmmac_init_phy(struct net_device *dev)
926 {
927 	struct stmmac_priv *priv = netdev_priv(dev);
928 	struct phy_device *phydev;
929 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
930 	char bus_id[MII_BUS_ID_SIZE];
931 	int interface = priv->plat->interface;
932 	int max_speed = priv->plat->max_speed;

933 	priv->oldlink = false;
934 	priv->speed = SPEED_UNKNOWN;
935 	priv->oldduplex = DUPLEX_UNKNOWN;
936 
937 	if (priv->plat->phy_node) {
938 		phydev = of_phy_connect(dev, priv->plat->phy_node,
939 					&stmmac_adjust_link, 0, interface);
940 	} else {
941 		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
942 			 priv->plat->bus_id);
943 
944 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
945 			 priv->plat->phy_addr);
946 		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
947 			   phy_id_fmt);
948 
949 		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
950 				     interface);
951 	}
952 
953 	if (IS_ERR_OR_NULL(phydev)) {
954 		netdev_err(priv->dev, "Could not attach to PHY\n");
955 		if (!phydev)
956 			return -ENODEV;
957 
958 		return PTR_ERR(phydev);
959 	}
960 
961 	/* Stop advertising 1000BASE-T capability if the interface is not GMII */
962 	if ((interface == PHY_INTERFACE_MODE_MII) ||
963 	    (interface == PHY_INTERFACE_MODE_RMII) ||
964 		(max_speed < 1000 && max_speed > 0))
965 		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
966 					 SUPPORTED_1000baseT_Full);
967 
968 	/*
969 	 * Broken HW is sometimes missing the pull-up resistor on the
970 	 * MDIO line, which results in reads to non-existent devices returning
971 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
972 	 * device as well.
973 	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
974 	 */
975 	if (!priv->plat->phy_node && phydev->phy_id == 0) {
976 		phy_disconnect(phydev);
977 		return -ENODEV;
978 	}
979 
980 	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
981 	 * subsequent PHY polling; make sure we force a link transition if
982 	 * we have a UP/DOWN/UP transition
983 	 */
984 	if (phydev->is_pseudo_fixed_link)
985 		phydev->irq = PHY_POLL;
986 
987 	phy_attached_info(phydev);
988 	return 0;
989 }
990 
991 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
992 {
993 	u32 rx_cnt = priv->plat->rx_queues_to_use;
994 	void *head_rx;
995 	u32 queue;
996 
997 	/* Display RX rings */
998 	for (queue = 0; queue < rx_cnt; queue++) {
999 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1000 
1001 		pr_info("\tRX Queue %u rings\n", queue);
1002 
1003 		if (priv->extend_desc)
1004 			head_rx = (void *)rx_q->dma_erx;
1005 		else
1006 			head_rx = (void *)rx_q->dma_rx;
1007 
1008 		/* Display RX ring */
1009 		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1010 	}
1011 }
1012 
1013 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1014 {
1015 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1016 	void *head_tx;
1017 	u32 queue;
1018 
1019 	/* Display TX rings */
1020 	for (queue = 0; queue < tx_cnt; queue++) {
1021 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1022 
1023 		pr_info("\tTX Queue %u rings\n", queue);
1024 
1025 		if (priv->extend_desc)
1026 			head_tx = (void *)tx_q->dma_etx;
1027 		else
1028 			head_tx = (void *)tx_q->dma_tx;
1029 
1030 		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1031 	}
1032 }
1033 
1034 static void stmmac_display_rings(struct stmmac_priv *priv)
1035 {
1036 	/* Display RX ring */
1037 	stmmac_display_rx_rings(priv);
1038 
1039 	/* Display TX ring */
1040 	stmmac_display_tx_rings(priv);
1041 }
1042 
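/* Map the interface MTU to a DMA buffer size. Illustrative examples:
 * an MTU of 1500 fits in DEFAULT_BUFSIZE (1536), while an MTU of 3000
 * requires BUF_SIZE_4KiB.
 */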
1043 static int stmmac_set_bfsize(int mtu, int bufsize)
1044 {
1045 	int ret = bufsize;
1046 
1047 	if (mtu >= BUF_SIZE_4KiB)
1048 		ret = BUF_SIZE_8KiB;
1049 	else if (mtu >= BUF_SIZE_2KiB)
1050 		ret = BUF_SIZE_4KiB;
1051 	else if (mtu > DEFAULT_BUFSIZE)
1052 		ret = BUF_SIZE_2KiB;
1053 	else
1054 		ret = DEFAULT_BUFSIZE;
1055 
1056 	return ret;
1057 }
1058 
1059 /**
1060  * stmmac_clear_rx_descriptors - clear RX descriptors
1061  * @priv: driver private structure
1062  * @queue: RX queue index
1063  * Description: this function is called to clear the RX descriptors
1064  * whether basic or extended descriptors are used.
1065  */
1066 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1067 {
1068 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1069 	int i;
1070 
1071 	/* Clear the RX descriptors */
1072 	for (i = 0; i < DMA_RX_SIZE; i++)
1073 		if (priv->extend_desc)
1074 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1075 					priv->use_riwt, priv->mode,
1076 					(i == DMA_RX_SIZE - 1));
1077 		else
1078 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1079 					priv->use_riwt, priv->mode,
1080 					(i == DMA_RX_SIZE - 1));
1081 }
1082 
1083 /**
1084  * stmmac_clear_tx_descriptors - clear tx descriptors
1085  * @priv: driver private structure
1086  * @queue: TX queue index.
1087  * Description: this function is called to clear the TX descriptors
1088  * whether basic or extended descriptors are used.
1089  */
1090 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1091 {
1092 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1093 	int i;
1094 
1095 	/* Clear the TX descriptors */
1096 	for (i = 0; i < DMA_TX_SIZE; i++)
1097 		if (priv->extend_desc)
1098 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1099 					priv->mode, (i == DMA_TX_SIZE - 1));
1100 		else
1101 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1102 					priv->mode, (i == DMA_TX_SIZE - 1));
1103 }
1104 
1105 /**
1106  * stmmac_clear_descriptors - clear descriptors
1107  * @priv: driver private structure
1108  * Description: this function is called to clear the TX and RX descriptors
1109  * whether basic or extended descriptors are used.
1110  */
1111 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1112 {
1113 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1114 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1115 	u32 queue;
1116 
1117 	/* Clear the RX descriptors */
1118 	for (queue = 0; queue < rx_queue_cnt; queue++)
1119 		stmmac_clear_rx_descriptors(priv, queue);
1120 
1121 	/* Clear the TX descriptors */
1122 	for (queue = 0; queue < tx_queue_cnt; queue++)
1123 		stmmac_clear_tx_descriptors(priv, queue);
1124 }
1125 
1126 /**
1127  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1128  * @priv: driver private structure
1129  * @p: descriptor pointer
1130  * @i: descriptor index
1131  * @flags: gfp flag
1132  * @queue: RX queue index
1133  * Description: this function is called to allocate a receive buffer, perform
1134  * the DMA mapping and init the descriptor.
1135  */
1136 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1137 				  int i, gfp_t flags, u32 queue)
1138 {
1139 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1140 	struct sk_buff *skb;
1141 
1142 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1143 	if (!skb) {
1144 		netdev_err(priv->dev,
1145 			   "%s: Rx init fails; skb is NULL\n", __func__);
1146 		return -ENOMEM;
1147 	}
1148 	rx_q->rx_skbuff[i] = skb;
1149 	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1150 						priv->dma_buf_sz,
1151 						DMA_FROM_DEVICE);
1152 	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1153 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1154 		dev_kfree_skb_any(skb);
1155 		return -EINVAL;
1156 	}
1157 
1158 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
1159 		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1160 	else
1161 		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1162 
1163 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1164 		stmmac_init_desc3(priv, p);
1165 
1166 	return 0;
1167 }
1168 
1169 /**
1170  * stmmac_free_rx_buffer - free an RX dma buffer
1171  * @priv: private structure
1172  * @queue: RX queue index
1173  * @i: buffer index.
1174  */
1175 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1176 {
1177 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1178 
1179 	if (rx_q->rx_skbuff[i]) {
1180 		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1181 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
1182 		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1183 	}
1184 	rx_q->rx_skbuff[i] = NULL;
1185 }
1186 
1187 /**
1188  * stmmac_free_tx_buffer - free a TX dma buffer
1189  * @priv: private structure
1190  * @queue: TX queue index
1191  * @i: buffer index.
1192  */
1193 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1194 {
1195 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1196 
1197 	if (tx_q->tx_skbuff_dma[i].buf) {
1198 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1199 			dma_unmap_page(priv->device,
1200 				       tx_q->tx_skbuff_dma[i].buf,
1201 				       tx_q->tx_skbuff_dma[i].len,
1202 				       DMA_TO_DEVICE);
1203 		else
1204 			dma_unmap_single(priv->device,
1205 					 tx_q->tx_skbuff_dma[i].buf,
1206 					 tx_q->tx_skbuff_dma[i].len,
1207 					 DMA_TO_DEVICE);
1208 	}
1209 
1210 	if (tx_q->tx_skbuff[i]) {
1211 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1212 		tx_q->tx_skbuff[i] = NULL;
1213 		tx_q->tx_skbuff_dma[i].buf = 0;
1214 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1215 	}
1216 }
1217 
1218 /**
1219  * init_dma_rx_desc_rings - init the RX descriptor rings
1220  * @dev: net device structure
1221  * @flags: gfp flag.
1222  * Description: this function initializes the DMA RX descriptors
1223  * and allocates the socket buffers. It supports the chained and ring
1224  * modes.
1225  */
1226 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1227 {
1228 	struct stmmac_priv *priv = netdev_priv(dev);
1229 	u32 rx_count = priv->plat->rx_queues_to_use;
1230 	int ret = -ENOMEM;
1231 	int bfsize = 0;
1232 	int queue;
1233 	int i;
1234 
1235 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1236 	if (bfsize < 0)
1237 		bfsize = 0;
1238 
1239 	if (bfsize < BUF_SIZE_16KiB)
1240 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1241 
1242 	priv->dma_buf_sz = bfsize;
1243 
1244 	/* RX INITIALIZATION */
1245 	netif_dbg(priv, probe, priv->dev,
1246 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1247 
1248 	for (queue = 0; queue < rx_count; queue++) {
1249 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1250 
1251 		netif_dbg(priv, probe, priv->dev,
1252 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1253 			  (u32)rx_q->dma_rx_phy);
1254 
1255 		for (i = 0; i < DMA_RX_SIZE; i++) {
1256 			struct dma_desc *p;
1257 
1258 			if (priv->extend_desc)
1259 				p = &((rx_q->dma_erx + i)->basic);
1260 			else
1261 				p = rx_q->dma_rx + i;
1262 
1263 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1264 						     queue);
1265 			if (ret)
1266 				goto err_init_rx_buffers;
1267 
1268 			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1269 				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1270 				  (unsigned int)rx_q->rx_skbuff_dma[i]);
1271 		}
1272 
1273 		rx_q->cur_rx = 0;
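		/* The loop above ran to completion, so i == DMA_RX_SIZE here
		 * and dirty_rx starts at 0; the cast just makes the unsigned
		 * wrap-around arithmetic explicit.
		 */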
1274 		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1275 
1276 		stmmac_clear_rx_descriptors(priv, queue);
1277 
1278 		/* Setup the chained descriptor addresses */
1279 		if (priv->mode == STMMAC_CHAIN_MODE) {
1280 			if (priv->extend_desc)
1281 				stmmac_mode_init(priv, rx_q->dma_erx,
1282 						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1283 			else
1284 				stmmac_mode_init(priv, rx_q->dma_rx,
1285 						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1286 		}
1287 	}
1288 
1289 	buf_sz = bfsize;
1290 
1291 	return 0;
1292 
1293 err_init_rx_buffers:
1294 	while (queue >= 0) {
1295 		while (--i >= 0)
1296 			stmmac_free_rx_buffer(priv, queue, i);
1297 
1298 		if (queue == 0)
1299 			break;
1300 
1301 		i = DMA_RX_SIZE;
1302 		queue--;
1303 	}
1304 
1305 	return ret;
1306 }
1307 
1308 /**
1309  * init_dma_tx_desc_rings - init the TX descriptor rings
1310  * @dev: net device structure.
1311  * Description: this function initializes the DMA TX descriptors
1312  * and allocates the socket buffers. It supports the chained and ring
1313  * modes.
1314  */
1315 static int init_dma_tx_desc_rings(struct net_device *dev)
1316 {
1317 	struct stmmac_priv *priv = netdev_priv(dev);
1318 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1319 	u32 queue;
1320 	int i;
1321 
1322 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1323 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1324 
1325 		netif_dbg(priv, probe, priv->dev,
1326 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1327 			 (u32)tx_q->dma_tx_phy);
1328 
1329 		/* Setup the chained descriptor addresses */
1330 		if (priv->mode == STMMAC_CHAIN_MODE) {
1331 			if (priv->extend_desc)
1332 				stmmac_mode_init(priv, tx_q->dma_etx,
1333 						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1334 			else
1335 				stmmac_mode_init(priv, tx_q->dma_tx,
1336 						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1337 		}
1338 
1339 		for (i = 0; i < DMA_TX_SIZE; i++) {
1340 			struct dma_desc *p;
1341 			if (priv->extend_desc)
1342 				p = &((tx_q->dma_etx + i)->basic);
1343 			else
1344 				p = tx_q->dma_tx + i;
1345 
1346 			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1347 				p->des0 = 0;
1348 				p->des1 = 0;
1349 				p->des2 = 0;
1350 				p->des3 = 0;
1351 			} else {
1352 				p->des2 = 0;
1353 			}
1354 
1355 			tx_q->tx_skbuff_dma[i].buf = 0;
1356 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1357 			tx_q->tx_skbuff_dma[i].len = 0;
1358 			tx_q->tx_skbuff_dma[i].last_segment = false;
1359 			tx_q->tx_skbuff[i] = NULL;
1360 		}
1361 
1362 		tx_q->dirty_tx = 0;
1363 		tx_q->cur_tx = 0;
1364 		tx_q->mss = 0;
1365 
1366 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1367 	}
1368 
1369 	return 0;
1370 }
1371 
1372 /**
1373  * init_dma_desc_rings - init the RX/TX descriptor rings
1374  * @dev: net device structure
1375  * @flags: gfp flag.
1376  * Description: this function initializes the DMA RX/TX descriptors
1377  * and allocates the socket buffers. It supports the chained and ring
1378  * modes.
1379  */
1380 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1381 {
1382 	struct stmmac_priv *priv = netdev_priv(dev);
1383 	int ret;
1384 
1385 	ret = init_dma_rx_desc_rings(dev, flags);
1386 	if (ret)
1387 		return ret;
1388 
1389 	ret = init_dma_tx_desc_rings(dev);
1390 
1391 	stmmac_clear_descriptors(priv);
1392 
1393 	if (netif_msg_hw(priv))
1394 		stmmac_display_rings(priv);
1395 
1396 	return ret;
1397 }
1398 
1399 /**
1400  * dma_free_rx_skbufs - free RX dma buffers
1401  * @priv: private structure
1402  * @queue: RX queue index
1403  */
1404 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1405 {
1406 	int i;
1407 
1408 	for (i = 0; i < DMA_RX_SIZE; i++)
1409 		stmmac_free_rx_buffer(priv, queue, i);
1410 }
1411 
1412 /**
1413  * dma_free_tx_skbufs - free TX dma buffers
1414  * @priv: private structure
1415  * @queue: TX queue index
1416  */
1417 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1418 {
1419 	int i;
1420 
1421 	for (i = 0; i < DMA_TX_SIZE; i++)
1422 		stmmac_free_tx_buffer(priv, queue, i);
1423 }
1424 
1425 /**
1426  * free_dma_rx_desc_resources - free RX dma desc resources
1427  * @priv: private structure
1428  */
1429 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1430 {
1431 	u32 rx_count = priv->plat->rx_queues_to_use;
1432 	u32 queue;
1433 
1434 	/* Free RX queue resources */
1435 	for (queue = 0; queue < rx_count; queue++) {
1436 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1437 
1438 		/* Release the DMA RX socket buffers */
1439 		dma_free_rx_skbufs(priv, queue);
1440 
1441 		/* Free DMA regions of consistent memory previously allocated */
1442 		if (!priv->extend_desc)
1443 			dma_free_coherent(priv->device,
1444 					  DMA_RX_SIZE * sizeof(struct dma_desc),
1445 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1446 		else
1447 			dma_free_coherent(priv->device, DMA_RX_SIZE *
1448 					  sizeof(struct dma_extended_desc),
1449 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1450 
1451 		kfree(rx_q->rx_skbuff_dma);
1452 		kfree(rx_q->rx_skbuff);
1453 	}
1454 }
1455 
1456 /**
1457  * free_dma_tx_desc_resources - free TX dma desc resources
1458  * @priv: private structure
1459  */
1460 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1461 {
1462 	u32 tx_count = priv->plat->tx_queues_to_use;
1463 	u32 queue;
1464 
1465 	/* Free TX queue resources */
1466 	for (queue = 0; queue < tx_count; queue++) {
1467 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1468 
1469 		/* Release the DMA TX socket buffers */
1470 		dma_free_tx_skbufs(priv, queue);
1471 
1472 		/* Free DMA regions of consistent memory previously allocated */
1473 		if (!priv->extend_desc)
1474 			dma_free_coherent(priv->device,
1475 					  DMA_TX_SIZE * sizeof(struct dma_desc),
1476 					  tx_q->dma_tx, tx_q->dma_tx_phy);
1477 		else
1478 			dma_free_coherent(priv->device, DMA_TX_SIZE *
1479 					  sizeof(struct dma_extended_desc),
1480 					  tx_q->dma_etx, tx_q->dma_tx_phy);
1481 
1482 		kfree(tx_q->tx_skbuff_dma);
1483 		kfree(tx_q->tx_skbuff);
1484 	}
1485 }
1486 
1487 /**
1488  * alloc_dma_rx_desc_resources - alloc RX resources.
1489  * @priv: private structure
1490  * Description: according to which descriptor can be used (extended or basic)
1491  * this function allocates the resources for the RX path. In case of
1492  * reception it pre-allocates the RX socket buffers in order to allow
1493  * the zero-copy mechanism.
1494  */
1495 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1496 {
1497 	u32 rx_count = priv->plat->rx_queues_to_use;
1498 	int ret = -ENOMEM;
1499 	u32 queue;
1500 
1501 	/* RX queues buffers and DMA */
1502 	for (queue = 0; queue < rx_count; queue++) {
1503 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1504 
1505 		rx_q->queue_index = queue;
1506 		rx_q->priv_data = priv;
1507 
1508 		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1509 						    sizeof(dma_addr_t),
1510 						    GFP_KERNEL);
1511 		if (!rx_q->rx_skbuff_dma)
1512 			goto err_dma;
1513 
1514 		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1515 						sizeof(struct sk_buff *),
1516 						GFP_KERNEL);
1517 		if (!rx_q->rx_skbuff)
1518 			goto err_dma;
1519 
1520 		if (priv->extend_desc) {
1521 			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1522 							    DMA_RX_SIZE *
1523 							    sizeof(struct
1524 							    dma_extended_desc),
1525 							    &rx_q->dma_rx_phy,
1526 							    GFP_KERNEL);
1527 			if (!rx_q->dma_erx)
1528 				goto err_dma;
1529 
1530 		} else {
1531 			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1532 							   DMA_RX_SIZE *
1533 							   sizeof(struct
1534 							   dma_desc),
1535 							   &rx_q->dma_rx_phy,
1536 							   GFP_KERNEL);
1537 			if (!rx_q->dma_rx)
1538 				goto err_dma;
1539 		}
1540 	}
1541 
1542 	return 0;
1543 
1544 err_dma:
1545 	free_dma_rx_desc_resources(priv);
1546 
1547 	return ret;
1548 }
1549 
1550 /**
1551  * alloc_dma_tx_desc_resources - alloc TX resources.
1552  * @priv: private structure
1553  * Description: according to which descriptor can be used (extended or basic)
1554  * this function allocates the resources for the TX path: the TX socket
1555  * buffer pointers, their DMA state and the TX descriptor rings
1556  * themselves.
1557  */
1558 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1559 {
1560 	u32 tx_count = priv->plat->tx_queues_to_use;
1561 	int ret = -ENOMEM;
1562 	u32 queue;
1563 
1564 	/* TX queues buffers and DMA */
1565 	for (queue = 0; queue < tx_count; queue++) {
1566 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1567 
1568 		tx_q->queue_index = queue;
1569 		tx_q->priv_data = priv;
1570 
1571 		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1572 						    sizeof(*tx_q->tx_skbuff_dma),
1573 						    GFP_KERNEL);
1574 		if (!tx_q->tx_skbuff_dma)
1575 			goto err_dma;
1576 
1577 		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1578 						sizeof(struct sk_buff *),
1579 						GFP_KERNEL);
1580 		if (!tx_q->tx_skbuff)
1581 			goto err_dma;
1582 
1583 		if (priv->extend_desc) {
1584 			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1585 							    DMA_TX_SIZE *
1586 							    sizeof(struct
1587 							    dma_extended_desc),
1588 							    &tx_q->dma_tx_phy,
1589 							    GFP_KERNEL);
1590 			if (!tx_q->dma_etx)
1591 				goto err_dma;
1592 		} else {
1593 			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1594 							   DMA_TX_SIZE *
1595 							   sizeof(struct
1596 								  dma_desc),
1597 							   &tx_q->dma_tx_phy,
1598 							   GFP_KERNEL);
1599 			if (!tx_q->dma_tx)
1600 				goto err_dma;
1601 		}
1602 	}
1603 
1604 	return 0;
1605 
1606 err_dma:
1607 	free_dma_tx_desc_resources(priv);
1608 
1609 	return ret;
1610 }
1611 
1612 /**
1613  * alloc_dma_desc_resources - alloc TX/RX resources.
1614  * @priv: private structure
1615  * Description: according to which descriptor can be used (extended or basic)
1616  * this function allocates the resources for the TX and RX paths. In case of
1617  * reception, for example, it pre-allocates the RX socket buffers in order to
1618  * allow the zero-copy mechanism.
1619  */
1620 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1621 {
1622 	/* RX Allocation */
1623 	int ret = alloc_dma_rx_desc_resources(priv);
1624 
1625 	if (ret)
1626 		return ret;
1627 
1628 	ret = alloc_dma_tx_desc_resources(priv);
1629 
1630 	return ret;
1631 }
1632 
1633 /**
1634  * free_dma_desc_resources - free dma desc resources
1635  * @priv: private structure
1636  */
1637 static void free_dma_desc_resources(struct stmmac_priv *priv)
1638 {
1639 	/* Release the DMA RX socket buffers */
1640 	free_dma_rx_desc_resources(priv);
1641 
1642 	/* Release the DMA TX socket buffers */
1643 	free_dma_tx_desc_resources(priv);
1644 }
1645 
1646 /**
1647  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1648  *  @priv: driver private structure
1649  *  Description: It is used for enabling the rx queues in the MAC
1650  */
1651 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1652 {
1653 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1654 	int queue;
1655 	u8 mode;
1656 
1657 	for (queue = 0; queue < rx_queues_count; queue++) {
1658 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1659 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1660 	}
1661 }
1662 
1663 /**
1664  * stmmac_start_rx_dma - start RX DMA channel
1665  * @priv: driver private structure
1666  * @chan: RX channel index
1667  * Description:
1668  * This starts an RX DMA channel
1669  */
1670 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1671 {
1672 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1673 	stmmac_start_rx(priv, priv->ioaddr, chan);
1674 }
1675 
1676 /**
1677  * stmmac_start_tx_dma - start TX DMA channel
1678  * @priv: driver private structure
1679  * @chan: TX channel index
1680  * Description:
1681  * This starts a TX DMA channel
1682  */
1683 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1684 {
1685 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1686 	stmmac_start_tx(priv, priv->ioaddr, chan);
1687 }
1688 
1689 /**
1690  * stmmac_stop_rx_dma - stop RX DMA channel
1691  * @priv: driver private structure
1692  * @chan: RX channel index
1693  * Description:
1694  * This stops a RX DMA channel
1695  * This stops an RX DMA channel
1696 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1697 {
1698 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1699 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1700 }
1701 
1702 /**
1703  * stmmac_stop_tx_dma - stop TX DMA channel
1704  * @priv: driver private structure
1705  * @chan: TX channel index
1706  * Description:
1707  * This stops a TX DMA channel
1708  */
1709 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1710 {
1711 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1712 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1713 }
1714 
1715 /**
1716  * stmmac_start_all_dma - start all RX and TX DMA channels
1717  * @priv: driver private structure
1718  * Description:
1719  * This starts all the RX and TX DMA channels
1720  */
1721 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1722 {
1723 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1724 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1725 	u32 chan = 0;
1726 
1727 	for (chan = 0; chan < rx_channels_count; chan++)
1728 		stmmac_start_rx_dma(priv, chan);
1729 
1730 	for (chan = 0; chan < tx_channels_count; chan++)
1731 		stmmac_start_tx_dma(priv, chan);
1732 }
1733 
1734 /**
1735  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1736  * @priv: driver private structure
1737  * Description:
1738  * This stops the RX and TX DMA channels
1739  */
1740 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1741 {
1742 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1743 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1744 	u32 chan = 0;
1745 
1746 	for (chan = 0; chan < rx_channels_count; chan++)
1747 		stmmac_stop_rx_dma(priv, chan);
1748 
1749 	for (chan = 0; chan < tx_channels_count; chan++)
1750 		stmmac_stop_tx_dma(priv, chan);
1751 }
1752 
1753 /**
1754  *  stmmac_dma_operation_mode - HW DMA operation mode
1755  *  @priv: driver private structure
1756  *  Description: it is used for configuring the DMA operation mode register in
1757  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1758  */
1759 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1760 {
1761 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1762 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1763 	int rxfifosz = priv->plat->rx_fifo_size;
1764 	int txfifosz = priv->plat->tx_fifo_size;
1765 	u32 txmode = 0;
1766 	u32 rxmode = 0;
1767 	u32 chan = 0;
1768 	u8 qmode = 0;
1769 
1770 	if (rxfifosz == 0)
1771 		rxfifosz = priv->dma_cap.rx_fifo_size;
1772 	if (txfifosz == 0)
1773 		txfifosz = priv->dma_cap.tx_fifo_size;
1774 
1775 	/* Adjust for real per queue fifo size */
1776 	rxfifosz /= rx_channels_count;
1777 	txfifosz /= tx_channels_count;
1778 
1779 	if (priv->plat->force_thresh_dma_mode) {
1780 		txmode = tc;
1781 		rxmode = tc;
1782 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1783 		/*
1784 		 * In case of GMAC, SF mode can be enabled
1785 		 * to perform the TX COE in HW. This depends on:
1786 		 * 1) TX COE being actually supported;
1787 		 * 2) there being no buggy Jumbo frame support
1788 		 *    that requires not inserting the csum in the TDES.
1789 		 */
1790 		txmode = SF_DMA_MODE;
1791 		rxmode = SF_DMA_MODE;
1792 		priv->xstats.threshold = SF_DMA_MODE;
1793 	} else {
1794 		txmode = tc;
1795 		rxmode = SF_DMA_MODE;
1796 	}
1797 
1798 	/* configure all channels */
1799 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1800 		for (chan = 0; chan < rx_channels_count; chan++) {
1801 			qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1802 
1803 			stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1804 					rxfifosz, qmode);
1805 		}
1806 
1807 		for (chan = 0; chan < tx_channels_count; chan++) {
1808 			qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1809 
1810 			stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1811 					txfifosz, qmode);
1812 		}
1813 	} else {
1814 		stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
1815 	}
1816 }
1817 
1818 /**
1819  * stmmac_tx_clean - to manage the transmission completion
1820  * @priv: driver private structure
1821  * @queue: TX queue index
1822  * Description: it reclaims the transmit resources after transmission completes.
1823  */
1824 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1825 {
1826 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1827 	unsigned int bytes_compl = 0, pkts_compl = 0;
1828 	unsigned int entry;
1829 
1830 	netif_tx_lock(priv->dev);
1831 
1832 	priv->xstats.tx_clean++;
1833 
1834 	entry = tx_q->dirty_tx;
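	/* Walk the ring from the last cleaned descriptor up to the one
	 * currently used by ndo_start_xmit, stopping as soon as a
	 * descriptor is still owned by the DMA.
	 */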
1835 	while (entry != tx_q->cur_tx) {
1836 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
1837 		struct dma_desc *p;
1838 		int status;
1839 
1840 		if (priv->extend_desc)
1841 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
1842 		else
1843 			p = tx_q->dma_tx + entry;
1844 
1845 		status = stmmac_tx_status(priv, &priv->dev->stats,
1846 				&priv->xstats, p, priv->ioaddr);
1847 		/* Check if the descriptor is owned by the DMA */
1848 		if (unlikely(status & tx_dma_own))
1849 			break;
1850 
1851 		/* Make sure descriptor fields are read after reading
1852 		 * the own bit.
1853 		 */
1854 		dma_rmb();
1855 
1856 		/* Just consider the last segment and ... */
1857 		if (likely(!(status & tx_not_ls))) {
1858 			/* ... verify the status error condition */
1859 			if (unlikely(status & tx_err)) {
1860 				priv->dev->stats.tx_errors++;
1861 			} else {
1862 				priv->dev->stats.tx_packets++;
1863 				priv->xstats.tx_pkt_n++;
1864 			}
1865 			stmmac_get_tx_hwtstamp(priv, p, skb);
1866 		}
1867 
1868 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1869 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
1870 				dma_unmap_page(priv->device,
1871 					       tx_q->tx_skbuff_dma[entry].buf,
1872 					       tx_q->tx_skbuff_dma[entry].len,
1873 					       DMA_TO_DEVICE);
1874 			else
1875 				dma_unmap_single(priv->device,
1876 						 tx_q->tx_skbuff_dma[entry].buf,
1877 						 tx_q->tx_skbuff_dma[entry].len,
1878 						 DMA_TO_DEVICE);
1879 			tx_q->tx_skbuff_dma[entry].buf = 0;
1880 			tx_q->tx_skbuff_dma[entry].len = 0;
1881 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
1882 		}
1883 
1884 		stmmac_clean_desc3(priv, tx_q, p);
1885 
1886 		tx_q->tx_skbuff_dma[entry].last_segment = false;
1887 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1888 
1889 		if (likely(skb != NULL)) {
1890 			pkts_compl++;
1891 			bytes_compl += skb->len;
1892 			dev_consume_skb_any(skb);
1893 			tx_q->tx_skbuff[entry] = NULL;
1894 		}
1895 
1896 		stmmac_release_tx_desc(priv, p, priv->mode);
1897 
1898 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1899 	}
1900 	tx_q->dirty_tx = entry;
1901 
1902 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1903 				  pkts_compl, bytes_compl);
1904 
1905 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1906 								queue))) &&
1907 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1908 
1909 		netif_dbg(priv, tx_done, priv->dev,
1910 			  "%s: restart transmit\n", __func__);
1911 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1912 	}
1913 
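	/* If EEE is enabled and the TX path is not already in LPI, ask the
	 * MAC to enter the low power state and re-arm the LPI timer.
	 */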
1914 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1915 		stmmac_enable_eee_mode(priv);
1916 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1917 	}
1918 	netif_tx_unlock(priv->dev);
1919 }
1920 
1921 /**
1922  * stmmac_tx_err - to manage the tx error
1923  * @priv: driver private structure
1924  * @chan: channel index
1925  * Description: it cleans the descriptors and restarts the transmission
1926  * in case of transmission errors.
1927  */
1928 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1929 {
1930 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1931 	int i;
1932 
1933 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1934 
1935 	stmmac_stop_tx_dma(priv, chan);
1936 	dma_free_tx_skbufs(priv, chan);
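	/* Put every descriptor in the ring back into a known clean state
	 * before restarting the DMA.
	 */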
1937 	for (i = 0; i < DMA_TX_SIZE; i++)
1938 		if (priv->extend_desc)
1939 			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1940 					priv->mode, (i == DMA_TX_SIZE - 1));
1941 		else
1942 			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1943 					priv->mode, (i == DMA_TX_SIZE - 1));
1944 	tx_q->dirty_tx = 0;
1945 	tx_q->cur_tx = 0;
1946 	tx_q->mss = 0;
1947 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1948 	stmmac_start_tx_dma(priv, chan);
1949 
1950 	priv->dev->stats.tx_errors++;
1951 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1952 }
1953 
1954 /**
1955  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1956  *  @priv: driver private structure
1957  *  @txmode: TX operating mode
1958  *  @rxmode: RX operating mode
1959  *  @chan: channel index
1960  *  Description: it is used for configuring the DMA operation mode at
1961  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1962  *  mode.
1963  */
1964 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1965 					  u32 rxmode, u32 chan)
1966 {
1967 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1968 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1969 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1970 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1971 	int rxfifosz = priv->plat->rx_fifo_size;
1972 	int txfifosz = priv->plat->tx_fifo_size;
1973 
1974 	if (rxfifosz == 0)
1975 		rxfifosz = priv->dma_cap.rx_fifo_size;
1976 	if (txfifosz == 0)
1977 		txfifosz = priv->dma_cap.tx_fifo_size;
1978 
1979 	/* Adjust for real per queue fifo size */
1980 	rxfifosz /= rx_channels_count;
1981 	txfifosz /= tx_channels_count;
1982 
1983 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1984 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
1985 				rxqmode);
1986 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
1987 				txqmode);
1988 	} else {
1989 		stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
1990 	}
1991 }
1992 
1993 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1994 {
1995 	int ret = false;
1996 
1997 	/* Safety features are only available in cores >= 5.10 */
1998 	if (priv->synopsys_id < DWMAC_CORE_5_10)
1999 		return ret;
2000 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2001 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2002 	if (ret && (ret != -EINVAL)) {
2003 		stmmac_global_err(priv);
2004 		return true;
2005 	}
2006 
2007 	return false;
2008 }
2009 
2010 /**
2011  * stmmac_dma_interrupt - DMA ISR
2012  * @priv: driver private structure
2013  * Description: this is the DMA ISR. It is called by the main ISR.
2014  * It calls the dwmac dma routine and schedules the poll method in case
2015  * some work can be done.
2016  */
2017 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2018 {
2019 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2020 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2021 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2022 				tx_channel_count : rx_channel_count;
2023 	u32 chan;
2024 	bool poll_scheduled = false;
2025 	int status[channels_to_check];
2026 
2027 	/* Each DMA channel can be used for rx and tx simultaneously, yet
2028 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2029 	 * stmmac_channel struct.
2030 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
2031 	 * all tx queues rather than just a single tx queue.
2032 	 */
2033 	for (chan = 0; chan < channels_to_check; chan++)
2034 		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2035 				&priv->xstats, chan);
2036 
2037 	for (chan = 0; chan < rx_channel_count; chan++) {
2038 		if (likely(status[chan] & handle_rx)) {
2039 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2040 
2041 			if (likely(napi_schedule_prep(&rx_q->napi))) {
2042 				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2043 				__napi_schedule(&rx_q->napi);
2044 				poll_scheduled = true;
2045 			}
2046 		}
2047 	}
2048 
2049 	/* If we scheduled poll, we already know that tx queues will be checked.
2050 	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2051 	 * completed transmission, if so, call stmmac_poll (once).
2052 	 */
2053 	if (!poll_scheduled) {
2054 		for (chan = 0; chan < tx_channel_count; chan++) {
2055 			if (status[chan] & handle_tx) {
2056 				/* It doesn't matter what rx queue we choose
2057 				 * here. We use 0 since it always exists.
2058 				 */
2059 				struct stmmac_rx_queue *rx_q =
2060 					&priv->rx_queue[0];
2061 
2062 				if (likely(napi_schedule_prep(&rx_q->napi))) {
2063 					stmmac_disable_dma_irq(priv,
2064 							priv->ioaddr, chan);
2065 					__napi_schedule(&rx_q->napi);
2066 				}
2067 				break;
2068 			}
2069 		}
2070 	}
2071 
2072 	for (chan = 0; chan < tx_channel_count; chan++) {
2073 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2074 			/* Try to bump up the dma threshold on this failure */
2075 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2076 			    (tc <= 256)) {
2077 				tc += 64;
2078 				if (priv->plat->force_thresh_dma_mode)
2079 					stmmac_set_dma_operation_mode(priv,
2080 								      tc,
2081 								      tc,
2082 								      chan);
2083 				else
2084 					stmmac_set_dma_operation_mode(priv,
2085 								    tc,
2086 								    SF_DMA_MODE,
2087 								    chan);
2088 				priv->xstats.threshold = tc;
2089 			}
2090 		} else if (unlikely(status[chan] == tx_hard_error)) {
2091 			stmmac_tx_err(priv, chan);
2092 		}
2093 	}
2094 }
2095 
2096 /**
2097  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2098  * @priv: driver private structure
2099  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2100  */
2101 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2102 {
2103 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2104 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2105 
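	/* The PTP and MMC register blocks live at different offsets on
	 * GMAC4 and on the older GMAC3.x cores.
	 */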
2106 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2107 		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2108 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2109 	} else {
2110 		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2111 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2112 	}
2113 
2114 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
2115 
2116 	if (priv->dma_cap.rmon) {
2117 		dwmac_mmc_ctrl(priv->mmcaddr, mode);
2118 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2119 	} else
2120 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2121 }
2122 
2123 /**
2124  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2125  * @priv: driver private structure
2126  * Description:
2127  *  newer GMAC chip generations have a dedicated register to indicate the
2128  *  presence of the optional features/functions.
2129  *  This can also be used to override the values passed through the
2130  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2131  */
2132 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2133 {
2134 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2135 }
2136 
2137 /**
2138  * stmmac_check_ether_addr - check if the MAC addr is valid
2139  * @priv: driver private structure
2140  * Description:
2141  * it verifies that the MAC address is valid; if not, it reads the address
2142  * from the HW and, if that also fails, generates a random one
2143  */
2144 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2145 {
2146 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2147 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2148 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2149 			eth_hw_addr_random(priv->dev);
2150 		netdev_info(priv->dev, "device MAC address %pM\n",
2151 			    priv->dev->dev_addr);
2152 	}
2153 }
2154 
2155 /**
2156  * stmmac_init_dma_engine - DMA init.
2157  * @priv: driver private structure
2158  * Description:
2159  * It inits the DMA invoking the specific MAC/GMAC callback.
2160  * Some DMA parameters can be passed from the platform;
2161  * in case these are not passed, a default is kept for the MAC or GMAC.
2162  */
2163 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2164 {
2165 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2166 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2167 	struct stmmac_rx_queue *rx_q;
2168 	struct stmmac_tx_queue *tx_q;
2169 	u32 dummy_dma_rx_phy = 0;
2170 	u32 dummy_dma_tx_phy = 0;
2171 	u32 chan = 0;
2172 	int atds = 0;
2173 	int ret = 0;
2174 
2175 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2176 		dev_err(priv->device, "Invalid DMA configuration\n");
2177 		return -EINVAL;
2178 	}
2179 
2180 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2181 		atds = 1;
2182 
2183 	ret = stmmac_reset(priv, priv->ioaddr);
2184 	if (ret) {
2185 		dev_err(priv->device, "Failed to reset the dma\n");
2186 		return ret;
2187 	}
2188 
2189 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2190 		/* DMA Configuration */
2191 		stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
2192 				dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2193 
2194 		/* DMA RX Channel Configuration */
2195 		for (chan = 0; chan < rx_channels_count; chan++) {
2196 			rx_q = &priv->rx_queue[chan];
2197 
2198 			stmmac_init_rx_chan(priv, priv->ioaddr,
2199 					priv->plat->dma_cfg, rx_q->dma_rx_phy,
2200 					chan);
2201 
2202 			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2203 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
2204 			stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2205 					rx_q->rx_tail_addr, chan);
2206 		}
2207 
2208 		/* DMA TX Channel Configuration */
2209 		for (chan = 0; chan < tx_channels_count; chan++) {
2210 			tx_q = &priv->tx_queue[chan];
2211 
2212 			stmmac_init_chan(priv, priv->ioaddr,
2213 					priv->plat->dma_cfg, chan);
2214 
2215 			stmmac_init_tx_chan(priv, priv->ioaddr,
2216 					priv->plat->dma_cfg, tx_q->dma_tx_phy,
2217 					chan);
2218 
2219 			tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2220 				    (DMA_TX_SIZE * sizeof(struct dma_desc));
2221 			stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2222 					tx_q->tx_tail_addr, chan);
2223 		}
2224 	} else {
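		/* Cores older than 4.00 have a single DMA channel, so
		 * configure it directly (chan is still 0 here).
		 */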
2225 		rx_q = &priv->rx_queue[chan];
2226 		tx_q = &priv->tx_queue[chan];
2227 		stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
2228 				tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2229 	}
2230 
2231 	if (priv->plat->axi)
2232 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2233 
2234 	return ret;
2235 }
2236 
2237 /**
2238  * stmmac_tx_timer - mitigation sw timer for tx.
2239  * @t: timer_list pointer
2240  * Description:
2241  * This is the timer handler to directly invoke the stmmac_tx_clean.
2242  */
2243 static void stmmac_tx_timer(struct timer_list *t)
2244 {
2245 	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2246 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2247 	u32 queue;
2248 
2249 	/* let's scan all the tx queues */
2250 	for (queue = 0; queue < tx_queues_count; queue++)
2251 		stmmac_tx_clean(priv, queue);
2252 }
2253 
2254 /**
2255  * stmmac_init_tx_coalesce - init tx mitigation options.
2256  * @priv: driver private structure
2257  * Description:
2258  * This inits the transmit coalesce parameters: i.e. timer rate,
2259  * timer handler and default threshold used for enabling the
2260  * interrupt on completion bit.
2261  */
2262 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2263 {
2264 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2265 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2266 	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2267 	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2268 	add_timer(&priv->txtimer);
2269 }
2270 
2271 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2272 {
2273 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2274 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2275 	u32 chan;
2276 
2277 	/* set TX ring length */
2278 	for (chan = 0; chan < tx_channels_count; chan++)
2279 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2280 				(DMA_TX_SIZE - 1), chan);
2281 
2282 	/* set RX ring length */
2283 	for (chan = 0; chan < rx_channels_count; chan++)
2284 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2285 				(DMA_RX_SIZE - 1), chan);
2286 }
2287 
2288 /**
2289  *  stmmac_set_tx_queue_weight - Set TX queue weight
2290  *  @priv: driver private structure
2291  *  Description: It is used for setting the TX queue weights
2292  */
2293 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2294 {
2295 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2296 	u32 weight;
2297 	u32 queue;
2298 
2299 	for (queue = 0; queue < tx_queues_count; queue++) {
2300 		weight = priv->plat->tx_queues_cfg[queue].weight;
2301 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2302 	}
2303 }
2304 
2305 /**
2306  *  stmmac_configure_cbs - Configure CBS in TX queue
2307  *  @priv: driver private structure
2308  *  Description: It is used for configuring CBS in AVB TX queues
2309  */
2310 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2311 {
2312 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2313 	u32 mode_to_use;
2314 	u32 queue;
2315 
2316 	/* queue 0 is reserved for legacy traffic */
2317 	for (queue = 1; queue < tx_queues_count; queue++) {
2318 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2319 		if (mode_to_use == MTL_QUEUE_DCB)
2320 			continue;
2321 
2322 		stmmac_config_cbs(priv, priv->hw,
2323 				priv->plat->tx_queues_cfg[queue].send_slope,
2324 				priv->plat->tx_queues_cfg[queue].idle_slope,
2325 				priv->plat->tx_queues_cfg[queue].high_credit,
2326 				priv->plat->tx_queues_cfg[queue].low_credit,
2327 				queue);
2328 	}
2329 }
2330 
2331 /**
2332  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX DMA channel
2333  *  @priv: driver private structure
2334  *  Description: It is used for mapping RX queues to RX DMA channels
2335  */
2336 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2337 {
2338 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2339 	u32 queue;
2340 	u32 chan;
2341 
2342 	for (queue = 0; queue < rx_queues_count; queue++) {
2343 		chan = priv->plat->rx_queues_cfg[queue].chan;
2344 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2345 	}
2346 }
2347 
2348 /**
2349  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2350  *  @priv: driver private structure
2351  *  Description: It is used for configuring the RX Queue Priority
2352  */
2353 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2354 {
2355 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2356 	u32 queue;
2357 	u32 prio;
2358 
2359 	for (queue = 0; queue < rx_queues_count; queue++) {
2360 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2361 			continue;
2362 
2363 		prio = priv->plat->rx_queues_cfg[queue].prio;
2364 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2365 	}
2366 }
2367 
2368 /**
2369  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2370  *  @priv: driver private structure
2371  *  Description: It is used for configuring the TX Queue Priority
2372  */
2373 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2374 {
2375 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2376 	u32 queue;
2377 	u32 prio;
2378 
2379 	for (queue = 0; queue < tx_queues_count; queue++) {
2380 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2381 			continue;
2382 
2383 		prio = priv->plat->tx_queues_cfg[queue].prio;
2384 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2385 	}
2386 }
2387 
2388 /**
2389  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2390  *  @priv: driver private structure
2391  *  Description: It is used for configuring the RX queue routing
2392  */
2393 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2394 {
2395 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2396 	u32 queue;
2397 	u8 packet;
2398 
2399 	for (queue = 0; queue < rx_queues_count; queue++) {
2400 		/* no specific packet type routing specified for the queue */
2401 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2402 			continue;
2403 
2404 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2405 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2406 	}
2407 }
2408 
2409 /**
2410  *  stmmac_mtl_configuration - Configure MTL
2411  *  @priv: driver private structure
2412  *  Description: It is used for configuring MTL
2413  */
2414 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2415 {
2416 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2417 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2418 
2419 	if (tx_queues_count > 1)
2420 		stmmac_set_tx_queue_weight(priv);
2421 
2422 	/* Configure MTL RX algorithms */
2423 	if (rx_queues_count > 1)
2424 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2425 				priv->plat->rx_sched_algorithm);
2426 
2427 	/* Configure MTL TX algorithms */
2428 	if (tx_queues_count > 1)
2429 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2430 				priv->plat->tx_sched_algorithm);
2431 
2432 	/* Configure CBS in AVB TX queues */
2433 	if (tx_queues_count > 1)
2434 		stmmac_configure_cbs(priv);
2435 
2436 	/* Map RX MTL to DMA channels */
2437 	stmmac_rx_queue_dma_chan_map(priv);
2438 
2439 	/* Enable MAC RX Queues */
2440 	stmmac_mac_enable_rx_queues(priv);
2441 
2442 	/* Set RX priorities */
2443 	if (rx_queues_count > 1)
2444 		stmmac_mac_config_rx_queues_prio(priv);
2445 
2446 	/* Set TX priorities */
2447 	if (tx_queues_count > 1)
2448 		stmmac_mac_config_tx_queues_prio(priv);
2449 
2450 	/* Set RX routing */
2451 	if (rx_queues_count > 1)
2452 		stmmac_mac_config_rx_queues_routing(priv);
2453 }
2454 
2455 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2456 {
2457 	if (priv->dma_cap.asp) {
2458 		netdev_info(priv->dev, "Enabling Safety Features\n");
2459 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2460 	} else {
2461 		netdev_info(priv->dev, "No Safety Features support found\n");
2462 	}
2463 }
2464 
2465 /**
2466  * stmmac_hw_setup - setup MAC in a usable state.
2467  *  @dev : pointer to the device structure.
 *  @init_ptp : if true, also initialize PTP
2468  *  Description:
2469  *  this is the main function to setup the HW in a usable state: the
2470  *  dma engine is reset, the core registers are configured (e.g. AXI,
2471  *  Checksum features, timers). The DMA is ready to start receiving and
2472  *  transmitting.
2473  *  Return value:
2474  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2475  *  file on failure.
2476  */
2477 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2478 {
2479 	struct stmmac_priv *priv = netdev_priv(dev);
2480 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2481 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2482 	u32 chan;
2483 	int ret;
2484 
2485 	/* DMA initialization and SW reset */
2486 	ret = stmmac_init_dma_engine(priv);
2487 	if (ret < 0) {
2488 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2489 			   __func__);
2490 		return ret;
2491 	}
2492 
2493 	/* Copy the MAC addr into the HW  */
2494 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2495 
2496 	/* PS and related bits will be programmed according to the speed */
2497 	if (priv->hw->pcs) {
2498 		int speed = priv->plat->mac_port_sel_speed;
2499 
2500 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2501 		    (speed == SPEED_1000)) {
2502 			priv->hw->ps = speed;
2503 		} else {
2504 			dev_warn(priv->device, "invalid port speed\n");
2505 			priv->hw->ps = 0;
2506 		}
2507 	}
2508 
2509 	/* Initialize the MAC Core */
2510 	stmmac_core_init(priv, priv->hw, dev);
2511 
2512 	/* Initialize MTL */
2513 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
2514 		stmmac_mtl_configuration(priv);
2515 
2516 	/* Initialize Safety Features */
2517 	if (priv->synopsys_id >= DWMAC_CORE_5_10)
2518 		stmmac_safety_feat_configuration(priv);
2519 
2520 	ret = stmmac_rx_ipc(priv, priv->hw);
2521 	if (!ret) {
2522 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2523 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2524 		priv->hw->rx_csum = 0;
2525 	}
2526 
2527 	/* Enable the MAC Rx/Tx */
2528 	stmmac_mac_set(priv, priv->ioaddr, true);
2529 
2530 	/* Set the HW DMA mode and the COE */
2531 	stmmac_dma_operation_mode(priv);
2532 
2533 	stmmac_mmc_setup(priv);
2534 
2535 	if (init_ptp) {
2536 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2537 		if (ret < 0)
2538 			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2539 
2540 		ret = stmmac_init_ptp(priv);
2541 		if (ret == -EOPNOTSUPP)
2542 			netdev_warn(priv->dev, "PTP not supported by HW\n");
2543 		else if (ret)
2544 			netdev_warn(priv->dev, "PTP init failed\n");
2545 	}
2546 
2547 #ifdef CONFIG_DEBUG_FS
2548 	ret = stmmac_init_fs(dev);
2549 	if (ret < 0)
2550 		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2551 			    __func__);
2552 #endif
2553 	/* Start the ball rolling... */
2554 	stmmac_start_all_dma(priv);
2555 
2556 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2557 
2558 	if (priv->use_riwt) {
2559 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2560 		if (!ret)
2561 			priv->rx_riwt = MAX_DMA_RIWT;
2562 	}
2563 
2564 	if (priv->hw->pcs)
2565 		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2566 
2567 	/* set TX and RX rings length */
2568 	stmmac_set_rings_length(priv);
2569 
2570 	/* Enable TSO */
2571 	if (priv->tso) {
2572 		for (chan = 0; chan < tx_cnt; chan++)
2573 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2574 	}
2575 
2576 	return 0;
2577 }
2578 
2579 static void stmmac_hw_teardown(struct net_device *dev)
2580 {
2581 	struct stmmac_priv *priv = netdev_priv(dev);
2582 
2583 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2584 }
2585 
2586 /**
2587  *  stmmac_open - open entry point of the driver
2588  *  @dev : pointer to the device structure.
2589  *  Description:
2590  *  This function is the open entry point of the driver.
2591  *  Return value:
2592  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2593  *  file on failure.
2594  */
2595 static int stmmac_open(struct net_device *dev)
2596 {
2597 	struct stmmac_priv *priv = netdev_priv(dev);
2598 	int ret;
2599 
2600 	stmmac_check_ether_addr(priv);
2601 
2602 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2603 	    priv->hw->pcs != STMMAC_PCS_TBI &&
2604 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
2605 		ret = stmmac_init_phy(dev);
2606 		if (ret) {
2607 			netdev_err(priv->dev,
2608 				   "%s: Cannot attach to PHY (error: %d)\n",
2609 				   __func__, ret);
2610 			return ret;
2611 		}
2612 	}
2613 
2614 	/* Extra statistics */
2615 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2616 	priv->xstats.threshold = tc;
2617 
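	/* Cache-align the DMA buffer size taken from the module parameter */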
2618 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2619 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2620 
2621 	ret = alloc_dma_desc_resources(priv);
2622 	if (ret < 0) {
2623 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2624 			   __func__);
2625 		goto dma_desc_error;
2626 	}
2627 
2628 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2629 	if (ret < 0) {
2630 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2631 			   __func__);
2632 		goto init_error;
2633 	}
2634 
2635 	ret = stmmac_hw_setup(dev, true);
2636 	if (ret < 0) {
2637 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2638 		goto init_error;
2639 	}
2640 
2641 	stmmac_init_tx_coalesce(priv);
2642 
2643 	if (dev->phydev)
2644 		phy_start(dev->phydev);
2645 
2646 	/* Request the IRQ lines */
2647 	ret = request_irq(dev->irq, stmmac_interrupt,
2648 			  IRQF_SHARED, dev->name, dev);
2649 	if (unlikely(ret < 0)) {
2650 		netdev_err(priv->dev,
2651 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2652 			   __func__, dev->irq, ret);
2653 		goto irq_error;
2654 	}
2655 
2656 	/* Request the Wake IRQ in case another line is used for WoL */
2657 	if (priv->wol_irq != dev->irq) {
2658 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2659 				  IRQF_SHARED, dev->name, dev);
2660 		if (unlikely(ret < 0)) {
2661 			netdev_err(priv->dev,
2662 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2663 				   __func__, priv->wol_irq, ret);
2664 			goto wolirq_error;
2665 		}
2666 	}
2667 
2668 	/* Request the LPI IRQ in case a separate line is used for it */
2669 	if (priv->lpi_irq > 0) {
2670 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2671 				  dev->name, dev);
2672 		if (unlikely(ret < 0)) {
2673 			netdev_err(priv->dev,
2674 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2675 				   __func__, priv->lpi_irq, ret);
2676 			goto lpiirq_error;
2677 		}
2678 	}
2679 
2680 	stmmac_enable_all_queues(priv);
2681 	stmmac_start_all_queues(priv);
2682 
2683 	return 0;
2684 
2685 lpiirq_error:
2686 	if (priv->wol_irq != dev->irq)
2687 		free_irq(priv->wol_irq, dev);
2688 wolirq_error:
2689 	free_irq(dev->irq, dev);
2690 irq_error:
2691 	if (dev->phydev)
2692 		phy_stop(dev->phydev);
2693 
2694 	del_timer_sync(&priv->txtimer);
2695 	stmmac_hw_teardown(dev);
2696 init_error:
2697 	free_dma_desc_resources(priv);
2698 dma_desc_error:
2699 	if (dev->phydev)
2700 		phy_disconnect(dev->phydev);
2701 
2702 	return ret;
2703 }
2704 
2705 /**
2706  *  stmmac_release - close entry point of the driver
2707  *  @dev : device pointer.
2708  *  Description:
2709  *  This is the stop entry point of the driver.
2710  */
2711 static int stmmac_release(struct net_device *dev)
2712 {
2713 	struct stmmac_priv *priv = netdev_priv(dev);
2714 
2715 	if (priv->eee_enabled)
2716 		del_timer_sync(&priv->eee_ctrl_timer);
2717 
2718 	/* Stop and disconnect the PHY */
2719 	if (dev->phydev) {
2720 		phy_stop(dev->phydev);
2721 		phy_disconnect(dev->phydev);
2722 	}
2723 
2724 	stmmac_stop_all_queues(priv);
2725 
2726 	stmmac_disable_all_queues(priv);
2727 
2728 	del_timer_sync(&priv->txtimer);
2729 
2730 	/* Free the IRQ lines */
2731 	free_irq(dev->irq, dev);
2732 	if (priv->wol_irq != dev->irq)
2733 		free_irq(priv->wol_irq, dev);
2734 	if (priv->lpi_irq > 0)
2735 		free_irq(priv->lpi_irq, dev);
2736 
2737 	/* Stop TX/RX DMA and clear the descriptors */
2738 	stmmac_stop_all_dma(priv);
2739 
2740 	/* Release and free the Rx/Tx resources */
2741 	free_dma_desc_resources(priv);
2742 
2743 	/* Disable the MAC Rx/Tx */
2744 	stmmac_mac_set(priv, priv->ioaddr, false);
2745 
2746 	netif_carrier_off(dev);
2747 
2748 #ifdef CONFIG_DEBUG_FS
2749 	stmmac_exit_fs(dev);
2750 #endif
2751 
2752 	stmmac_release_ptp(priv);
2753 
2754 	return 0;
2755 }
2756 
2757 /**
2758  *  stmmac_tso_allocator - fill TSO descriptors for a buffer
2759  *  @priv: driver private structure
2760  *  @des: buffer start address
2761  *  @total_len: total length to fill in descriptors
2762  *  @last_segment: condition for the last descriptor
2763  *  @queue: TX queue index
2764  *  Description:
2765  *  This function fills descriptors and requests new descriptors according
2766  *  to the buffer length to fill
2767  */
2768 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2769 				 int total_len, bool last_segment, u32 queue)
2770 {
2771 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2772 	struct dma_desc *desc;
2773 	u32 buff_size;
2774 	int tmp_len;
2775 
2776 	tmp_len = total_len;
2777 
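	/* Split the remaining payload into chunks of at most
	 * TSO_MAX_BUFF_SIZE bytes, using one descriptor per chunk.
	 */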
2778 	while (tmp_len > 0) {
2779 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2780 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2781 		desc = tx_q->dma_tx + tx_q->cur_tx;
2782 
2783 		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2784 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2785 			    TSO_MAX_BUFF_SIZE : tmp_len;
2786 
2787 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2788 				0, 1,
2789 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2790 				0, 0);
2791 
2792 		tmp_len -= TSO_MAX_BUFF_SIZE;
2793 	}
2794 }
2795 
2796 /**
2797  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2798  *  @skb : the socket buffer
2799  *  @dev : device pointer
2800  *  Description: this is the transmit function that is called on TSO frames
2801  *  (support available on GMAC4 and newer chips).
2802  *  Diagram below shows the ring programming in case of TSO frames:
2803  *
2804  *  First Descriptor
2805  *   --------
2806  *   | DES0 |---> buffer1 = L2/L3/L4 header
2807  *   | DES1 |---> TCP Payload (can continue on next descr...)
2808  *   | DES2 |---> buffer 1 and 2 len
2809  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2810  *   --------
2811  *	|
2812  *     ...
2813  *	|
2814  *   --------
2815  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2816  *   | DES1 | --|
2817  *   | DES2 | --> buffer 1 and 2 len
2818  *   | DES3 |
2819  *   --------
2820  *
2821  * mss is fixed when TSO is enabled, so the TDES3 ctx field is only set when it changes.
2822  */
2823 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2824 {
2825 	struct dma_desc *desc, *first, *mss_desc = NULL;
2826 	struct stmmac_priv *priv = netdev_priv(dev);
2827 	int nfrags = skb_shinfo(skb)->nr_frags;
2828 	u32 queue = skb_get_queue_mapping(skb);
2829 	unsigned int first_entry, des;
2830 	struct stmmac_tx_queue *tx_q;
2831 	int tmp_pay_len = 0;
2832 	u32 pay_len, mss;
2833 	u8 proto_hdr_len;
2834 	int i;
2835 
2836 	tx_q = &priv->tx_queue[queue];
2837 
2838 	/* Compute header lengths */
2839 	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2840 
2841 	/* Desc availability check based on the threshold should be safe enough */
2842 	if (unlikely(stmmac_tx_avail(priv, queue) <
2843 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2844 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2845 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2846 								queue));
2847 			/* This is a hard error, log it. */
2848 			netdev_err(priv->dev,
2849 				   "%s: Tx Ring full when queue awake\n",
2850 				   __func__);
2851 		}
2852 		return NETDEV_TX_BUSY;
2853 	}
2854 
2855 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2856 
2857 	mss = skb_shinfo(skb)->gso_size;
2858 
2859 	/* set new MSS value if needed */
2860 	if (mss != tx_q->mss) {
2861 		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2862 		stmmac_set_mss(priv, mss_desc, mss);
2863 		tx_q->mss = mss;
2864 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2865 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2866 	}
2867 
2868 	if (netif_msg_tx_queued(priv)) {
2869 		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2870 			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2871 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2872 			skb->data_len);
2873 	}
2874 
2875 	first_entry = tx_q->cur_tx;
2876 	WARN_ON(tx_q->tx_skbuff[first_entry]);
2877 
2878 	desc = tx_q->dma_tx + first_entry;
2879 	first = desc;
2880 
2881 	/* first descriptor: fill Headers on Buf1 */
2882 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2883 			     DMA_TO_DEVICE);
2884 	if (dma_mapping_error(priv->device, des))
2885 		goto dma_map_err;
2886 
2887 	tx_q->tx_skbuff_dma[first_entry].buf = des;
2888 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2889 
2890 	first->des0 = cpu_to_le32(des);
2891 
2892 	/* Fill start of payload in buff2 of first descriptor */
2893 	if (pay_len)
2894 		first->des1 = cpu_to_le32(des + proto_hdr_len);
2895 
2896 	/* If needed take extra descriptors to fill the remaining payload */
2897 	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2898 
2899 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2900 
2901 	/* Prepare fragments */
2902 	for (i = 0; i < nfrags; i++) {
2903 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2904 
2905 		des = skb_frag_dma_map(priv->device, frag, 0,
2906 				       skb_frag_size(frag),
2907 				       DMA_TO_DEVICE);
2908 		if (dma_mapping_error(priv->device, des))
2909 			goto dma_map_err;
2910 
2911 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2912 				     (i == nfrags - 1), queue);
2913 
2914 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2915 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2916 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2917 	}
2918 
2919 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2920 
2921 	/* Only the last descriptor gets to point to the skb. */
2922 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2923 
2924 	/* We've used all descriptors we need for this skb, however,
2925 	 * advance cur_tx so that it references a fresh descriptor.
2926 	 * ndo_start_xmit will fill this descriptor the next time it's
2927 	 * called and stmmac_tx_clean may clean up to this descriptor.
2928 	 */
2929 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2930 
2931 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2932 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2933 			  __func__);
2934 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2935 	}
2936 
2937 	dev->stats.tx_bytes += skb->len;
2938 	priv->xstats.tx_tso_frames++;
2939 	priv->xstats.tx_tso_nfrags += nfrags;
2940 
2941 	/* Manage tx mitigation */
2942 	priv->tx_count_frames += nfrags + 1;
2943 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2944 		mod_timer(&priv->txtimer,
2945 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
2946 	} else {
2947 		priv->tx_count_frames = 0;
2948 		stmmac_set_tx_ic(priv, desc);
2949 		priv->xstats.tx_set_ic_bit++;
2950 	}
2951 
2952 	skb_tx_timestamp(skb);
2953 
2954 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2955 		     priv->hwts_tx_en)) {
2956 		/* declare that device is doing timestamping */
2957 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2958 		stmmac_enable_tx_timestamp(priv, first);
2959 	}
2960 
2961 	/* Complete the first descriptor before granting the DMA */
2962 	stmmac_prepare_tso_tx_desc(priv, first, 1,
2963 			proto_hdr_len,
2964 			pay_len,
2965 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2966 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2967 
2968 	/* If context desc is used to change MSS */
2969 	if (mss_desc) {
2970 		/* Make sure that first descriptor has been completely
2971 		 * written, including its own bit. This is because MSS is
2972 		 * actually before first descriptor, so we need to make
2973 		 * sure that MSS's own bit is the last thing written.
2974 		 */
2975 		dma_wmb();
2976 		stmmac_set_tx_owner(priv, mss_desc);
2977 	}
2978 
2979 	/* The own bit must be the latest setting done when preparing the
2980 	 * descriptor, and then a barrier is needed to make sure that
2981 	 * all is coherent before granting the DMA engine.
2982 	 */
2983 	wmb();
2984 
2985 	if (netif_msg_pktdata(priv)) {
2986 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2987 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2988 			tx_q->cur_tx, first, nfrags);
2989 
2990 		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2991 
2992 		pr_info(">>> frame to be transmitted: ");
2993 		print_pkt(skb->data, skb_headlen(skb));
2994 	}
2995 
2996 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2997 
2998 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2999 
3000 	return NETDEV_TX_OK;
3001 
3002 dma_map_err:
3003 	dev_err(priv->device, "Tx dma map failed\n");
3004 	dev_kfree_skb(skb);
3005 	priv->dev->stats.tx_dropped++;
3006 	return NETDEV_TX_OK;
3007 }
3008 
3009 /**
3010  *  stmmac_xmit - Tx entry point of the driver
3011  *  @skb : the socket buffer
3012  *  @dev : device pointer
3013  *  Description : this is the tx entry point of the driver.
3014  *  It programs the chain or the ring and supports oversized frames
3015  *  and the SG feature.
3016  */
3017 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3018 {
3019 	struct stmmac_priv *priv = netdev_priv(dev);
3020 	unsigned int nopaged_len = skb_headlen(skb);
3021 	int i, csum_insertion = 0, is_jumbo = 0;
3022 	u32 queue = skb_get_queue_mapping(skb);
3023 	int nfrags = skb_shinfo(skb)->nr_frags;
3024 	int entry;
3025 	unsigned int first_entry;
3026 	struct dma_desc *desc, *first;
3027 	struct stmmac_tx_queue *tx_q;
3028 	unsigned int enh_desc;
3029 	unsigned int des;
3030 
3031 	tx_q = &priv->tx_queue[queue];
3032 
3033 	/* Manage oversized TCP frames for GMAC4 device */
3034 	if (skb_is_gso(skb) && priv->tso) {
3035 		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3036 			return stmmac_tso_xmit(skb, dev);
3037 	}
3038 
3039 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3040 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3041 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3042 								queue));
3043 			/* This is a hard error, log it. */
3044 			netdev_err(priv->dev,
3045 				   "%s: Tx Ring full when queue awake\n",
3046 				   __func__);
3047 		}
3048 		return NETDEV_TX_BUSY;
3049 	}
3050 
3051 	if (priv->tx_path_in_lpi_mode)
3052 		stmmac_disable_eee_mode(priv);
3053 
3054 	entry = tx_q->cur_tx;
3055 	first_entry = entry;
3056 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3057 
3058 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3059 
3060 	if (likely(priv->extend_desc))
3061 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3062 	else
3063 		desc = tx_q->dma_tx + entry;
3064 
3065 	first = desc;
3066 
3067 	enh_desc = priv->plat->enh_desc;
3068 	/* To program the descriptors according to the size of the frame */
3069 	if (enh_desc)
3070 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3071 
3072 	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3073 					 DWMAC_CORE_4_00)) {
3074 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3075 		if (unlikely(entry < 0))
3076 			goto dma_map_err;
3077 	}
3078 
3079 	for (i = 0; i < nfrags; i++) {
3080 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3081 		int len = skb_frag_size(frag);
3082 		bool last_segment = (i == (nfrags - 1));
3083 
3084 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3085 		WARN_ON(tx_q->tx_skbuff[entry]);
3086 
3087 		if (likely(priv->extend_desc))
3088 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3089 		else
3090 			desc = tx_q->dma_tx + entry;
3091 
3092 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3093 				       DMA_TO_DEVICE);
3094 		if (dma_mapping_error(priv->device, des))
3095 			goto dma_map_err; /* should reuse desc w/o issues */
3096 
3097 		tx_q->tx_skbuff_dma[entry].buf = des;
3098 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3099 			desc->des0 = cpu_to_le32(des);
3100 		else
3101 			desc->des2 = cpu_to_le32(des);
3102 
3103 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3104 		tx_q->tx_skbuff_dma[entry].len = len;
3105 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3106 
3107 		/* Prepare the descriptor and set the own bit too */
3108 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3109 				priv->mode, 1, last_segment, skb->len);
3110 	}
3111 
3112 	/* Only the last descriptor gets to point to the skb. */
3113 	tx_q->tx_skbuff[entry] = skb;
3114 
3115 	/* We've used all descriptors we need for this skb, however,
3116 	 * advance cur_tx so that it references a fresh descriptor.
3117 	 * ndo_start_xmit will fill this descriptor the next time it's
3118 	 * called and stmmac_tx_clean may clean up to this descriptor.
3119 	 */
3120 	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3121 	tx_q->cur_tx = entry;
3122 
3123 	if (netif_msg_pktdata(priv)) {
3124 		void *tx_head;
3125 
3126 		netdev_dbg(priv->dev,
3127 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3128 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3129 			   entry, first, nfrags);
3130 
3131 		if (priv->extend_desc)
3132 			tx_head = (void *)tx_q->dma_etx;
3133 		else
3134 			tx_head = (void *)tx_q->dma_tx;
3135 
3136 		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3137 
3138 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3139 		print_pkt(skb->data, skb->len);
3140 	}
3141 
3142 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3143 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
3144 			  __func__);
3145 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3146 	}
3147 
3148 	dev->stats.tx_bytes += skb->len;
3149 
3150 	/* According to the coalesce parameter the IC bit for the latest
3151 	 * segment is reset and the timer re-started to clean the tx status.
3152 	 * This approach takes care of the fragments: desc is the first
3153 	 * element in case of no SG.
3154 	 */
3155 	priv->tx_count_frames += nfrags + 1;
3156 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3157 		mod_timer(&priv->txtimer,
3158 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
3159 	} else {
3160 		priv->tx_count_frames = 0;
3161 		stmmac_set_tx_ic(priv, desc);
3162 		priv->xstats.tx_set_ic_bit++;
3163 	}
3164 
3165 	skb_tx_timestamp(skb);
3166 
3167 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3168 	 * problems because all the descriptors are actually ready to be
3169 	 * passed to the DMA engine.
3170 	 */
3171 	if (likely(!is_jumbo)) {
3172 		bool last_segment = (nfrags == 0);
3173 
3174 		des = dma_map_single(priv->device, skb->data,
3175 				     nopaged_len, DMA_TO_DEVICE);
3176 		if (dma_mapping_error(priv->device, des))
3177 			goto dma_map_err;
3178 
3179 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3180 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3181 			first->des0 = cpu_to_le32(des);
3182 		else
3183 			first->des2 = cpu_to_le32(des);
3184 
3185 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3186 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3187 
3188 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3189 			     priv->hwts_tx_en)) {
3190 			/* declare that device is doing timestamping */
3191 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3192 			stmmac_enable_tx_timestamp(priv, first);
3193 		}
3194 
3195 		/* Prepare the first descriptor setting the OWN bit too */
3196 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3197 				csum_insertion, priv->mode, 1, last_segment,
3198 				skb->len);
3199 
3200 		/* The own bit must be the latest setting done when preparing the
3201 		 * descriptor, and then a barrier is needed to make sure that
3202 		 * all is coherent before granting the DMA engine.
3203 		 */
3204 		wmb();
3205 	}
3206 
3207 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3208 
3209 	if (priv->synopsys_id < DWMAC_CORE_4_00)
3210 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
3211 	else
3212 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3213 				queue);
3214 
3215 	return NETDEV_TX_OK;
3216 
3217 dma_map_err:
3218 	netdev_err(priv->dev, "Tx DMA map failed\n");
3219 	dev_kfree_skb(skb);
3220 	priv->dev->stats.tx_dropped++;
3221 	return NETDEV_TX_OK;
3222 }
3223 
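/* Software fallback for VLAN RX offloading: if the feature is enabled but
 * the tag is still in the packet, pop it and record it in the skb.
 */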
3224 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3225 {
3226 	struct ethhdr *ehdr;
3227 	u16 vlanid;
3228 
3229 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3230 	    NETIF_F_HW_VLAN_CTAG_RX &&
3231 	    !__vlan_get_tag(skb, &vlanid)) {
3232 		/* pop the vlan tag */
3233 		ehdr = (struct ethhdr *)skb->data;
3234 		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3235 		skb_pull(skb, VLAN_HLEN);
3236 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3237 	}
3238 }
3239 
3240 
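/* Return 1 when the zero-copy threshold counter has reached
 * STMMAC_RX_THRESH (e.g. after an skb allocation failure), so that
 * reception falls back to copying for a while.
 */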
3241 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3242 {
3243 	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3244 		return 0;
3245 
3246 	return 1;
3247 }
3248 
3249 /**
3250  * stmmac_rx_refill - refill used skb preallocated buffers
3251  * @priv: driver private structure
3252  * @queue: RX queue index
3253  * Description : this reallocates the skbs for the reception process,
3254  * which is based on zero-copy.
3255  */
3256 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3257 {
3258 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3259 	int dirty = stmmac_rx_dirty(priv, queue);
3260 	unsigned int entry = rx_q->dirty_rx;
3261 
3262 	int bfsize = priv->dma_buf_sz;
3263 
3264 	while (dirty-- > 0) {
3265 		struct dma_desc *p;
3266 
3267 		if (priv->extend_desc)
3268 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3269 		else
3270 			p = rx_q->dma_rx + entry;
3271 
3272 		if (likely(!rx_q->rx_skbuff[entry])) {
3273 			struct sk_buff *skb;
3274 
3275 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3276 			if (unlikely(!skb)) {
3277 				/* so for a while no zero-copy! */
3278 				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3279 				if (unlikely(net_ratelimit()))
3280 					dev_err(priv->device,
3281 						"fail to alloc skb entry %d\n",
3282 						entry);
3283 				break;
3284 			}
3285 
3286 			rx_q->rx_skbuff[entry] = skb;
3287 			rx_q->rx_skbuff_dma[entry] =
3288 			    dma_map_single(priv->device, skb->data, bfsize,
3289 					   DMA_FROM_DEVICE);
3290 			if (dma_mapping_error(priv->device,
3291 					      rx_q->rx_skbuff_dma[entry])) {
3292 				netdev_err(priv->dev, "Rx DMA map failed\n");
3293 				dev_kfree_skb(skb);
3294 				break;
3295 			}
3296 
3297 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3298 				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3299 				p->des1 = 0;
3300 			} else {
3301 				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3302 			}
3303 
3304 			stmmac_refill_desc3(priv, rx_q, p);
3305 
3306 			if (rx_q->rx_zeroc_thresh > 0)
3307 				rx_q->rx_zeroc_thresh--;
3308 
3309 			netif_dbg(priv, rx_status, priv->dev,
3310 				  "refill entry #%d\n", entry);
3311 		}
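		/* Make sure the buffer address writes above reach memory
		 * before the descriptor is handed back to the DMA below.
		 */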
3312 		dma_wmb();
3313 
3314 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3315 			stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
3316 		else
3317 			stmmac_set_rx_owner(priv, p);
3318 
3319 		dma_wmb();
3320 
3321 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3322 	}
3323 	rx_q->dirty_rx = entry;
3324 }
3325 
3326 /**
3327  * stmmac_rx - manage the receive process
3328  * @priv: driver private structure
3329  * @limit: napi budget
3330  * @queue: RX queue index.
3331  * Description :  this is the function called by the napi poll method.
3332  * It gets all the frames inside the ring.
3333  */
3334 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3335 {
3336 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3337 	unsigned int entry = rx_q->cur_rx;
3338 	int coe = priv->hw->rx_csum;
3339 	unsigned int next_entry;
3340 	unsigned int count = 0;
3341 
3342 	if (netif_msg_rx_status(priv)) {
3343 		void *rx_head;
3344 
3345 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3346 		if (priv->extend_desc)
3347 			rx_head = (void *)rx_q->dma_erx;
3348 		else
3349 			rx_head = (void *)rx_q->dma_rx;
3350 
3351 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3352 	}
3353 	while (count < limit) {
3354 		int status;
3355 		struct dma_desc *p;
3356 		struct dma_desc *np;
3357 
3358 		if (priv->extend_desc)
3359 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3360 		else
3361 			p = rx_q->dma_rx + entry;
3362 
3363 		/* read the status of the incoming frame */
3364 		status = stmmac_rx_status(priv, &priv->dev->stats,
3365 				&priv->xstats, p);
3366 		/* check if managed by the DMA otherwise go ahead */
3367 		if (unlikely(status & dma_own))
3368 			break;
3369 
3370 		count++;
3371 
3372 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3373 		next_entry = rx_q->cur_rx;
3374 
3375 		if (priv->extend_desc)
3376 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3377 		else
3378 			np = rx_q->dma_rx + next_entry;
3379 
3380 		prefetch(np);
3381 
3382 		if (priv->extend_desc)
3383 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3384 					&priv->xstats, rx_q->dma_erx + entry);
3385 		if (unlikely(status == discard_frame)) {
3386 			priv->dev->stats.rx_errors++;
3387 			if (priv->hwts_rx_en && !priv->extend_desc) {
3388 				/* DESC2 & DESC3 will be overwritten by device
3389 				 * with timestamp value, hence reinitialize
3390 				 * them in stmmac_rx_refill() function so that
3391 				 * device can reuse it.
3392 				 */
3393 				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3394 				rx_q->rx_skbuff[entry] = NULL;
3395 				dma_unmap_single(priv->device,
3396 						 rx_q->rx_skbuff_dma[entry],
3397 						 priv->dma_buf_sz,
3398 						 DMA_FROM_DEVICE);
3399 			}
3400 		} else {
3401 			struct sk_buff *skb;
3402 			int frame_len;
3403 			unsigned int des;
3404 
3405 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3406 				des = le32_to_cpu(p->des0);
3407 			else
3408 				des = le32_to_cpu(p->des2);
3409 
3410 			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3411 
3412 			/*  If frame length is greater than skb buffer size
3413 			 *  (preallocated during init) then the packet is
3414 			 *  ignored
3415 			 */
3416 			if (frame_len > priv->dma_buf_sz) {
3417 				netdev_err(priv->dev,
3418 					   "len %d larger than size (%d)\n",
3419 					   frame_len, priv->dma_buf_sz);
3420 				priv->dev->stats.rx_length_errors++;
3421 				break;
3422 			}
3423 
3424 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3425 			 * Type frames (LLC/LLC-SNAP)
3426 			 *
3427 			 * llc_snap is never checked in GMAC >= 4, so this ACS
3428 			 * feature is always disabled and packets need to be
3429 			 * stripped manually.
3430 			 */
3431 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3432 			    unlikely(status != llc_snap))
3433 				frame_len -= ETH_FCS_LEN;
3434 
3435 			if (netif_msg_rx_status(priv)) {
3436 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3437 					   p, entry, des);
3438 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3439 					   frame_len, status);
3440 			}
3441 
3442 			/* Zero-copy is always used for all sizes in case of
3443 			 * GMAC4 because it always needs to refill the used
3444 			 * descriptors.
3445 			 */
3446 			if (unlikely(!priv->plat->has_gmac4 &&
3447 				     ((frame_len < priv->rx_copybreak) ||
3448 				     stmmac_rx_threshold_count(rx_q)))) {
3449 				skb = netdev_alloc_skb_ip_align(priv->dev,
3450 								frame_len);
3451 				if (unlikely(!skb)) {
3452 					if (net_ratelimit())
3453 						dev_warn(priv->device,
3454 							 "packet dropped\n");
3455 					priv->dev->stats.rx_dropped++;
3456 					break;
3457 				}
3458 
3459 				dma_sync_single_for_cpu(priv->device,
3460 							rx_q->rx_skbuff_dma
3461 							[entry], frame_len,
3462 							DMA_FROM_DEVICE);
3463 				skb_copy_to_linear_data(skb,
3464 							rx_q->
3465 							rx_skbuff[entry]->data,
3466 							frame_len);
3467 
3468 				skb_put(skb, frame_len);
3469 				dma_sync_single_for_device(priv->device,
3470 							   rx_q->rx_skbuff_dma
3471 							   [entry], frame_len,
3472 							   DMA_FROM_DEVICE);
3473 			} else {
3474 				skb = rx_q->rx_skbuff[entry];
3475 				if (unlikely(!skb)) {
3476 					netdev_err(priv->dev,
3477 						   "%s: Inconsistent Rx chain\n",
3478 						   priv->dev->name);
3479 					priv->dev->stats.rx_dropped++;
3480 					break;
3481 				}
3482 				prefetch(skb->data - NET_IP_ALIGN);
3483 				rx_q->rx_skbuff[entry] = NULL;
3484 				rx_q->rx_zeroc_thresh++;
3485 
3486 				skb_put(skb, frame_len);
3487 				dma_unmap_single(priv->device,
3488 						 rx_q->rx_skbuff_dma[entry],
3489 						 priv->dma_buf_sz,
3490 						 DMA_FROM_DEVICE);
3491 			}
3492 
3493 			if (netif_msg_pktdata(priv)) {
3494 				netdev_dbg(priv->dev, "frame received (%d bytes)\n",
3495 					   frame_len);
3496 				print_pkt(skb->data, frame_len);
3497 			}
3498 
3499 			stmmac_get_rx_hwtstamp(priv, p, np, skb);
3500 
3501 			stmmac_rx_vlan(priv->dev, skb);
3502 
3503 			skb->protocol = eth_type_trans(skb, priv->dev);
3504 
3505 			if (unlikely(!coe))
3506 				skb_checksum_none_assert(skb);
3507 			else
3508 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3509 
3510 			napi_gro_receive(&rx_q->napi, skb);
3511 
3512 			priv->dev->stats.rx_packets++;
3513 			priv->dev->stats.rx_bytes += frame_len;
3514 		}
3515 		entry = next_entry;
3516 	}
3517 
3518 	stmmac_rx_refill(priv, queue);
3519 
3520 	priv->xstats.rx_pkt_n += count;
3521 
3522 	return count;
3523 }
3524 
3525 /**
3526  *  stmmac_poll - stmmac poll method (NAPI)
3527  *  @napi : pointer to the napi structure.
3528  *  @budget : maximum number of packets that the current CPU can receive from
3529  *	      all interfaces.
3530  *  Description :
3531  *  Process the incoming frames and clean up the completed TX resources.
3532  */
3533 static int stmmac_poll(struct napi_struct *napi, int budget)
3534 {
3535 	struct stmmac_rx_queue *rx_q =
3536 		container_of(napi, struct stmmac_rx_queue, napi);
3537 	struct stmmac_priv *priv = rx_q->priv_data;
3538 	u32 tx_count = priv->plat->tx_queues_to_use;
3539 	u32 chan = rx_q->queue_index;
3540 	int work_done = 0;
3541 	u32 queue;
3542 
3543 	priv->xstats.napi_poll++;
3544 
3545 	/* check all the queues */
3546 	for (queue = 0; queue < tx_count; queue++)
3547 		stmmac_tx_clean(priv, queue);
3548 
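	/* Only the RX processing below is bounded by the NAPI budget */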
3549 	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3550 	if (work_done < budget) {
3551 		napi_complete_done(napi, work_done);
3552 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3553 	}
3554 	return work_done;
3555 }
3556 
3557 /**
3558  *  stmmac_tx_timeout
3559  *  @dev : Pointer to net device structure
3560  *  Description: this function is called when a packet transmission fails to
3561  *   complete within a reasonable time. The driver will mark the error in the
3562  *   netdev structure and arrange for the device to be reset to a sane state
3563  *   in order to transmit a new packet.
3564  */
3565 static void stmmac_tx_timeout(struct net_device *dev)
3566 {
3567 	struct stmmac_priv *priv = netdev_priv(dev);
3568 
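	/* Request a reset: stmmac_reset_subtask() will bounce the
	 * interface (dev_close()/dev_open()) from the service task.
	 */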
3569 	stmmac_global_err(priv);
3570 }
3571 
3572 /**
3573  *  stmmac_set_rx_mode - entry point for multicast addressing
3574  *  @dev : pointer to the device structure
3575  *  Description:
3576  *  This function is a driver entry point which gets called by the kernel
3577  *  whenever multicast addresses must be enabled/disabled.
3578  *  Return value:
3579  *  void.
3580  */
3581 static void stmmac_set_rx_mode(struct net_device *dev)
3582 {
3583 	struct stmmac_priv *priv = netdev_priv(dev);
3584 
3585 	stmmac_set_filter(priv, priv->hw, dev);
3586 }
3587 
3588 /**
3589  *  stmmac_change_mtu - entry point to change MTU size for the device.
3590  *  @dev : device pointer.
3591  *  @new_mtu : the new MTU size for the device.
3592  *  Description: the Maximum Transmission Unit (MTU) is used by the network
3593  *  layer to drive packet transmission. Ethernet has an MTU of 1500 octets
3594  *  (ETH_DATA_LEN). This value can be changed with "ip link" or ifconfig.
3595  *  Return value:
3596  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3597  *  file on failure.
3598  */
3599 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3600 {
3601 	struct stmmac_priv *priv = netdev_priv(dev);
3602 
3603 	if (netif_running(dev)) {
3604 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3605 		return -EBUSY;
3606 	}
3607 
3608 	dev->mtu = new_mtu;
3609 
3610 	netdev_update_features(dev);
3611 
3612 	return 0;
3613 }
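
/*
 * Note that this driver requires the interface to be brought down first,
 * e.g. (assuming the core supports the requested size):
 *
 *	ip link set eth0 down
 *	ip link set eth0 mtu 2000
 *	ip link set eth0 up
 */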
3614 
3615 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3616 					     netdev_features_t features)
3617 {
3618 	struct stmmac_priv *priv = netdev_priv(dev);
3619 
3620 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3621 		features &= ~NETIF_F_RXCSUM;
3622 
3623 	if (!priv->plat->tx_coe)
3624 		features &= ~NETIF_F_CSUM_MASK;
3625 
3626 	/* Some GMAC devices have buggy Jumbo frame support that
3627 	 * requires the TX COE to be disabled for oversized frames
3628 	 * (due to limited buffer sizes). In this case we disable
3629 	 * the TX csum insertion in the TDES and do not use SF.
3630 	 */
3631 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3632 		features &= ~NETIF_F_CSUM_MASK;
3633 
3634 	/* Enable/disable TSO as requested via ethtool */
3635 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3636 		if (features & NETIF_F_TSO)
3637 			priv->tso = true;
3638 		else
3639 			priv->tso = false;
3640 	}
3641 
3642 	return features;
3643 }
3644 
3645 static int stmmac_set_features(struct net_device *netdev,
3646 			       netdev_features_t features)
3647 {
3648 	struct stmmac_priv *priv = netdev_priv(netdev);
3649 
3650 	/* Keep the COE Type if checksum offload is supported */
3651 	if (features & NETIF_F_RXCSUM)
3652 		priv->hw->rx_csum = priv->plat->rx_coe;
3653 	else
3654 		priv->hw->rx_csum = 0;
3655 	/* No check needed because rx_coe was set earlier and will be
3656 	 * corrected if there is an issue.
3657 	 */
3658 	stmmac_rx_ipc(priv, priv->hw);
3659 
3660 	return 0;
3661 }
3662 
3663 /**
3664  *  stmmac_interrupt - main ISR
3665  *  @irq: interrupt number.
3666  *  @dev_id: to pass the net device pointer.
3667  *  Description: this is the main driver interrupt service routine.
3668  *  It can call:
3669  *  o DMA service routine (to manage incoming frame reception and transmission
3670  *    status)
3671  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3672  *    interrupts.
3673  */
3674 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3675 {
3676 	struct net_device *dev = (struct net_device *)dev_id;
3677 	struct stmmac_priv *priv;
3678 	u32 rx_cnt, tx_cnt, queues_count;
3679 	u32 queue;
3680 
3681 	/* Validate dev before dereferencing it */
3682 	if (unlikely(!dev)) {
3683 		pr_err("%s: invalid dev pointer\n", __func__);
3684 		return IRQ_NONE;
3685 	}
3686 
3687 	priv = netdev_priv(dev);
3688 	rx_cnt = priv->plat->rx_queues_to_use;
3689 	tx_cnt = priv->plat->tx_queues_to_use;
3690 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3691 	if (priv->irq_wake)
3692 		pm_wakeup_event(priv->device, 0);
3693 	/* Check if adapter is up */
3694 	if (test_bit(STMMAC_DOWN, &priv->state))
3695 		return IRQ_HANDLED;
3696 	/* Check if a fatal error happened */
3697 	if (stmmac_safety_feat_interrupt(priv))
3698 		return IRQ_HANDLED;
3699 
3700 	/* Handle the GMAC's own interrupts */
3701 	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3702 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3703 
3704 		if (unlikely(status)) {
3705 			/* For LPI we need to save the tx status */
3706 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3707 				priv->tx_path_in_lpi_mode = true;
3708 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3709 				priv->tx_path_in_lpi_mode = false;
3710 		}
3711 
3712 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3713 			for (queue = 0; queue < queues_count; queue++) {
3714 				struct stmmac_rx_queue *rx_q =
3715 				&priv->rx_queue[queue];
3716 
3717 				status |= stmmac_host_mtl_irq_status(priv,
3718 						priv->hw, queue);
3719 
3720 				if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3721 					stmmac_set_rx_tail_ptr(priv,
3722 							priv->ioaddr,
3723 							rx_q->rx_tail_addr,
3724 							queue);
3725 			}
3726 		}
3727 
3728 		/* PCS link status */
3729 		if (priv->hw->pcs) {
3730 			if (priv->xstats.pcs_link)
3731 				netif_carrier_on(dev);
3732 			else
3733 				netif_carrier_off(dev);
3734 		}
3735 	}
3736 
3737 	/* To handle DMA interrupts */
3738 	stmmac_dma_interrupt(priv);
3739 
3740 	return IRQ_HANDLED;
3741 }
3742 
3743 #ifdef CONFIG_NET_POLL_CONTROLLER
3744 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3745  * to allow network I/O with interrupts disabled.
3746  */
3747 static void stmmac_poll_controller(struct net_device *dev)
3748 {
3749 	disable_irq(dev->irq);
3750 	stmmac_interrupt(dev->irq, dev);
3751 	enable_irq(dev->irq);
3752 }
3753 #endif
3754 
3755 /**
3756  *  stmmac_ioctl - Entry point for the Ioctl
3757  *  @dev: Device pointer.
3758  *  @rq: An IOCTL-specific structure that can contain a pointer to
3759  *  a proprietary structure used to pass information to the driver.
3760  *  @cmd: IOCTL command
3761  *  Description:
3762  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3763  */
3764 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3765 {
3766 	int ret = -EOPNOTSUPP;
3767 
3768 	if (!netif_running(dev))
3769 		return -EINVAL;
3770 
3771 	switch (cmd) {
3772 	case SIOCGMIIPHY:
3773 	case SIOCGMIIREG:
3774 	case SIOCSMIIREG:
3775 		if (!dev->phydev)
3776 			return -EINVAL;
3777 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3778 		break;
3779 	case SIOCSHWTSTAMP:
3780 		ret = stmmac_hwtstamp_ioctl(dev, rq);
3781 		break;
3782 	default:
3783 		break;
3784 	}
3785 
3786 	return ret;
3787 }
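
/*
 * For SIOCSHWTSTAMP, userspace typically enables time stamping with a
 * sketch like the following (error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */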
3788 
3789 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3790 {
3791 	struct stmmac_priv *priv = netdev_priv(ndev);
3792 	int ret = 0;
3793 
3794 	ret = eth_mac_addr(ndev, addr);
3795 	if (ret)
3796 		return ret;
3797 
3798 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3799 
3800 	return ret;
3801 }
3802 
3803 #ifdef CONFIG_DEBUG_FS
3804 static struct dentry *stmmac_fs_dir;
3805 
3806 static void sysfs_display_ring(void *head, int size, int extend_desc,
3807 			       struct seq_file *seq)
3808 {
3809 	int i;
3810 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3811 	struct dma_desc *p = (struct dma_desc *)head;
3812 
3813 	for (i = 0; i < size; i++) {
3814 		if (extend_desc) {
3815 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3816 				   i, (unsigned int)virt_to_phys(ep),
3817 				   le32_to_cpu(ep->basic.des0),
3818 				   le32_to_cpu(ep->basic.des1),
3819 				   le32_to_cpu(ep->basic.des2),
3820 				   le32_to_cpu(ep->basic.des3));
3821 			ep++;
3822 		} else {
3823 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3824 				   i, (unsigned int)virt_to_phys(p),
3825 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3826 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3827 			p++;
3828 		}
3829 		seq_printf(seq, "\n");
3830 	}
3831 }
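
/* Each descriptor above is printed as: "<idx> [<phys>]: des0 des1 des2 des3" */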
3832 
3833 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3834 {
3835 	struct net_device *dev = seq->private;
3836 	struct stmmac_priv *priv = netdev_priv(dev);
3837 	u32 rx_count = priv->plat->rx_queues_to_use;
3838 	u32 tx_count = priv->plat->tx_queues_to_use;
3839 	u32 queue;
3840 
3841 	for (queue = 0; queue < rx_count; queue++) {
3842 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3843 
3844 		seq_printf(seq, "RX Queue %d:\n", queue);
3845 
3846 		if (priv->extend_desc) {
3847 			seq_printf(seq, "Extended descriptor ring:\n");
3848 			sysfs_display_ring((void *)rx_q->dma_erx,
3849 					   DMA_RX_SIZE, 1, seq);
3850 		} else {
3851 			seq_printf(seq, "Descriptor ring:\n");
3852 			sysfs_display_ring((void *)rx_q->dma_rx,
3853 					   DMA_RX_SIZE, 0, seq);
3854 		}
3855 	}
3856 
3857 	for (queue = 0; queue < tx_count; queue++) {
3858 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3859 
3860 		seq_printf(seq, "TX Queue %d:\n", queue);
3861 
3862 		if (priv->extend_desc) {
3863 			seq_printf(seq, "Extended descriptor ring:\n");
3864 			sysfs_display_ring((void *)tx_q->dma_etx,
3865 					   DMA_TX_SIZE, 1, seq);
3866 		} else {
3867 			seq_printf(seq, "Descriptor ring:\n");
3868 			sysfs_display_ring((void *)tx_q->dma_tx,
3869 					   DMA_TX_SIZE, 0, seq);
3870 		}
3871 	}
3872 
3873 	return 0;
3874 }
3875 
3876 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3877 {
3878 	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3879 }
3880 
3881 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3882 
3883 static const struct file_operations stmmac_rings_status_fops = {
3884 	.owner = THIS_MODULE,
3885 	.open = stmmac_sysfs_ring_open,
3886 	.read = seq_read,
3887 	.llseek = seq_lseek,
3888 	.release = single_release,
3889 };
3890 
3891 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3892 {
3893 	struct net_device *dev = seq->private;
3894 	struct stmmac_priv *priv = netdev_priv(dev);
3895 
3896 	if (!priv->hw_cap_support) {
3897 		seq_printf(seq, "DMA HW features not supported\n");
3898 		return 0;
3899 	}
3900 
3901 	seq_printf(seq, "==============================\n");
3902 	seq_printf(seq, "\tDMA HW features\n");
3903 	seq_printf(seq, "==============================\n");
3904 
3905 	seq_printf(seq, "\t10/100 Mbps: %s\n",
3906 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3907 	seq_printf(seq, "\t1000 Mbps: %s\n",
3908 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3909 	seq_printf(seq, "\tHalf duplex: %s\n",
3910 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
3911 	seq_printf(seq, "\tHash Filter: %s\n",
3912 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
3913 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3914 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
3915 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3916 		   (priv->dma_cap.pcs) ? "Y" : "N");
3917 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3918 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3919 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
3920 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3921 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
3922 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3923 	seq_printf(seq, "\tRMON module: %s\n",
3924 		   (priv->dma_cap.rmon) ? "Y" : "N");
3925 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3926 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3927 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3928 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3929 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3930 		   (priv->dma_cap.eee) ? "Y" : "N");
3931 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3932 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3933 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
3934 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3935 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3936 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
3937 	} else {
3938 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3939 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3940 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3941 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3942 	}
3943 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3944 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3945 	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3946 		   priv->dma_cap.number_rx_channel);
3947 	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3948 		   priv->dma_cap.number_tx_channel);
3949 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
3950 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
3951 
3952 	return 0;
3953 }
3954 
3955 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3956 {
3957 	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3958 }
3959 
3960 static const struct file_operations stmmac_dma_cap_fops = {
3961 	.owner = THIS_MODULE,
3962 	.open = stmmac_sysfs_dma_cap_open,
3963 	.read = seq_read,
3964 	.llseek = seq_lseek,
3965 	.release = single_release,
3966 };
3967 
3968 static int stmmac_init_fs(struct net_device *dev)
3969 {
3970 	struct stmmac_priv *priv = netdev_priv(dev);
3971 
3972 	/* Create per netdev entries */
3973 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3974 
3975 	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3976 		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3977 
3978 		return -ENOMEM;
3979 	}
3980 
3981 	/* Entry to report DMA RX/TX rings */
3982 	priv->dbgfs_rings_status =
3983 		debugfs_create_file("descriptors_status", 0444,
3984 				    priv->dbgfs_dir, dev,
3985 				    &stmmac_rings_status_fops);
3986 
3987 	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3988 		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3989 		debugfs_remove_recursive(priv->dbgfs_dir);
3990 
3991 		return -ENOMEM;
3992 	}
3993 
3994 	/* Entry to report the DMA HW features */
3995 	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
3996 						  priv->dbgfs_dir,
3997 						  dev, &stmmac_dma_cap_fops);
3998 
3999 	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4000 		netdev_err(priv->dev, "ERROR creating stmmac dma_cap debugfs file\n");
4001 		debugfs_remove_recursive(priv->dbgfs_dir);
4002 
4003 		return -ENOMEM;
4004 	}
4005 
4006 	return 0;
4007 }
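
/*
 * The entries created above can then be inspected from userspace, e.g.:
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */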
4008 
4009 static void stmmac_exit_fs(struct net_device *dev)
4010 {
4011 	struct stmmac_priv *priv = netdev_priv(dev);
4012 
4013 	debugfs_remove_recursive(priv->dbgfs_dir);
4014 }
4015 #endif /* CONFIG_DEBUG_FS */
4016 
4017 static const struct net_device_ops stmmac_netdev_ops = {
4018 	.ndo_open = stmmac_open,
4019 	.ndo_start_xmit = stmmac_xmit,
4020 	.ndo_stop = stmmac_release,
4021 	.ndo_change_mtu = stmmac_change_mtu,
4022 	.ndo_fix_features = stmmac_fix_features,
4023 	.ndo_set_features = stmmac_set_features,
4024 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4025 	.ndo_tx_timeout = stmmac_tx_timeout,
4026 	.ndo_do_ioctl = stmmac_ioctl,
4027 #ifdef CONFIG_NET_POLL_CONTROLLER
4028 	.ndo_poll_controller = stmmac_poll_controller,
4029 #endif
4030 	.ndo_set_mac_address = stmmac_set_mac_address,
4031 };
4032 
4033 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4034 {
4035 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4036 		return;
4037 	if (test_bit(STMMAC_DOWN, &priv->state))
4038 		return;
4039 
4040 	netdev_err(priv->dev, "Reset adapter.\n");
4041 
4042 	rtnl_lock();
4043 	netif_trans_update(priv->dev);
4044 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4045 		usleep_range(1000, 2000);
4046 
4047 	set_bit(STMMAC_DOWN, &priv->state);
4048 	dev_close(priv->dev);
4049 	dev_open(priv->dev);
4050 	clear_bit(STMMAC_DOWN, &priv->state);
4051 	clear_bit(STMMAC_RESETING, &priv->state);
4052 	rtnl_unlock();
4053 }
4054 
4055 static void stmmac_service_task(struct work_struct *work)
4056 {
4057 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4058 			service_task);
4059 
4060 	stmmac_reset_subtask(priv);
4061 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4062 }
4063 
4064 /**
4065  *  stmmac_hw_init - Init the MAC device
4066  *  @priv: driver private structure
4067  *  Description: this function is to configure the MAC device according to
4068  *  some platform parameters or the HW capability register. It prepares the
4069  *  driver to use either ring or chain modes and to setup either enhanced or
4070  *  normal descriptors.
4071  */
4072 static int stmmac_hw_init(struct stmmac_priv *priv)
4073 {
4074 	int ret;
4075 
4076 	/* dwmac-sun8i only work in chain mode */
4077 	/* dwmac-sun8i only works in chain mode */
4078 		chain_mode = 1;
4079 	priv->chain_mode = chain_mode;
4080 
4081 	/* Initialize HW Interface */
4082 	ret = stmmac_hwif_init(priv);
4083 	if (ret)
4084 		return ret;
4085 
4086 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
4087 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4088 	if (priv->hw_cap_support) {
4089 		dev_info(priv->device, "DMA HW capability register supported\n");
4090 
4091 		/* We can override some gmac/dma configuration fields
4092 		 * passed through the platform (e.g. enh_desc, tx_coe)
4093 		 * with the values from the HW capability register
4094 		 * (if supported).
4095 		 */
4096 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4097 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4098 		priv->hw->pmt = priv->plat->pmt;
4099 
4100 		/* TXCOE doesn't work in thresh DMA mode */
4101 		if (priv->plat->force_thresh_dma_mode)
4102 			priv->plat->tx_coe = 0;
4103 		else
4104 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4105 
4106 		/* In case of GMAC4 rx_coe is from HW cap register. */
4107 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4108 
4109 		if (priv->dma_cap.rx_coe_type2)
4110 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4111 		else if (priv->dma_cap.rx_coe_type1)
4112 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4113 
4114 	} else {
4115 		dev_info(priv->device, "No HW DMA feature register supported\n");
4116 	}
4117 
4118 	if (priv->plat->rx_coe) {
4119 		priv->hw->rx_csum = priv->plat->rx_coe;
4120 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4121 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4122 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4123 	}
4124 	if (priv->plat->tx_coe)
4125 		dev_info(priv->device, "TX Checksum insertion supported\n");
4126 
4127 	if (priv->plat->pmt) {
4128 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4129 		device_set_wakeup_capable(priv->device, 1);
4130 	}
4131 
4132 	if (priv->dma_cap.tsoen)
4133 		dev_info(priv->device, "TSO supported\n");
4134 
4135 	return 0;
4136 }
4137 
4138 /**
4139  * stmmac_dvr_probe
4140  * @device: device pointer
4141  * @plat_dat: platform data pointer
4142  * @res: stmmac resource pointer
4143  * Description: this is the main probe function used to
4144  * Description: this is the main probe function; it calls alloc_etherdev
4145  * and allocates and initializes the private structure.
4146  * returns 0 on success, otherwise errno.
4147  */
4148 int stmmac_dvr_probe(struct device *device,
4149 		     struct plat_stmmacenet_data *plat_dat,
4150 		     struct stmmac_resources *res)
4151 {
4152 	struct net_device *ndev = NULL;
4153 	struct stmmac_priv *priv;
4154 	int ret = 0;
4155 	u32 queue;
4156 
4157 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4158 				  MTL_MAX_TX_QUEUES,
4159 				  MTL_MAX_RX_QUEUES);
4160 	if (!ndev)
4161 		return -ENOMEM;
4162 
4163 	SET_NETDEV_DEV(ndev, device);
4164 
4165 	priv = netdev_priv(ndev);
4166 	priv->device = device;
4167 	priv->dev = ndev;
4168 
4169 	stmmac_set_ethtool_ops(ndev);
4170 	priv->pause = pause;
4171 	priv->plat = plat_dat;
4172 	priv->ioaddr = res->addr;
4173 	priv->dev->base_addr = (unsigned long)res->addr;
4174 
4175 	priv->dev->irq = res->irq;
4176 	priv->wol_irq = res->wol_irq;
4177 	priv->lpi_irq = res->lpi_irq;
4178 
4179 	if (res->mac)
4180 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4181 
4182 	dev_set_drvdata(device, priv->dev);
4183 
4184 	/* Verify driver arguments */
4185 	stmmac_verify_args();
4186 
4187 	/* Allocate workqueue */
4188 	priv->wq = create_singlethread_workqueue("stmmac_wq");
4189 	if (!priv->wq) {
4190 		dev_err(priv->device, "failed to create workqueue\n");
4191 		goto error_wq;
4192 	}
4193 
4194 	INIT_WORK(&priv->service_task, stmmac_service_task);
4195 
4196 	/* Override with kernel parameters if supplied (XXX: this needs
4197 	 * to support multiple instances)
4198 	 */
4199 	if ((phyaddr >= 0) && (phyaddr <= 31))
4200 		priv->plat->phy_addr = phyaddr;
4201 
4202 	if (priv->plat->stmmac_rst) {
4203 		ret = reset_control_assert(priv->plat->stmmac_rst);
4204 		reset_control_deassert(priv->plat->stmmac_rst);
4205 		/* Some reset controllers have only reset callback instead of
4206 		 * assert + deassert callbacks pair.
4207 		 */
4208 		if (ret == -ENOTSUPP)
4209 			reset_control_reset(priv->plat->stmmac_rst);
4210 	}
4211 
4212 	/* Init MAC and get the capabilities */
4213 	ret = stmmac_hw_init(priv);
4214 	if (ret)
4215 		goto error_hw_init;
4216 
4217 	/* Configure real RX and TX queues */
4218 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4219 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4220 
4221 	ndev->netdev_ops = &stmmac_netdev_ops;
4222 
4223 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4224 			    NETIF_F_RXCSUM;
4225 
4226 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4227 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4228 		priv->tso = true;
4229 		dev_info(priv->device, "TSO feature enabled\n");
4230 	}
4231 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4232 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4233 #ifdef STMMAC_VLAN_TAG_USED
4234 	/* Both mac100 and gmac support receive VLAN tag detection */
4235 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4236 #endif
4237 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
4238 
4239 	/* MTU range: 46 - hw-specific max */
4240 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4241 	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4242 		ndev->max_mtu = JUMBO_LEN;
4243 	else
4244 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4245 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4246 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4247 	 */
4248 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
4249 	    (priv->plat->maxmtu >= ndev->min_mtu))
4250 		ndev->max_mtu = priv->plat->maxmtu;
4251 	else if (priv->plat->maxmtu < ndev->min_mtu)
4252 		dev_warn(priv->device,
4253 			 "%s: warning: maxmtu has an invalid value (%d)\n",
4254 			 __func__, priv->plat->maxmtu);
4255 
4256 	if (flow_ctrl)
4257 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
4258 
4259 	/* The RX watchdog is available on cores newer than 3.40.
4260 	 * In some cases, for example on buggy HW, this feature
4261 	 * has to be disabled; this can be done by setting the
4262 	 * riwt_off field in the platform data.
4263 	 */
4264 	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4265 		priv->use_riwt = 1;
4266 		dev_info(priv->device,
4267 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4268 	}
4269 
4270 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4271 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4272 
4273 		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4274 			       (8 * priv->plat->rx_queues_to_use));
4275 	}
4276 
4277 	spin_lock_init(&priv->lock);
4278 
4279 	/* If a specific clk_csr value is passed from the platform,
4280 	 * the CSR Clock Range selection cannot be changed at
4281 	 * run-time and is fixed. Otherwise the driver will try to
4282 	 * set the MDC clock dynamically according to the actual
4283 	 * CSR clock input.
4284 	 */
4285 	if (!priv->plat->clk_csr)
4286 		stmmac_clk_csr_set(priv);
4287 	else
4288 		priv->clk_csr = priv->plat->clk_csr;
4289 
4290 	stmmac_check_pcs_mode(priv);
4291 
4292 	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4293 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4294 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4295 		/* MDIO bus Registration */
4296 		ret = stmmac_mdio_register(ndev);
4297 		if (ret < 0) {
4298 			dev_err(priv->device,
4299 				"%s: MDIO bus (id: %d) registration failed\n",
4300 				__func__, priv->plat->bus_id);
4301 			goto error_mdio_register;
4302 		}
4303 	}
4304 
4305 	ret = register_netdev(ndev);
4306 	if (ret) {
4307 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
4308 			__func__, ret);
4309 		goto error_netdev_register;
4310 	}
4311 
4312 	return ret;
4313 
4314 error_netdev_register:
4315 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4316 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4317 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4318 		stmmac_mdio_unregister(ndev);
4319 error_mdio_register:
4320 	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4321 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4322 
4323 		netif_napi_del(&rx_q->napi);
4324 	}
4325 error_hw_init:
4326 	destroy_workqueue(priv->wq);
4327 error_wq:
4328 	free_netdev(ndev);
4329 
4330 	return ret;
4331 }
4332 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
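
/*
 * A bus glue driver (e.g. one of the dwmac-* platform variants) typically
 * fills a struct stmmac_resources and calls the probe above; a minimal
 * sketch, with the "macirq" resource name assumed from the platform glue:
 *
 *	struct stmmac_resources stmmac_res;
 *	struct resource *res;
 *
 *	memset(&stmmac_res, 0, sizeof(stmmac_res));
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
 *	stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 */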
4333 
4334 /**
4335  * stmmac_dvr_remove
4336  * @dev: device pointer
4337  * Description: this function resets the TX/RX processes, disables the MAC
4338  * RX/TX, changes the link status and releases the DMA descriptor rings.
4339  */
4340 int stmmac_dvr_remove(struct device *dev)
4341 {
4342 	struct net_device *ndev = dev_get_drvdata(dev);
4343 	struct stmmac_priv *priv = netdev_priv(ndev);
4344 
4345 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4346 
4347 	stmmac_stop_all_dma(priv);
4348 
4349 	stmmac_mac_set(priv, priv->ioaddr, false);
4350 	netif_carrier_off(ndev);
4351 	unregister_netdev(ndev);
4352 	if (priv->plat->stmmac_rst)
4353 		reset_control_assert(priv->plat->stmmac_rst);
4354 	clk_disable_unprepare(priv->plat->pclk);
4355 	clk_disable_unprepare(priv->plat->stmmac_clk);
4356 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4357 	    priv->hw->pcs != STMMAC_PCS_TBI &&
4358 	    priv->hw->pcs != STMMAC_PCS_RTBI)
4359 		stmmac_mdio_unregister(ndev);
4360 	destroy_workqueue(priv->wq);
4361 	free_netdev(ndev);
4362 
4363 	return 0;
4364 }
4365 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4366 
4367 /**
4368  * stmmac_suspend - suspend callback
4369  * @dev: device pointer
4370  * Description: this function suspends the device. It is called by the
4371  * platform driver to stop the network queues, stop the DMA, program the
4372  * PMT register (for WoL) and release the driver resources.
4373  */
4374 int stmmac_suspend(struct device *dev)
4375 {
4376 	struct net_device *ndev = dev_get_drvdata(dev);
4377 	struct stmmac_priv *priv = netdev_priv(ndev);
4378 	unsigned long flags;
4379 
4380 	if (!ndev || !netif_running(ndev))
4381 		return 0;
4382 
4383 	if (ndev->phydev)
4384 		phy_stop(ndev->phydev);
4385 
4386 	spin_lock_irqsave(&priv->lock, flags);
4387 
4388 	netif_device_detach(ndev);
4389 	stmmac_stop_all_queues(priv);
4390 
4391 	stmmac_disable_all_queues(priv);
4392 
4393 	/* Stop TX/RX DMA */
4394 	stmmac_stop_all_dma(priv);
4395 
4396 	/* Enable Power down mode by programming the PMT regs */
4397 	if (device_may_wakeup(priv->device)) {
4398 		stmmac_pmt(priv, priv->hw, priv->wolopts);
4399 		priv->irq_wake = 1;
4400 	} else {
4401 		stmmac_mac_set(priv, priv->ioaddr, false);
4402 		pinctrl_pm_select_sleep_state(priv->device);
4403 		/* Disable clocks since PM wakeup is off */
4404 		clk_disable(priv->plat->pclk);
4405 		clk_disable(priv->plat->stmmac_clk);
4406 	}
4407 	spin_unlock_irqrestore(&priv->lock, flags);
4408 
4409 	priv->oldlink = false;
4410 	priv->speed = SPEED_UNKNOWN;
4411 	priv->oldduplex = DUPLEX_UNKNOWN;
4412 	return 0;
4413 }
4414 EXPORT_SYMBOL_GPL(stmmac_suspend);
4415 
4416 /**
4417  * stmmac_reset_queues_param - reset queue parameters
4418  * @priv: driver private structure
4419  */
4420 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4421 {
4422 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4423 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4424 	u32 queue;
4425 
4426 	for (queue = 0; queue < rx_cnt; queue++) {
4427 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4428 
4429 		rx_q->cur_rx = 0;
4430 		rx_q->dirty_rx = 0;
4431 	}
4432 
4433 	for (queue = 0; queue < tx_cnt; queue++) {
4434 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4435 
4436 		tx_q->cur_tx = 0;
4437 		tx_q->dirty_tx = 0;
4438 		tx_q->mss = 0;
4439 	}
4440 }
4441 
4442 /**
4443  * stmmac_resume - resume callback
4444  * @dev: device pointer
4445  * Description: on resume this function is invoked to set up the DMA and CORE
4446  * in a usable state.
4447  */
4448 int stmmac_resume(struct device *dev)
4449 {
4450 	struct net_device *ndev = dev_get_drvdata(dev);
4451 	struct stmmac_priv *priv = netdev_priv(ndev);
4452 	unsigned long flags;
4453 
4454 	if (!netif_running(ndev))
4455 		return 0;
4456 
4457 	/* Power Down bit, into the PM register, is cleared
4458 	 * automatically as soon as a magic packet or a Wake-up frame
4459 	 * is received. Anyway, it's better to manually clear
4460 	 * this bit because it can generate problems while resuming
4461 	 * from another devices (e.g. serial console).
4462 	 */
4463 	if (device_may_wakeup(priv->device)) {
4464 		spin_lock_irqsave(&priv->lock, flags);
4465 		stmmac_pmt(priv, priv->hw, 0);
4466 		spin_unlock_irqrestore(&priv->lock, flags);
4467 		priv->irq_wake = 0;
4468 	} else {
4469 		pinctrl_pm_select_default_state(priv->device);
4470 		/* enable the clk previously disabled */
4471 		clk_enable(priv->plat->stmmac_clk);
4472 		clk_enable(priv->plat->pclk);
4473 		/* reset the phy so that it's ready */
4474 		if (priv->mii)
4475 			stmmac_mdio_reset(priv->mii);
4476 	}
4477 
4478 	netif_device_attach(ndev);
4479 
4480 	spin_lock_irqsave(&priv->lock, flags);
4481 
4482 	stmmac_reset_queues_param(priv);
4483 
4484 	stmmac_clear_descriptors(priv);
4485 
4486 	stmmac_hw_setup(ndev, false);
4487 	stmmac_init_tx_coalesce(priv);
4488 	stmmac_set_rx_mode(ndev);
4489 
4490 	stmmac_enable_all_queues(priv);
4491 
4492 	stmmac_start_all_queues(priv);
4493 
4494 	spin_unlock_irqrestore(&priv->lock, flags);
4495 
4496 	if (ndev->phydev)
4497 		phy_start(ndev->phydev);
4498 
4499 	return 0;
4500 }
4501 EXPORT_SYMBOL_GPL(stmmac_resume);
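
/*
 * The suspend/resume callbacks above are usually wired into dev_pm_ops by
 * the bus glue, e.g. (sketch):
 *
 *	static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 */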
4502 
4503 #ifndef MODULE
4504 static int __init stmmac_cmdline_opt(char *str)
4505 {
4506 	char *opt;
4507 
4508 	if (!str || !*str)
4509 		return -EINVAL;
4510 	while ((opt = strsep(&str, ",")) != NULL) {
4511 		if (!strncmp(opt, "debug:", 6)) {
4512 			if (kstrtoint(opt + 6, 0, &debug))
4513 				goto err;
4514 		} else if (!strncmp(opt, "phyaddr:", 8)) {
4515 			if (kstrtoint(opt + 8, 0, &phyaddr))
4516 				goto err;
4517 		} else if (!strncmp(opt, "buf_sz:", 7)) {
4518 			if (kstrtoint(opt + 7, 0, &buf_sz))
4519 				goto err;
4520 		} else if (!strncmp(opt, "tc:", 3)) {
4521 			if (kstrtoint(opt + 3, 0, &tc))
4522 				goto err;
4523 		} else if (!strncmp(opt, "watchdog:", 9)) {
4524 			if (kstrtoint(opt + 9, 0, &watchdog))
4525 				goto err;
4526 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4527 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4528 				goto err;
4529 		} else if (!strncmp(opt, "pause:", 6)) {
4530 			if (kstrtoint(opt + 6, 0, &pause))
4531 				goto err;
4532 		} else if (!strncmp(opt, "eee_timer:", 10)) {
4533 			if (kstrtoint(opt + 10, 0, &eee_timer))
4534 				goto err;
4535 		} else if (!strncmp(opt, "chain_mode:", 11)) {
4536 			if (kstrtoint(opt + 11, 0, &chain_mode))
4537 				goto err;
4538 		}
4539 	}
4540 	return 0;
4541 
4542 err:
4543 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4544 	return -EINVAL;
4545 }
4546 
4547 __setup("stmmaceth=", stmmac_cmdline_opt);
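
/*
 * Example (built-in driver, kernel command line):
 *
 *	stmmaceth=debug:16,phyaddr:1,buf_sz:2048,tc:256,watchdog:8000
 */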
4548 #endif /* MODULE */
4549 
4550 static int __init stmmac_init(void)
4551 {
4552 #ifdef CONFIG_DEBUG_FS
4553 	/* Create debugfs main directory if it doesn't exist yet */
4554 	if (!stmmac_fs_dir) {
4555 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4556 
4557 		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4558 			pr_err("ERROR %s, debugfs create directory failed\n",
4559 			       STMMAC_RESOURCE_NAME);
4560 
4561 			return -ENOMEM;
4562 		}
4563 	}
4564 #endif
4565 
4566 	return 0;
4567 }
4568 
4569 static void __exit stmmac_exit(void)
4570 {
4571 #ifdef CONFIG_DEBUG_FS
4572 	debugfs_remove_recursive(stmmac_fs_dir);
4573 #endif
4574 }
4575 
4576 module_init(stmmac_init)
4577 module_exit(stmmac_exit)
4578 
4579 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4580 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4581 MODULE_LICENSE("GPL");
4582