xref: /linux/drivers/net/ethernet/spacemit/k1_emac.c (revision bfec6d7f2001c7470c3cd261ae65a3ba8737f226)
1*bfec6d7fSVivian Wang // SPDX-License-Identifier: GPL-2.0
2*bfec6d7fSVivian Wang /*
3*bfec6d7fSVivian Wang  * SpacemiT K1 Ethernet driver
4*bfec6d7fSVivian Wang  *
5*bfec6d7fSVivian Wang  * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
6*bfec6d7fSVivian Wang  * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
7*bfec6d7fSVivian Wang  */
8*bfec6d7fSVivian Wang 
9*bfec6d7fSVivian Wang #include <linux/bitfield.h>
10*bfec6d7fSVivian Wang #include <linux/clk.h>
11*bfec6d7fSVivian Wang #include <linux/delay.h>
12*bfec6d7fSVivian Wang #include <linux/dma-mapping.h>
13*bfec6d7fSVivian Wang #include <linux/etherdevice.h>
14*bfec6d7fSVivian Wang #include <linux/ethtool.h>
15*bfec6d7fSVivian Wang #include <linux/interrupt.h>
16*bfec6d7fSVivian Wang #include <linux/io.h>
17*bfec6d7fSVivian Wang #include <linux/iopoll.h>
18*bfec6d7fSVivian Wang #include <linux/kernel.h>
19*bfec6d7fSVivian Wang #include <linux/mfd/syscon.h>
20*bfec6d7fSVivian Wang #include <linux/module.h>
21*bfec6d7fSVivian Wang #include <linux/of.h>
22*bfec6d7fSVivian Wang #include <linux/of_irq.h>
23*bfec6d7fSVivian Wang #include <linux/of_mdio.h>
24*bfec6d7fSVivian Wang #include <linux/of_net.h>
25*bfec6d7fSVivian Wang #include <linux/phy.h>
26*bfec6d7fSVivian Wang #include <linux/platform_device.h>
27*bfec6d7fSVivian Wang #include <linux/pm_runtime.h>
28*bfec6d7fSVivian Wang #include <linux/pm.h>
29*bfec6d7fSVivian Wang #include <linux/regmap.h>
30*bfec6d7fSVivian Wang #include <linux/reset.h>
31*bfec6d7fSVivian Wang #include <linux/rtnetlink.h>
32*bfec6d7fSVivian Wang #include <linux/timer.h>
33*bfec6d7fSVivian Wang #include <linux/types.h>
34*bfec6d7fSVivian Wang 
35*bfec6d7fSVivian Wang #include "k1_emac.h"
36*bfec6d7fSVivian Wang 
37*bfec6d7fSVivian Wang #define DRIVER_NAME "k1_emac"
38*bfec6d7fSVivian Wang 
39*bfec6d7fSVivian Wang #define EMAC_DEFAULT_BUFSIZE		1536
40*bfec6d7fSVivian Wang #define EMAC_RX_BUF_2K			2048
41*bfec6d7fSVivian Wang #define EMAC_RX_BUF_4K			4096
42*bfec6d7fSVivian Wang 
43*bfec6d7fSVivian Wang /* Tuning parameters from SpacemiT */
44*bfec6d7fSVivian Wang #define EMAC_TX_FRAMES			64
45*bfec6d7fSVivian Wang #define EMAC_TX_COAL_TIMEOUT		40000
46*bfec6d7fSVivian Wang #define EMAC_RX_FRAMES			64
47*bfec6d7fSVivian Wang #define EMAC_RX_COAL_TIMEOUT		(600 * 312)
48*bfec6d7fSVivian Wang 
49*bfec6d7fSVivian Wang #define DEFAULT_FC_PAUSE_TIME		0xffff
50*bfec6d7fSVivian Wang #define DEFAULT_FC_FIFO_HIGH		1600
51*bfec6d7fSVivian Wang #define DEFAULT_TX_ALMOST_FULL		0x1f8
52*bfec6d7fSVivian Wang #define DEFAULT_TX_THRESHOLD		1518
53*bfec6d7fSVivian Wang #define DEFAULT_RX_THRESHOLD		12
54*bfec6d7fSVivian Wang #define DEFAULT_TX_RING_NUM		1024
55*bfec6d7fSVivian Wang #define DEFAULT_RX_RING_NUM		1024
56*bfec6d7fSVivian Wang #define DEFAULT_DMA_BURST		MREGBIT_BURST_16WORD
57*bfec6d7fSVivian Wang #define HASH_TABLE_SIZE			64
58*bfec6d7fSVivian Wang 
/* One DMA mapping belonging to a TX descriptor (linear part or fragment) */
struct desc_buf {
	u64 dma_addr;		/* DMA address given to the hardware; 0 = unused */
	void *buff_addr;	/* CPU virtual address of the mapped data */
	u16 dma_len;		/* Mapped length in bytes */
	u8 map_as_page;		/* Nonzero if mapped via dma_map_page() */
};
65*bfec6d7fSVivian Wang 
/* Software state for one TX descriptor slot */
struct emac_tx_desc_buffer {
	struct sk_buff *skb;	/* skb to free once the hardware is done */
	struct desc_buf buf[2];	/* Up to two DMA mappings per descriptor */
};
70*bfec6d7fSVivian Wang 
/* Software state for one RX descriptor slot */
struct emac_rx_desc_buffer {
	struct sk_buff *skb;	/* Receive skb the hardware DMAs into */
	u64 dma_addr;		/* DMA address of skb->data */
	void *buff_addr;	/* CPU virtual address of the buffer */
	u16 dma_len;		/* Mapped length in bytes */
	u8 map_as_page;		/* Nonzero if mapped via dma_map_page() */
};
78*bfec6d7fSVivian Wang 
/**
 * struct emac_desc_ring - Software-side information for one descriptor ring
 * @desc_addr: Virtual address of the descriptor ring memory
 * @desc_dma_addr: DMA address of the descriptor ring
 * @total_size: Size of ring in bytes
 * @total_cnt: Number of descriptors
 * @head: Next descriptor to associate a buffer with
 * @tail: Next descriptor to check status bit
 * @rx_desc_buf: Array of per-descriptor buffer state for an RX ring
 * @tx_desc_buf: Array of per-descriptor buffer state for a TX ring, with
 *               a maximum of two buffers each
 *
 * The same structure describes both RX and TX rings; only one member of
 * the union is meaningful for a given instance.
 */
struct emac_desc_ring {
	void *desc_addr;
	dma_addr_t desc_dma_addr;
	u32 total_size;
	u32 total_cnt;
	u32 head;
	u32 tail;
	union {
		struct emac_rx_desc_buffer *rx_desc_buf;
		struct emac_tx_desc_buffer *tx_desc_buf;
	};
};
103*bfec6d7fSVivian Wang 
/* Driver-private state, one instance per EMAC device */
struct emac_priv {
	void __iomem *iobase;		/* MMIO base of the EMAC register block */
	u32 dma_buf_sz;			/* RX buffer size handed to the DMA engine */
	struct emac_desc_ring tx_ring;
	struct emac_desc_ring rx_ring;

	struct net_device *ndev;
	struct napi_struct napi;
	struct platform_device *pdev;
	struct clk *bus_clk;
	struct clk *ref_clk;
	struct regmap *regmap_apmu;	/* Syscon regmap for the APMU glue logic */
	u32 regmap_apmu_offset;		/* Offset of EMAC control regs in the APMU */
	int irq;

	phy_interface_t phy_interface;

	/*
	 * Hardware statistics; *_off presumably hold snapshot offsets
	 * subtracted from the raw counters -- confirm against the stats
	 * read path (not visible in this chunk).
	 */
	union emac_hw_tx_stats tx_stats, tx_stats_off;
	union emac_hw_rx_stats rx_stats, rx_stats_off;

	/* TX interrupt coalescing state (see emac_tx_should_interrupt()) */
	u32 tx_count_frames;		/* Frames queued since the last TX IRQ */
	u32 tx_coal_frames;		/* Frame-count threshold for a TX IRQ */
	u32 tx_coal_timeout;		/* Coalescing backstop timeout, in usecs */
	struct work_struct tx_timeout_task;

	struct timer_list txtimer;	/* TX coalescing backstop timer */
	struct timer_list stats_timer;

	/* RGMII TX/RX delays; units/encoding not visible here -- TODO confirm */
	u32 tx_delay;
	u32 rx_delay;

	bool flow_control_autoneg;	/* Resolve pause settings via autoneg */
	u8 flow_control;

	/* Hold while touching hardware statistics */
	spinlock_t stats_lock;
};
141*bfec6d7fSVivian Wang 
142*bfec6d7fSVivian Wang static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
143*bfec6d7fSVivian Wang {
144*bfec6d7fSVivian Wang 	writel(val, priv->iobase + reg);
145*bfec6d7fSVivian Wang }
146*bfec6d7fSVivian Wang 
147*bfec6d7fSVivian Wang static u32 emac_rd(struct emac_priv *priv, u32 reg)
148*bfec6d7fSVivian Wang {
149*bfec6d7fSVivian Wang 	return readl(priv->iobase + reg);
150*bfec6d7fSVivian Wang }
151*bfec6d7fSVivian Wang 
152*bfec6d7fSVivian Wang static int emac_phy_interface_config(struct emac_priv *priv)
153*bfec6d7fSVivian Wang {
154*bfec6d7fSVivian Wang 	u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
155*bfec6d7fSVivian Wang 
156*bfec6d7fSVivian Wang 	if (phy_interface_mode_is_rgmii(priv->phy_interface))
157*bfec6d7fSVivian Wang 		val |= PHY_INTF_RGMII;
158*bfec6d7fSVivian Wang 
159*bfec6d7fSVivian Wang 	regmap_update_bits(priv->regmap_apmu,
160*bfec6d7fSVivian Wang 			   priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
161*bfec6d7fSVivian Wang 			   mask, val);
162*bfec6d7fSVivian Wang 
163*bfec6d7fSVivian Wang 	return 0;
164*bfec6d7fSVivian Wang }
165*bfec6d7fSVivian Wang 
166*bfec6d7fSVivian Wang /*
167*bfec6d7fSVivian Wang  * Where the hardware expects a MAC address, it is laid out in this high, med,
168*bfec6d7fSVivian Wang  * low order in three consecutive registers and in this format.
169*bfec6d7fSVivian Wang  */
170*bfec6d7fSVivian Wang 
171*bfec6d7fSVivian Wang static void emac_set_mac_addr_reg(struct emac_priv *priv,
172*bfec6d7fSVivian Wang 				  const unsigned char *addr,
173*bfec6d7fSVivian Wang 				  u32 reg)
174*bfec6d7fSVivian Wang {
175*bfec6d7fSVivian Wang 	emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
176*bfec6d7fSVivian Wang 	emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
177*bfec6d7fSVivian Wang 	emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
178*bfec6d7fSVivian Wang }
179*bfec6d7fSVivian Wang 
180*bfec6d7fSVivian Wang static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
181*bfec6d7fSVivian Wang {
182*bfec6d7fSVivian Wang 	/* We use only one address, so set the same for flow control as well */
183*bfec6d7fSVivian Wang 	emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
184*bfec6d7fSVivian Wang 	emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
185*bfec6d7fSVivian Wang }
186*bfec6d7fSVivian Wang 
187*bfec6d7fSVivian Wang static void emac_reset_hw(struct emac_priv *priv)
188*bfec6d7fSVivian Wang {
189*bfec6d7fSVivian Wang 	/* Disable all interrupts */
190*bfec6d7fSVivian Wang 	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
191*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
192*bfec6d7fSVivian Wang 
193*bfec6d7fSVivian Wang 	/* Disable transmit and receive units */
194*bfec6d7fSVivian Wang 	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
195*bfec6d7fSVivian Wang 	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
196*bfec6d7fSVivian Wang 
197*bfec6d7fSVivian Wang 	/* Disable DMA */
198*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_CONTROL, 0x0);
199*bfec6d7fSVivian Wang }
200*bfec6d7fSVivian Wang 
/*
 * Bring the MAC and the DMA engine into a known initial state.
 *
 * RX/TX units and DMA are left disabled; they are enabled later, once the
 * rings are set up. Flow-control thresholds are programmed here, but pause
 * itself is only enabled from emac_adjust_link().
 */
static void emac_init_hw(struct emac_priv *priv)
{
	/* Destination address for 802.3x Ethernet flow control */
	u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };

	u32 rxirq = 0, dma = 0;

	/*
	 * NOTE(review): AXI_SINGLE_ID presumably forces a single AXI
	 * transaction ID for EMAC bus accesses -- confirm against SoC docs.
	 */
	regmap_set_bits(priv->regmap_apmu,
			priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
			AXI_SINGLE_ID);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Enable MAC address 1 filtering */
	emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);

	/* Zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);

	/* Configure thresholds */
	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
		DEFAULT_TX_THRESHOLD);
	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);

	/* Configure flow control (enabled in emac_adjust_link() later) */
	emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
	emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
	emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
	emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);

	/* RX IRQ mitigation: IRQ after EMAC_RX_FRAMES frames or the timeout */
	rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
			   EMAC_RX_FRAMES);
	rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
			    EMAC_RX_COAL_TIMEOUT);
	rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
	emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);

	/* Disable and set DMA config */
	emac_wr(priv, DMA_CONTROL, 0x0);

	/*
	 * Software-reset the DMA block, then release the reset. The ~10 ms
	 * settle delays are presumably a hardware requirement -- confirm
	 * against vendor documentation.
	 */
	emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
	usleep_range(9000, 10000);
	emac_wr(priv, DMA_CONFIGURATION, 0x0);
	usleep_range(9000, 10000);

	/* Strict burst, 64-bit mode, 16-word bursts (DEFAULT_DMA_BURST) */
	dma |= MREGBIT_STRICT_BURST;
	dma |= MREGBIT_DMA_64BIT_MODE;
	dma |= DEFAULT_DMA_BURST;

	emac_wr(priv, DMA_CONFIGURATION, dma);
}
259*bfec6d7fSVivian Wang 
/* Kick the DMA engine to (re)scan the TX descriptor ring */
static void emac_dma_start_transmit(struct emac_priv *priv)
{
	/* The actual value written does not matter */
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
}
265*bfec6d7fSVivian Wang 
266*bfec6d7fSVivian Wang static void emac_enable_interrupt(struct emac_priv *priv)
267*bfec6d7fSVivian Wang {
268*bfec6d7fSVivian Wang 	u32 val;
269*bfec6d7fSVivian Wang 
270*bfec6d7fSVivian Wang 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
271*bfec6d7fSVivian Wang 	val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
272*bfec6d7fSVivian Wang 	val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
273*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
274*bfec6d7fSVivian Wang }
275*bfec6d7fSVivian Wang 
276*bfec6d7fSVivian Wang static void emac_disable_interrupt(struct emac_priv *priv)
277*bfec6d7fSVivian Wang {
278*bfec6d7fSVivian Wang 	u32 val;
279*bfec6d7fSVivian Wang 
280*bfec6d7fSVivian Wang 	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
281*bfec6d7fSVivian Wang 	val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
282*bfec6d7fSVivian Wang 	val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
283*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
284*bfec6d7fSVivian Wang }
285*bfec6d7fSVivian Wang 
286*bfec6d7fSVivian Wang static u32 emac_tx_avail(struct emac_priv *priv)
287*bfec6d7fSVivian Wang {
288*bfec6d7fSVivian Wang 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
289*bfec6d7fSVivian Wang 	u32 avail;
290*bfec6d7fSVivian Wang 
291*bfec6d7fSVivian Wang 	if (tx_ring->tail > tx_ring->head)
292*bfec6d7fSVivian Wang 		avail = tx_ring->tail - tx_ring->head - 1;
293*bfec6d7fSVivian Wang 	else
294*bfec6d7fSVivian Wang 		avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
295*bfec6d7fSVivian Wang 
296*bfec6d7fSVivian Wang 	return avail;
297*bfec6d7fSVivian Wang }
298*bfec6d7fSVivian Wang 
299*bfec6d7fSVivian Wang static void emac_tx_coal_timer_resched(struct emac_priv *priv)
300*bfec6d7fSVivian Wang {
301*bfec6d7fSVivian Wang 	mod_timer(&priv->txtimer,
302*bfec6d7fSVivian Wang 		  jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
303*bfec6d7fSVivian Wang }
304*bfec6d7fSVivian Wang 
/*
 * TX coalescing backstop: if too few frames accumulated to request a TX
 * interrupt (see emac_tx_should_interrupt()), schedule NAPI so completed
 * TX descriptors are still reclaimed in a timely manner.
 */
static void emac_tx_coal_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, txtimer);

	napi_schedule(&priv->napi);
}
311*bfec6d7fSVivian Wang 
312*bfec6d7fSVivian Wang static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
313*bfec6d7fSVivian Wang {
314*bfec6d7fSVivian Wang 	priv->tx_count_frames += pkt_num;
315*bfec6d7fSVivian Wang 	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
316*bfec6d7fSVivian Wang 		emac_tx_coal_timer_resched(priv);
317*bfec6d7fSVivian Wang 		return false;
318*bfec6d7fSVivian Wang 	}
319*bfec6d7fSVivian Wang 
320*bfec6d7fSVivian Wang 	priv->tx_count_frames = 0;
321*bfec6d7fSVivian Wang 	return true;
322*bfec6d7fSVivian Wang }
323*bfec6d7fSVivian Wang 
324*bfec6d7fSVivian Wang static void emac_free_tx_buf(struct emac_priv *priv, int i)
325*bfec6d7fSVivian Wang {
326*bfec6d7fSVivian Wang 	struct emac_tx_desc_buffer *tx_buf;
327*bfec6d7fSVivian Wang 	struct emac_desc_ring *tx_ring;
328*bfec6d7fSVivian Wang 	struct desc_buf *buf;
329*bfec6d7fSVivian Wang 	int j;
330*bfec6d7fSVivian Wang 
331*bfec6d7fSVivian Wang 	tx_ring = &priv->tx_ring;
332*bfec6d7fSVivian Wang 	tx_buf = &tx_ring->tx_desc_buf[i];
333*bfec6d7fSVivian Wang 
334*bfec6d7fSVivian Wang 	for (j = 0; j < 2; j++) {
335*bfec6d7fSVivian Wang 		buf = &tx_buf->buf[j];
336*bfec6d7fSVivian Wang 		if (!buf->dma_addr)
337*bfec6d7fSVivian Wang 			continue;
338*bfec6d7fSVivian Wang 
339*bfec6d7fSVivian Wang 		if (buf->map_as_page)
340*bfec6d7fSVivian Wang 			dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
341*bfec6d7fSVivian Wang 				       buf->dma_len, DMA_TO_DEVICE);
342*bfec6d7fSVivian Wang 		else
343*bfec6d7fSVivian Wang 			dma_unmap_single(&priv->pdev->dev,
344*bfec6d7fSVivian Wang 					 buf->dma_addr, buf->dma_len,
345*bfec6d7fSVivian Wang 					 DMA_TO_DEVICE);
346*bfec6d7fSVivian Wang 
347*bfec6d7fSVivian Wang 		buf->dma_addr = 0;
348*bfec6d7fSVivian Wang 		buf->map_as_page = false;
349*bfec6d7fSVivian Wang 		buf->buff_addr = NULL;
350*bfec6d7fSVivian Wang 	}
351*bfec6d7fSVivian Wang 
352*bfec6d7fSVivian Wang 	if (tx_buf->skb) {
353*bfec6d7fSVivian Wang 		dev_kfree_skb_any(tx_buf->skb);
354*bfec6d7fSVivian Wang 		tx_buf->skb = NULL;
355*bfec6d7fSVivian Wang 	}
356*bfec6d7fSVivian Wang }
357*bfec6d7fSVivian Wang 
358*bfec6d7fSVivian Wang static void emac_clean_tx_desc_ring(struct emac_priv *priv)
359*bfec6d7fSVivian Wang {
360*bfec6d7fSVivian Wang 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
361*bfec6d7fSVivian Wang 	u32 i;
362*bfec6d7fSVivian Wang 
363*bfec6d7fSVivian Wang 	for (i = 0; i < tx_ring->total_cnt; i++)
364*bfec6d7fSVivian Wang 		emac_free_tx_buf(priv, i);
365*bfec6d7fSVivian Wang 
366*bfec6d7fSVivian Wang 	tx_ring->head = 0;
367*bfec6d7fSVivian Wang 	tx_ring->tail = 0;
368*bfec6d7fSVivian Wang }
369*bfec6d7fSVivian Wang 
370*bfec6d7fSVivian Wang static void emac_clean_rx_desc_ring(struct emac_priv *priv)
371*bfec6d7fSVivian Wang {
372*bfec6d7fSVivian Wang 	struct emac_rx_desc_buffer *rx_buf;
373*bfec6d7fSVivian Wang 	struct emac_desc_ring *rx_ring;
374*bfec6d7fSVivian Wang 	u32 i;
375*bfec6d7fSVivian Wang 
376*bfec6d7fSVivian Wang 	rx_ring = &priv->rx_ring;
377*bfec6d7fSVivian Wang 
378*bfec6d7fSVivian Wang 	for (i = 0; i < rx_ring->total_cnt; i++) {
379*bfec6d7fSVivian Wang 		rx_buf = &rx_ring->rx_desc_buf[i];
380*bfec6d7fSVivian Wang 
381*bfec6d7fSVivian Wang 		if (!rx_buf->skb)
382*bfec6d7fSVivian Wang 			continue;
383*bfec6d7fSVivian Wang 
384*bfec6d7fSVivian Wang 		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
385*bfec6d7fSVivian Wang 				 rx_buf->dma_len, DMA_FROM_DEVICE);
386*bfec6d7fSVivian Wang 
387*bfec6d7fSVivian Wang 		dev_kfree_skb(rx_buf->skb);
388*bfec6d7fSVivian Wang 		rx_buf->skb = NULL;
389*bfec6d7fSVivian Wang 	}
390*bfec6d7fSVivian Wang 
391*bfec6d7fSVivian Wang 	rx_ring->tail = 0;
392*bfec6d7fSVivian Wang 	rx_ring->head = 0;
393*bfec6d7fSVivian Wang }
394*bfec6d7fSVivian Wang 
395*bfec6d7fSVivian Wang static int emac_alloc_tx_resources(struct emac_priv *priv)
396*bfec6d7fSVivian Wang {
397*bfec6d7fSVivian Wang 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
398*bfec6d7fSVivian Wang 	struct platform_device *pdev = priv->pdev;
399*bfec6d7fSVivian Wang 
400*bfec6d7fSVivian Wang 	tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
401*bfec6d7fSVivian Wang 				       sizeof(*tx_ring->tx_desc_buf),
402*bfec6d7fSVivian Wang 				       GFP_KERNEL);
403*bfec6d7fSVivian Wang 
404*bfec6d7fSVivian Wang 	if (!tx_ring->tx_desc_buf)
405*bfec6d7fSVivian Wang 		return -ENOMEM;
406*bfec6d7fSVivian Wang 
407*bfec6d7fSVivian Wang 	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
408*bfec6d7fSVivian Wang 	tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
409*bfec6d7fSVivian Wang 
410*bfec6d7fSVivian Wang 	tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
411*bfec6d7fSVivian Wang 						&tx_ring->desc_dma_addr,
412*bfec6d7fSVivian Wang 						GFP_KERNEL);
413*bfec6d7fSVivian Wang 	if (!tx_ring->desc_addr) {
414*bfec6d7fSVivian Wang 		kfree(tx_ring->tx_desc_buf);
415*bfec6d7fSVivian Wang 		return -ENOMEM;
416*bfec6d7fSVivian Wang 	}
417*bfec6d7fSVivian Wang 
418*bfec6d7fSVivian Wang 	tx_ring->head = 0;
419*bfec6d7fSVivian Wang 	tx_ring->tail = 0;
420*bfec6d7fSVivian Wang 
421*bfec6d7fSVivian Wang 	return 0;
422*bfec6d7fSVivian Wang }
423*bfec6d7fSVivian Wang 
424*bfec6d7fSVivian Wang static int emac_alloc_rx_resources(struct emac_priv *priv)
425*bfec6d7fSVivian Wang {
426*bfec6d7fSVivian Wang 	struct emac_desc_ring *rx_ring = &priv->rx_ring;
427*bfec6d7fSVivian Wang 	struct platform_device *pdev = priv->pdev;
428*bfec6d7fSVivian Wang 
429*bfec6d7fSVivian Wang 	rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
430*bfec6d7fSVivian Wang 				       sizeof(*rx_ring->rx_desc_buf),
431*bfec6d7fSVivian Wang 				       GFP_KERNEL);
432*bfec6d7fSVivian Wang 	if (!rx_ring->rx_desc_buf)
433*bfec6d7fSVivian Wang 		return -ENOMEM;
434*bfec6d7fSVivian Wang 
435*bfec6d7fSVivian Wang 	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
436*bfec6d7fSVivian Wang 
437*bfec6d7fSVivian Wang 	rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
438*bfec6d7fSVivian Wang 
439*bfec6d7fSVivian Wang 	rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
440*bfec6d7fSVivian Wang 						&rx_ring->desc_dma_addr,
441*bfec6d7fSVivian Wang 						GFP_KERNEL);
442*bfec6d7fSVivian Wang 	if (!rx_ring->desc_addr) {
443*bfec6d7fSVivian Wang 		kfree(rx_ring->rx_desc_buf);
444*bfec6d7fSVivian Wang 		return -ENOMEM;
445*bfec6d7fSVivian Wang 	}
446*bfec6d7fSVivian Wang 
447*bfec6d7fSVivian Wang 	rx_ring->head = 0;
448*bfec6d7fSVivian Wang 	rx_ring->tail = 0;
449*bfec6d7fSVivian Wang 
450*bfec6d7fSVivian Wang 	return 0;
451*bfec6d7fSVivian Wang }
452*bfec6d7fSVivian Wang 
453*bfec6d7fSVivian Wang static void emac_free_tx_resources(struct emac_priv *priv)
454*bfec6d7fSVivian Wang {
455*bfec6d7fSVivian Wang 	struct emac_desc_ring *tr = &priv->tx_ring;
456*bfec6d7fSVivian Wang 	struct device *dev = &priv->pdev->dev;
457*bfec6d7fSVivian Wang 
458*bfec6d7fSVivian Wang 	emac_clean_tx_desc_ring(priv);
459*bfec6d7fSVivian Wang 
460*bfec6d7fSVivian Wang 	kfree(tr->tx_desc_buf);
461*bfec6d7fSVivian Wang 	tr->tx_desc_buf = NULL;
462*bfec6d7fSVivian Wang 
463*bfec6d7fSVivian Wang 	dma_free_coherent(dev, tr->total_size, tr->desc_addr,
464*bfec6d7fSVivian Wang 			  tr->desc_dma_addr);
465*bfec6d7fSVivian Wang 	tr->desc_addr = NULL;
466*bfec6d7fSVivian Wang }
467*bfec6d7fSVivian Wang 
468*bfec6d7fSVivian Wang static void emac_free_rx_resources(struct emac_priv *priv)
469*bfec6d7fSVivian Wang {
470*bfec6d7fSVivian Wang 	struct emac_desc_ring *rr = &priv->rx_ring;
471*bfec6d7fSVivian Wang 	struct device *dev = &priv->pdev->dev;
472*bfec6d7fSVivian Wang 
473*bfec6d7fSVivian Wang 	emac_clean_rx_desc_ring(priv);
474*bfec6d7fSVivian Wang 
475*bfec6d7fSVivian Wang 	kfree(rr->rx_desc_buf);
476*bfec6d7fSVivian Wang 	rr->rx_desc_buf = NULL;
477*bfec6d7fSVivian Wang 
478*bfec6d7fSVivian Wang 	dma_free_coherent(dev, rr->total_size, rr->desc_addr,
479*bfec6d7fSVivian Wang 			  rr->desc_dma_addr);
480*bfec6d7fSVivian Wang 	rr->desc_addr = NULL;
481*bfec6d7fSVivian Wang }
482*bfec6d7fSVivian Wang 
/*
 * Reclaim TX descriptors completed by the DMA engine.
 *
 * Walks the ring from tail towards head, stopping at the first descriptor
 * that still has TX_DESC_0_OWN set (still owned by DMA); completed buffers
 * are unmapped/freed and their descriptors cleared. Runs under
 * netif_tx_lock() so it does not race with the xmit path updating head.
 *
 * Always returns 0.
 */
static int emac_tx_clean_desc(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *tx_ring;
	struct emac_desc *tx_desc;
	u32 i;

	netif_tx_lock(ndev);

	tx_ring = &priv->tx_ring;

	i = tx_ring->tail;

	while (i != tx_ring->head) {
		tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];

		/* Stop checking if desc still own by DMA */
		if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
			break;

		emac_free_tx_buf(priv, i);
		memset(tx_desc, 0, sizeof(struct emac_desc));

		/* Ring wrap-around */
		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	tx_ring->tail = i;

	/* Restart the queue once at least a quarter of the ring is free */
	if (unlikely(netif_queue_stopped(ndev) &&
		     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
		netif_wake_queue(ndev);

	netif_tx_unlock(ndev);

	return 0;
}
520*bfec6d7fSVivian Wang 
521*bfec6d7fSVivian Wang static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
522*bfec6d7fSVivian Wang {
523*bfec6d7fSVivian Wang 	const char *msg;
524*bfec6d7fSVivian Wang 	u32 len;
525*bfec6d7fSVivian Wang 
526*bfec6d7fSVivian Wang 	len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);
527*bfec6d7fSVivian Wang 
528*bfec6d7fSVivian Wang 	if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
529*bfec6d7fSVivian Wang 		msg = "Not last descriptor"; /* This would be a bug */
530*bfec6d7fSVivian Wang 	else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
531*bfec6d7fSVivian Wang 		msg = "Runt frame";
532*bfec6d7fSVivian Wang 	else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
533*bfec6d7fSVivian Wang 		msg = "Frame CRC error";
534*bfec6d7fSVivian Wang 	else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
535*bfec6d7fSVivian Wang 		msg = "Frame exceeds max length";
536*bfec6d7fSVivian Wang 	else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
537*bfec6d7fSVivian Wang 		msg = "Frame jabber error";
538*bfec6d7fSVivian Wang 	else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
539*bfec6d7fSVivian Wang 		msg = "Frame length error";
540*bfec6d7fSVivian Wang 	else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
541*bfec6d7fSVivian Wang 		msg = "Frame length unacceptable";
542*bfec6d7fSVivian Wang 	else
543*bfec6d7fSVivian Wang 		return true; /* All good */
544*bfec6d7fSVivian Wang 
545*bfec6d7fSVivian Wang 	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);
546*bfec6d7fSVivian Wang 
547*bfec6d7fSVivian Wang 	return false;
548*bfec6d7fSVivian Wang }
549*bfec6d7fSVivian Wang 
/*
 * Refill the RX ring with fresh skbs, starting at head and stopping at
 * the first slot that already holds an skb (ring full) or when
 * allocation/mapping fails. Each refilled descriptor is handed back to
 * the DMA engine by setting RX_DESC_0_OWN last, after a dma_wmb(), so
 * the hardware never observes a half-initialized descriptor.
 */
static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc rx_desc, *rx_desc_addr;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct sk_buff *skb;
	u32 i;

	i = rx_ring->head;
	rx_buf = &rx_ring->rx_desc_buf[i];

	while (!rx_buf->skb) {
		/* Allocates with NET_IP_ALIGN headroom for IP alignment */
		skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
		if (!skb)
			break;

		skb->dev = ndev;

		rx_buf->skb = skb;
		rx_buf->dma_len = priv->dma_buf_sz;
		rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
						  priv->dma_buf_sz,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
			dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
			goto err_free_skb;
		}

		rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Build the descriptor locally first */
		memset(&rx_desc, 0, sizeof(rx_desc));

		rx_desc.buffer_addr_1 = rx_buf->dma_addr;
		rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
					   rx_buf->dma_len);

		/* Mark the last descriptor so the hardware wraps around */
		if (++i == rx_ring->total_cnt) {
			rx_desc.desc1 |= RX_DESC_1_END_RING;
			i = 0;
		}

		/* Publish the descriptor, then transfer ownership to DMA */
		*rx_desc_addr = rx_desc;
		dma_wmb();
		WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);

		rx_buf = &rx_ring->rx_desc_buf[i];
	}

	rx_ring->head = i;
	return;

err_free_skb:
	dev_kfree_skb_any(skb);
	rx_buf->skb = NULL;
}
606*bfec6d7fSVivian Wang 
607*bfec6d7fSVivian Wang /* Returns number of packets received */
/*
 * Receive up to @budget frames from the RX ring.
 *
 * Walks the ring from tail, stopping at the first descriptor still owned
 * by DMA. Good frames are handed to the GRO layer; bad frames (see
 * emac_rx_frame_good()) are dropped. The ring is refilled at the end via
 * emac_alloc_rx_desc_buffers().
 *
 * Returns number of packets received.
 */
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	struct sk_buff *skb = NULL;
	struct emac_desc *rx_desc;
	u32 got = 0, skb_len, i;

	rx_ring = &priv->rx_ring;

	i = rx_ring->tail;

	while (budget--) {
		rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Stop checking if rx_desc still owned by DMA */
		if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
			break;

		/* Order the OWN check before reading the descriptor body */
		dma_rmb();

		rx_buf = &rx_ring->rx_desc_buf[i];

		/* Unfilled slot (refill previously failed): nothing to do */
		if (!rx_buf->skb)
			break;

		got++;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		if (likely(emac_rx_frame_good(priv, rx_desc))) {
			skb = rx_buf->skb;

			/* Hardware length includes the FCS; trim it */
			skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
					    rx_desc->desc0);
			skb_len -= ETH_FCS_LEN;

			skb_put(skb, skb_len);
			skb->dev = ndev;
			/*
			 * NOTE(review): reassigning the constant ETH_HLEN to
			 * ndev->hard_header_len for every packet looks
			 * redundant -- confirm intent.
			 */
			ndev->hard_header_len = ETH_HLEN;

			skb->protocol = eth_type_trans(skb, ndev);

			/* No hardware checksum offload on this path */
			skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&priv->napi, skb);

			memset(rx_desc, 0, sizeof(struct emac_desc));
			rx_buf->skb = NULL;
		} else {
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;
	}

	rx_ring->tail = i;

	/* Give the freed slots back to the hardware */
	emac_alloc_rx_desc_buffers(priv);

	return got;
}
674*bfec6d7fSVivian Wang 
675*bfec6d7fSVivian Wang static int emac_rx_poll(struct napi_struct *napi, int budget)
676*bfec6d7fSVivian Wang {
677*bfec6d7fSVivian Wang 	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
678*bfec6d7fSVivian Wang 	int work_done;
679*bfec6d7fSVivian Wang 
680*bfec6d7fSVivian Wang 	emac_tx_clean_desc(priv);
681*bfec6d7fSVivian Wang 
682*bfec6d7fSVivian Wang 	work_done = emac_rx_clean_desc(priv, budget);
683*bfec6d7fSVivian Wang 	if (work_done < budget && napi_complete_done(napi, work_done))
684*bfec6d7fSVivian Wang 		emac_enable_interrupt(priv);
685*bfec6d7fSVivian Wang 
686*bfec6d7fSVivian Wang 	return work_done;
687*bfec6d7fSVivian Wang }
688*bfec6d7fSVivian Wang 
689*bfec6d7fSVivian Wang /*
690*bfec6d7fSVivian Wang  * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
691*bfec6d7fSVivian Wang  *
692*bfec6d7fSVivian Wang  * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
693*bfec6d7fSVivian Wang  * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
694*bfec6d7fSVivian Wang  */
695*bfec6d7fSVivian Wang 
696*bfec6d7fSVivian Wang static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
697*bfec6d7fSVivian Wang 			    struct emac_tx_desc_buffer *tx_buf,
698*bfec6d7fSVivian Wang 			    struct sk_buff *skb, u32 frag_idx)
699*bfec6d7fSVivian Wang {
700*bfec6d7fSVivian Wang 	bool map_as_page, buf_idx;
701*bfec6d7fSVivian Wang 	const skb_frag_t *frag;
702*bfec6d7fSVivian Wang 	phys_addr_t addr;
703*bfec6d7fSVivian Wang 	u32 len;
704*bfec6d7fSVivian Wang 	int ret;
705*bfec6d7fSVivian Wang 
706*bfec6d7fSVivian Wang 	buf_idx = frag_idx % 2;
707*bfec6d7fSVivian Wang 
708*bfec6d7fSVivian Wang 	if (frag_idx == 0) {
709*bfec6d7fSVivian Wang 		/* Non-fragmented part */
710*bfec6d7fSVivian Wang 		len = skb_headlen(skb);
711*bfec6d7fSVivian Wang 		addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
712*bfec6d7fSVivian Wang 		map_as_page = false;
713*bfec6d7fSVivian Wang 	} else {
714*bfec6d7fSVivian Wang 		/* Fragment */
715*bfec6d7fSVivian Wang 		frag = &skb_shinfo(skb)->frags[frag_idx - 1];
716*bfec6d7fSVivian Wang 		len = skb_frag_size(frag);
717*bfec6d7fSVivian Wang 		addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
718*bfec6d7fSVivian Wang 		map_as_page = true;
719*bfec6d7fSVivian Wang 	}
720*bfec6d7fSVivian Wang 
721*bfec6d7fSVivian Wang 	ret = dma_mapping_error(dev, addr);
722*bfec6d7fSVivian Wang 	if (ret)
723*bfec6d7fSVivian Wang 		return ret;
724*bfec6d7fSVivian Wang 
725*bfec6d7fSVivian Wang 	tx_buf->buf[buf_idx].dma_addr = addr;
726*bfec6d7fSVivian Wang 	tx_buf->buf[buf_idx].dma_len = len;
727*bfec6d7fSVivian Wang 	tx_buf->buf[buf_idx].map_as_page = map_as_page;
728*bfec6d7fSVivian Wang 
729*bfec6d7fSVivian Wang 	if (buf_idx == 0) {
730*bfec6d7fSVivian Wang 		tx_desc->buffer_addr_1 = addr;
731*bfec6d7fSVivian Wang 		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
732*bfec6d7fSVivian Wang 	} else {
733*bfec6d7fSVivian Wang 		tx_desc->buffer_addr_2 = addr;
734*bfec6d7fSVivian Wang 		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
735*bfec6d7fSVivian Wang 	}
736*bfec6d7fSVivian Wang 
737*bfec6d7fSVivian Wang 	return 0;
738*bfec6d7fSVivian Wang }
739*bfec6d7fSVivian Wang 
740*bfec6d7fSVivian Wang static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
741*bfec6d7fSVivian Wang {
742*bfec6d7fSVivian Wang 	struct emac_desc_ring *tx_ring = &priv->tx_ring;
743*bfec6d7fSVivian Wang 	struct emac_desc tx_desc, *tx_desc_addr;
744*bfec6d7fSVivian Wang 	struct device *dev = &priv->pdev->dev;
745*bfec6d7fSVivian Wang 	struct emac_tx_desc_buffer *tx_buf;
746*bfec6d7fSVivian Wang 	u32 head, old_head, frag_num, f;
747*bfec6d7fSVivian Wang 	bool buf_idx;
748*bfec6d7fSVivian Wang 
749*bfec6d7fSVivian Wang 	frag_num = skb_shinfo(skb)->nr_frags;
750*bfec6d7fSVivian Wang 	head = tx_ring->head;
751*bfec6d7fSVivian Wang 	old_head = head;
752*bfec6d7fSVivian Wang 
753*bfec6d7fSVivian Wang 	for (f = 0; f < frag_num + 1; f++) {
754*bfec6d7fSVivian Wang 		buf_idx = f % 2;
755*bfec6d7fSVivian Wang 
756*bfec6d7fSVivian Wang 		/*
757*bfec6d7fSVivian Wang 		 * If using buffer 1, initialize a new desc. Otherwise, use
758*bfec6d7fSVivian Wang 		 * buffer 2 of previous fragment's desc.
759*bfec6d7fSVivian Wang 		 */
760*bfec6d7fSVivian Wang 		if (!buf_idx) {
761*bfec6d7fSVivian Wang 			tx_buf = &tx_ring->tx_desc_buf[head];
762*bfec6d7fSVivian Wang 			tx_desc_addr =
763*bfec6d7fSVivian Wang 				&((struct emac_desc *)tx_ring->desc_addr)[head];
764*bfec6d7fSVivian Wang 			memset(&tx_desc, 0, sizeof(tx_desc));
765*bfec6d7fSVivian Wang 
766*bfec6d7fSVivian Wang 			/*
767*bfec6d7fSVivian Wang 			 * Give ownership for all but first desc initially. For
768*bfec6d7fSVivian Wang 			 * first desc, give at the end so DMA cannot start
769*bfec6d7fSVivian Wang 			 * reading uninitialized descs.
770*bfec6d7fSVivian Wang 			 */
771*bfec6d7fSVivian Wang 			if (head != old_head)
772*bfec6d7fSVivian Wang 				tx_desc.desc0 |= TX_DESC_0_OWN;
773*bfec6d7fSVivian Wang 
774*bfec6d7fSVivian Wang 			if (++head == tx_ring->total_cnt) {
775*bfec6d7fSVivian Wang 				/* Just used last desc in ring */
776*bfec6d7fSVivian Wang 				tx_desc.desc1 |= TX_DESC_1_END_RING;
777*bfec6d7fSVivian Wang 				head = 0;
778*bfec6d7fSVivian Wang 			}
779*bfec6d7fSVivian Wang 		}
780*bfec6d7fSVivian Wang 
781*bfec6d7fSVivian Wang 		if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
782*bfec6d7fSVivian Wang 			dev_err_ratelimited(&priv->ndev->dev,
783*bfec6d7fSVivian Wang 					    "Map TX frag %d failed\n", f);
784*bfec6d7fSVivian Wang 			goto err_free_skb;
785*bfec6d7fSVivian Wang 		}
786*bfec6d7fSVivian Wang 
787*bfec6d7fSVivian Wang 		if (f == 0)
788*bfec6d7fSVivian Wang 			tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;
789*bfec6d7fSVivian Wang 
790*bfec6d7fSVivian Wang 		if (f == frag_num) {
791*bfec6d7fSVivian Wang 			tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
792*bfec6d7fSVivian Wang 			tx_buf->skb = skb;
793*bfec6d7fSVivian Wang 			if (emac_tx_should_interrupt(priv, frag_num + 1))
794*bfec6d7fSVivian Wang 				tx_desc.desc1 |=
795*bfec6d7fSVivian Wang 					TX_DESC_1_INTERRUPT_ON_COMPLETION;
796*bfec6d7fSVivian Wang 		}
797*bfec6d7fSVivian Wang 
798*bfec6d7fSVivian Wang 		*tx_desc_addr = tx_desc;
799*bfec6d7fSVivian Wang 	}
800*bfec6d7fSVivian Wang 
801*bfec6d7fSVivian Wang 	/* All descriptors are ready, give ownership for first desc */
802*bfec6d7fSVivian Wang 	tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
803*bfec6d7fSVivian Wang 	dma_wmb();
804*bfec6d7fSVivian Wang 	WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);
805*bfec6d7fSVivian Wang 
806*bfec6d7fSVivian Wang 	emac_dma_start_transmit(priv);
807*bfec6d7fSVivian Wang 
808*bfec6d7fSVivian Wang 	tx_ring->head = head;
809*bfec6d7fSVivian Wang 
810*bfec6d7fSVivian Wang 	return;
811*bfec6d7fSVivian Wang 
812*bfec6d7fSVivian Wang err_free_skb:
813*bfec6d7fSVivian Wang 	dev_dstats_tx_dropped(priv->ndev);
814*bfec6d7fSVivian Wang 	dev_kfree_skb_any(skb);
815*bfec6d7fSVivian Wang }
816*bfec6d7fSVivian Wang 
817*bfec6d7fSVivian Wang static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
818*bfec6d7fSVivian Wang {
819*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(ndev);
820*bfec6d7fSVivian Wang 	int nfrags = skb_shinfo(skb)->nr_frags;
821*bfec6d7fSVivian Wang 	struct device *dev = &priv->pdev->dev;
822*bfec6d7fSVivian Wang 
823*bfec6d7fSVivian Wang 	if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
824*bfec6d7fSVivian Wang 		if (!netif_queue_stopped(ndev)) {
825*bfec6d7fSVivian Wang 			netif_stop_queue(ndev);
826*bfec6d7fSVivian Wang 			dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
827*bfec6d7fSVivian Wang 		}
828*bfec6d7fSVivian Wang 		return NETDEV_TX_BUSY;
829*bfec6d7fSVivian Wang 	}
830*bfec6d7fSVivian Wang 
831*bfec6d7fSVivian Wang 	emac_tx_mem_map(priv, skb);
832*bfec6d7fSVivian Wang 
833*bfec6d7fSVivian Wang 	/* Make sure there is space in the ring for the next TX. */
834*bfec6d7fSVivian Wang 	if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
835*bfec6d7fSVivian Wang 		netif_stop_queue(ndev);
836*bfec6d7fSVivian Wang 
837*bfec6d7fSVivian Wang 	return NETDEV_TX_OK;
838*bfec6d7fSVivian Wang }
839*bfec6d7fSVivian Wang 
840*bfec6d7fSVivian Wang static int emac_set_mac_address(struct net_device *ndev, void *addr)
841*bfec6d7fSVivian Wang {
842*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(ndev);
843*bfec6d7fSVivian Wang 	int ret = eth_mac_addr(ndev, addr);
844*bfec6d7fSVivian Wang 
845*bfec6d7fSVivian Wang 	if (ret)
846*bfec6d7fSVivian Wang 		return ret;
847*bfec6d7fSVivian Wang 
848*bfec6d7fSVivian Wang 	/* If running, set now; if not running it will be set in emac_up. */
849*bfec6d7fSVivian Wang 	if (netif_running(ndev))
850*bfec6d7fSVivian Wang 		emac_set_mac_addr(priv, ndev->dev_addr);
851*bfec6d7fSVivian Wang 
852*bfec6d7fSVivian Wang 	return 0;
853*bfec6d7fSVivian Wang }
854*bfec6d7fSVivian Wang 
/* Clear all 64 bits of the multicast hash filter (4 x 16-bit registers) */
static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
{
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
}
862*bfec6d7fSVivian Wang 
863*bfec6d7fSVivian Wang /*
864*bfec6d7fSVivian Wang  * The upper 6 bits of the Ethernet CRC of the MAC address is used as the hash
865*bfec6d7fSVivian Wang  * when matching multicast addresses.
866*bfec6d7fSVivian Wang  */
867*bfec6d7fSVivian Wang static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
868*bfec6d7fSVivian Wang {
869*bfec6d7fSVivian Wang 	u32 crc32 = ether_crc(ETH_ALEN, addr);
870*bfec6d7fSVivian Wang 
871*bfec6d7fSVivian Wang 	return crc32 >> 26;
872*bfec6d7fSVivian Wang }
873*bfec6d7fSVivian Wang 
/*
 * Configure Multicast and Promiscuous modes.
 *
 * Promiscuous mode wins outright; otherwise accept all multicast frames
 * (IFF_ALLMULTI, or more groups than the hash table can express) or
 * program the 64-bit multicast hash filter from the current group list.
 */
static void emac_set_rx_mode(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	u32 mc_filter[4] = { 0 };
	u32 hash, reg, bit, val;

	val = emac_rd(priv, MAC_ADDRESS_CONTROL);

	val &= ~MREGBIT_PROMISCUOUS_MODE;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promisc mode */
		val |= MREGBIT_PROMISCUOUS_MODE;
	} else if ((ndev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
		/* Accept all multicast frames by setting every bit */
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
	} else if (!netdev_mc_empty(ndev)) {
		emac_mac_multicast_filter_clear(priv);
		netdev_for_each_mc_addr(ha, ndev) {
			/*
			 * The hash table is an array of 4 16-bit registers. It
			 * is treated like an array of 64 bits (bits[hash]).
			 */
			hash = emac_ether_addr_hash(ha->addr);
			reg = hash / 16;
			bit = hash % 16;
			mc_filter[reg] |= BIT(bit);
		}
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
	}

	emac_wr(priv, MAC_ADDRESS_CONTROL, val);
}
916*bfec6d7fSVivian Wang 
917*bfec6d7fSVivian Wang static int emac_change_mtu(struct net_device *ndev, int mtu)
918*bfec6d7fSVivian Wang {
919*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(ndev);
920*bfec6d7fSVivian Wang 	u32 frame_len;
921*bfec6d7fSVivian Wang 
922*bfec6d7fSVivian Wang 	if (netif_running(ndev)) {
923*bfec6d7fSVivian Wang 		netdev_err(ndev, "must be stopped to change MTU\n");
924*bfec6d7fSVivian Wang 		return -EBUSY;
925*bfec6d7fSVivian Wang 	}
926*bfec6d7fSVivian Wang 
927*bfec6d7fSVivian Wang 	frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;
928*bfec6d7fSVivian Wang 
929*bfec6d7fSVivian Wang 	if (frame_len <= EMAC_DEFAULT_BUFSIZE)
930*bfec6d7fSVivian Wang 		priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
931*bfec6d7fSVivian Wang 	else if (frame_len <= EMAC_RX_BUF_2K)
932*bfec6d7fSVivian Wang 		priv->dma_buf_sz = EMAC_RX_BUF_2K;
933*bfec6d7fSVivian Wang 	else
934*bfec6d7fSVivian Wang 		priv->dma_buf_sz = EMAC_RX_BUF_4K;
935*bfec6d7fSVivian Wang 
936*bfec6d7fSVivian Wang 	ndev->mtu = mtu;
937*bfec6d7fSVivian Wang 
938*bfec6d7fSVivian Wang 	return 0;
939*bfec6d7fSVivian Wang }
940*bfec6d7fSVivian Wang 
/*
 * ndo_tx_timeout: the stack detected a stalled TX queue. Defer the
 * recovery to a workqueue, which provides a sleepable context.
 */
static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
947*bfec6d7fSVivian Wang 
/*
 * Read a PHY register over the MDIO bus.
 *
 * Returns the 16-bit register value on success, or a negative errno if
 * the transfer does not complete within the polling timeout.
 */
static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;

	emac_wr(priv, MAC_MDIO_DATA, 0x0);
	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	/* The start bit clears once the transfer finishes */
	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	if (ret)
		return ret;

	val = emac_rd(priv, MAC_MDIO_DATA);
	return FIELD_GET(MREGBIT_MDIO_DATA, val);
}
970*bfec6d7fSVivian Wang 
/*
 * Write a 16-bit value to a PHY register over the MDIO bus.
 *
 * Returns 0 on success, or a negative errno if the transfer does not
 * complete within the polling timeout.
 */
static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
			  u16 value)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	emac_wr(priv, MAC_MDIO_DATA, value);

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS;

	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	/* The start bit clears once the transfer finishes */
	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	return ret;
}
991*bfec6d7fSVivian Wang 
/*
 * Allocate and register the MDIO bus for this MAC.
 *
 * The bus is registered against the "mdio-bus" child node of the device;
 * of_get_available_child_by_name() may return NULL, which is presumably
 * tolerated by devm_of_mdiobus_register() — confirm against its contract.
 *
 * Returns 0 on success or a negative errno.
 */
static int emac_mdio_init(struct emac_priv *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct device_node *mii_np;
	struct mii_bus *mii;
	int ret;

	mii = devm_mdiobus_alloc(dev);
	if (!mii)
		return -ENOMEM;

	mii->priv = priv;
	mii->name = "k1_emac_mii";
	mii->read = emac_mii_read;
	mii->write = emac_mii_write;
	mii->parent = dev;
	/* Mask every address from auto-probing */
	mii->phy_mask = ~0;
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);

	mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");

	ret = devm_of_mdiobus_register(dev, mii, mii_np);
	if (ret)
		dev_err_probe(dev, ret, "Failed to register mdio bus\n");

	of_node_put(mii_np);
	return ret;
}
1020*bfec6d7fSVivian Wang 
1021*bfec6d7fSVivian Wang static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
1022*bfec6d7fSVivian Wang {
1023*bfec6d7fSVivian Wang 	u32 val;
1024*bfec6d7fSVivian Wang 
1025*bfec6d7fSVivian Wang 	val = emac_rd(priv, MAC_FC_CONTROL);
1026*bfec6d7fSVivian Wang 
1027*bfec6d7fSVivian Wang 	FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
1028*bfec6d7fSVivian Wang 	FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
1029*bfec6d7fSVivian Wang 
1030*bfec6d7fSVivian Wang 	emac_wr(priv, MAC_FC_CONTROL, val);
1031*bfec6d7fSVivian Wang }
1032*bfec6d7fSVivian Wang 
/* Enable/disable decoding of received pause frames (RX flow control) */
static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
{
	u32 val = emac_rd(priv, MAC_FC_CONTROL);

	FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);

	emac_wr(priv, MAC_FC_CONTROL, val);
}
1041*bfec6d7fSVivian Wang 
/*
 * Apply the requested flow control configuration (@fc is a mask of
 * FLOW_CTRL_TX/FLOW_CTRL_RX) and record it in priv->flow_control.
 */
static void emac_set_fc(struct emac_priv *priv, u8 fc)
{
	emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
	emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
	priv->flow_control = fc;
}
1048*bfec6d7fSVivian Wang 
1049*bfec6d7fSVivian Wang static void emac_set_fc_autoneg(struct emac_priv *priv)
1050*bfec6d7fSVivian Wang {
1051*bfec6d7fSVivian Wang 	struct phy_device *phydev = priv->ndev->phydev;
1052*bfec6d7fSVivian Wang 	u32 local_adv, remote_adv;
1053*bfec6d7fSVivian Wang 	u8 fc;
1054*bfec6d7fSVivian Wang 
1055*bfec6d7fSVivian Wang 	local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1056*bfec6d7fSVivian Wang 
1057*bfec6d7fSVivian Wang 	remote_adv = 0;
1058*bfec6d7fSVivian Wang 
1059*bfec6d7fSVivian Wang 	if (phydev->pause)
1060*bfec6d7fSVivian Wang 		remote_adv |= LPA_PAUSE_CAP;
1061*bfec6d7fSVivian Wang 
1062*bfec6d7fSVivian Wang 	if (phydev->asym_pause)
1063*bfec6d7fSVivian Wang 		remote_adv |= LPA_PAUSE_ASYM;
1064*bfec6d7fSVivian Wang 
1065*bfec6d7fSVivian Wang 	fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
1066*bfec6d7fSVivian Wang 
1067*bfec6d7fSVivian Wang 	priv->flow_control_autoneg = true;
1068*bfec6d7fSVivian Wang 
1069*bfec6d7fSVivian Wang 	emac_set_fc(priv, fc);
1070*bfec6d7fSVivian Wang }
1071*bfec6d7fSVivian Wang 
1072*bfec6d7fSVivian Wang /*
1073*bfec6d7fSVivian Wang  * Even though this MAC supports gigabit operation, it only provides 32-bit
1074*bfec6d7fSVivian Wang  * statistics counters. The most overflow-prone counters are the "bytes" ones,
1075*bfec6d7fSVivian Wang  * which at gigabit overflow about twice a minute.
1076*bfec6d7fSVivian Wang  *
1077*bfec6d7fSVivian Wang  * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
1078*bfec6d7fSVivian Wang  * every time statistics seem to go backwards. Also, update periodically to
1079*bfec6d7fSVivian Wang  * catch overflows when we are not otherwise checking the statistics often
1080*bfec6d7fSVivian Wang  * enough.
1081*bfec6d7fSVivian Wang  */
1082*bfec6d7fSVivian Wang 
1083*bfec6d7fSVivian Wang #define EMAC_STATS_TIMER_PERIOD		20
1084*bfec6d7fSVivian Wang 
/*
 * Read one 32-bit hardware statistics counter.
 *
 * @cnt selects the counter; @control_reg/@high_reg/@low_reg select the
 * TX or RX register set. The value is returned in *@res.
 *
 * Returns 0 on success, or a negative errno if the hardware does not
 * complete the read within the polling timeout.
 */
static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
			      u32 control_reg, u32 high_reg, u32 low_reg)
{
	u32 val, high, low;
	int ret;

	/* The "read" bit is the same for TX and RX */

	val = MREGBIT_START_TX_COUNTER_READ | cnt;
	emac_wr(priv, control_reg, val);
	/* NOTE(review): read-back looks redundant given the poll below — possibly a posting read; confirm */
	val = emac_rd(priv, control_reg);

	ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
					!(val & MREGBIT_START_TX_COUNTER_READ),
					100, 10000);

	if (ret) {
		netdev_err(priv->ndev, "Read stat timeout\n");
		return ret;
	}

	/* The counter value is split 16 bits per data register */
	high = emac_rd(priv, high_reg);
	low = emac_rd(priv, low_reg);
	*res = high << 16 | lower_16_bits(low);

	return 0;
}
1112*bfec6d7fSVivian Wang 
/* Read TX statistics counter @cnt into *@res */
static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
				  MAC_TX_STATCTR_DATA_HIGH,
				  MAC_TX_STATCTR_DATA_LOW);
}
1119*bfec6d7fSVivian Wang 
/* Read RX statistics counter @cnt into *@res */
static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
				  MAC_RX_STATCTR_DATA_HIGH,
				  MAC_RX_STATCTR_DATA_LOW);
}
1126*bfec6d7fSVivian Wang 
1127*bfec6d7fSVivian Wang static void emac_update_counter(u64 *counter, u32 new_low)
1128*bfec6d7fSVivian Wang {
1129*bfec6d7fSVivian Wang 	u32 old_low = lower_32_bits(*counter);
1130*bfec6d7fSVivian Wang 	u64 high = upper_32_bits(*counter);
1131*bfec6d7fSVivian Wang 
1132*bfec6d7fSVivian Wang 	if (old_low > new_low) {
1133*bfec6d7fSVivian Wang 		/* Overflowed, increment high 32 bits */
1134*bfec6d7fSVivian Wang 		high++;
1135*bfec6d7fSVivian Wang 	}
1136*bfec6d7fSVivian Wang 
1137*bfec6d7fSVivian Wang 	*counter = (high << 32) | new_low;
1138*bfec6d7fSVivian Wang }
1139*bfec6d7fSVivian Wang 
/*
 * Refresh the 64-bit software statistics from the 32-bit hardware
 * counters and re-arm the periodic refresh timer.
 *
 * Caller must hold priv->stats_lock. Does nothing when the interface is
 * down or detached, and bails out (without re-arming the timer) if a
 * hardware counter read times out.
 */
static void emac_stats_update(struct emac_priv *priv)
{
	u64 *tx_stats_off = priv->tx_stats_off.array;
	u64 *rx_stats_off = priv->rx_stats_off.array;
	u64 *tx_stats = priv->tx_stats.array;
	u64 *rx_stats = priv->rx_stats.array;
	u32 i, res, offset;

	assert_spin_locked(&priv->stats_lock);

	if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
		/* Not up, don't try to update */
		return;
	}

	for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
		/*
		 * If reading stats times out, everything is broken and there's
		 * nothing we can do. Reading statistics also can't return an
		 * error, so just return without updating and without
		 * rescheduling.
		 */
		if (emac_tx_read_stat_cnt(priv, i, &res))
			return;

		/*
		 * Re-initializing while bringing interface up resets counters
		 * to zero, so to provide continuity, we add the values saved
		 * last time we did emac_down() to the new hardware-provided
		 * value.
		 */
		offset = lower_32_bits(tx_stats_off[i]);
		emac_update_counter(&tx_stats[i], res + offset);
	}

	/* Similar remarks as TX stats */
	for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
		if (emac_rx_read_stat_cnt(priv, i, &res))
			return;
		offset = lower_32_bits(rx_stats_off[i]);
		emac_update_counter(&rx_stats[i], res + offset);
	}

	mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
}
1185*bfec6d7fSVivian Wang 
/*
 * Periodic timer callback: refresh the statistics so wraps of the 32-bit
 * hardware counters are not missed between explicit stat reads.
 */
static void emac_stats_timer(struct timer_list *t)
{
	struct emac_priv *priv = timer_container_of(priv, t, stats_timer);

	spin_lock(&priv->stats_lock);

	emac_stats_update(priv);

	spin_unlock(&priv->stats_lock);
}
1196*bfec6d7fSVivian Wang 
/* Packet-size buckets reported by emac_get_rmon_stats() (RX-only histogram) */
static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
	{   64,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1518 },
	{ 1519, 4096 },
	{ /* sentinel */ },
};
1207*bfec6d7fSVivian Wang 
/* Like dev_fetch_dstats(), but we only use tx_drops */
static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
{
	const struct pcpu_dstats *stats;
	u64 tx_drops, total = 0;
	unsigned int start;
	int cpu;

	/* Sum the per-CPU drop counters */
	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(priv->ndev->dstats, cpu);
		/* Retry if a writer updated this CPU's stats mid-read */
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_drops = u64_stats_read(&stats->tx_drops);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		total += tx_drops;
	}

	return total;
}
1228*bfec6d7fSVivian Wang 
/*
 * ndo_get_stats64: fill @storage from the accumulated hardware counters.
 *
 * tx_dropped is the only software-maintained counter; all others are
 * refreshed from hardware under the stats lock before being reported.
 */
static void emac_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *storage)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_tx_stats *tx_stats;
	union emac_hw_rx_stats *rx_stats;

	tx_stats = &priv->tx_stats;
	rx_stats = &priv->rx_stats;

	/* This is the only software counter */
	storage->tx_dropped = emac_get_stat_tx_drops(priv);

	spin_lock(&priv->stats_lock);

	emac_stats_update(priv);

	storage->tx_packets = tx_stats->stats.tx_ok_pkts;
	storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
	storage->tx_errors = tx_stats->stats.tx_err_pkts;

	storage->rx_packets = rx_stats->stats.rx_ok_pkts;
	storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
	storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
	storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
	storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
	storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;

	/* Collisions of all severities are reported as one sum */
	storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
	storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
	storage->collisions += tx_stats->stats.tx_excessclsn_pkts;

	/* Frames lost to FIFO overrun, dropped or truncated */
	storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
	storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;

	spin_unlock(&priv->stats_lock);
}
1266*bfec6d7fSVivian Wang 
/*
 * ethtool get_rmon_stats: report RMON counters. The histogram buckets
 * correspond to emac_rmon_hist_ranges and exist for RX only.
 */
static void emac_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct emac_priv *priv = netdev_priv(dev);
	union emac_hw_rx_stats *rx_stats;

	rx_stats = &priv->rx_stats;

	*ranges = emac_rmon_hist_ranges;

	spin_lock(&priv->stats_lock);

	emac_stats_update(priv);

	rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
	rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
	rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
	rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;

	/* Only RX has histogram stats */

	rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
	rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
	rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
	rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
	rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
	rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
	rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;

	spin_unlock(&priv->stats_lock);
}
1299*bfec6d7fSVivian Wang 
1300*bfec6d7fSVivian Wang static void emac_get_eth_mac_stats(struct net_device *dev,
1301*bfec6d7fSVivian Wang 				   struct ethtool_eth_mac_stats *mac_stats)
1302*bfec6d7fSVivian Wang {
1303*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(dev);
1304*bfec6d7fSVivian Wang 	union emac_hw_tx_stats *tx_stats;
1305*bfec6d7fSVivian Wang 	union emac_hw_rx_stats *rx_stats;
1306*bfec6d7fSVivian Wang 
1307*bfec6d7fSVivian Wang 	tx_stats = &priv->tx_stats;
1308*bfec6d7fSVivian Wang 	rx_stats = &priv->rx_stats;
1309*bfec6d7fSVivian Wang 
1310*bfec6d7fSVivian Wang 	spin_lock(&priv->stats_lock);
1311*bfec6d7fSVivian Wang 
1312*bfec6d7fSVivian Wang 	emac_stats_update(priv);
1313*bfec6d7fSVivian Wang 
1314*bfec6d7fSVivian Wang 	mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
1315*bfec6d7fSVivian Wang 	mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
1316*bfec6d7fSVivian Wang 
1317*bfec6d7fSVivian Wang 	mac_stats->MulticastFramesReceivedOK =
1318*bfec6d7fSVivian Wang 		rx_stats->stats.rx_multicast_pkts;
1319*bfec6d7fSVivian Wang 	mac_stats->BroadcastFramesReceivedOK =
1320*bfec6d7fSVivian Wang 		rx_stats->stats.rx_broadcast_pkts;
1321*bfec6d7fSVivian Wang 
1322*bfec6d7fSVivian Wang 	mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
1323*bfec6d7fSVivian Wang 	mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
1324*bfec6d7fSVivian Wang 	mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
1325*bfec6d7fSVivian Wang 	mac_stats->FramesAbortedDueToXSColls =
1326*bfec6d7fSVivian Wang 		tx_stats->stats.tx_excessclsn_pkts;
1327*bfec6d7fSVivian Wang 
1328*bfec6d7fSVivian Wang 	spin_unlock(&priv->stats_lock);
1329*bfec6d7fSVivian Wang }
1330*bfec6d7fSVivian Wang 
1331*bfec6d7fSVivian Wang static void emac_get_pause_stats(struct net_device *dev,
1332*bfec6d7fSVivian Wang 				 struct ethtool_pause_stats *pause_stats)
1333*bfec6d7fSVivian Wang {
1334*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(dev);
1335*bfec6d7fSVivian Wang 	union emac_hw_tx_stats *tx_stats;
1336*bfec6d7fSVivian Wang 	union emac_hw_rx_stats *rx_stats;
1337*bfec6d7fSVivian Wang 
1338*bfec6d7fSVivian Wang 	tx_stats = &priv->tx_stats;
1339*bfec6d7fSVivian Wang 	rx_stats = &priv->rx_stats;
1340*bfec6d7fSVivian Wang 
1341*bfec6d7fSVivian Wang 	spin_lock(&priv->stats_lock);
1342*bfec6d7fSVivian Wang 
1343*bfec6d7fSVivian Wang 	emac_stats_update(priv);
1344*bfec6d7fSVivian Wang 
1345*bfec6d7fSVivian Wang 	pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
1346*bfec6d7fSVivian Wang 	pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
1347*bfec6d7fSVivian Wang 
1348*bfec6d7fSVivian Wang 	spin_unlock(&priv->stats_lock);
1349*bfec6d7fSVivian Wang }
1350*bfec6d7fSVivian Wang 
1351*bfec6d7fSVivian Wang /* Other statistics that are not derivable from standard statistics */
1352*bfec6d7fSVivian Wang 
1353*bfec6d7fSVivian Wang #define EMAC_ETHTOOL_STAT(type, name) \
1354*bfec6d7fSVivian Wang 	{ offsetof(type, stats.name) / sizeof(u64), #name }
1355*bfec6d7fSVivian Wang 
1356*bfec6d7fSVivian Wang static const struct emac_ethtool_stats {
1357*bfec6d7fSVivian Wang 	size_t offset;
1358*bfec6d7fSVivian Wang 	char str[ETH_GSTRING_LEN];
1359*bfec6d7fSVivian Wang } emac_ethtool_rx_stats[] = {
1360*bfec6d7fSVivian Wang 	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
1361*bfec6d7fSVivian Wang 	EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
1362*bfec6d7fSVivian Wang };
1363*bfec6d7fSVivian Wang 
1364*bfec6d7fSVivian Wang static int emac_get_sset_count(struct net_device *dev, int sset)
1365*bfec6d7fSVivian Wang {
1366*bfec6d7fSVivian Wang 	switch (sset) {
1367*bfec6d7fSVivian Wang 	case ETH_SS_STATS:
1368*bfec6d7fSVivian Wang 		return ARRAY_SIZE(emac_ethtool_rx_stats);
1369*bfec6d7fSVivian Wang 	default:
1370*bfec6d7fSVivian Wang 		return -EOPNOTSUPP;
1371*bfec6d7fSVivian Wang 	}
1372*bfec6d7fSVivian Wang }
1373*bfec6d7fSVivian Wang 
1374*bfec6d7fSVivian Wang static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1375*bfec6d7fSVivian Wang {
1376*bfec6d7fSVivian Wang 	int i;
1377*bfec6d7fSVivian Wang 
1378*bfec6d7fSVivian Wang 	switch (stringset) {
1379*bfec6d7fSVivian Wang 	case ETH_SS_STATS:
1380*bfec6d7fSVivian Wang 		for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
1381*bfec6d7fSVivian Wang 			memcpy(data, emac_ethtool_rx_stats[i].str,
1382*bfec6d7fSVivian Wang 			       ETH_GSTRING_LEN);
1383*bfec6d7fSVivian Wang 			data += ETH_GSTRING_LEN;
1384*bfec6d7fSVivian Wang 		}
1385*bfec6d7fSVivian Wang 		break;
1386*bfec6d7fSVivian Wang 	}
1387*bfec6d7fSVivian Wang }
1388*bfec6d7fSVivian Wang 
1389*bfec6d7fSVivian Wang static void emac_get_ethtool_stats(struct net_device *dev,
1390*bfec6d7fSVivian Wang 				   struct ethtool_stats *stats, u64 *data)
1391*bfec6d7fSVivian Wang {
1392*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(dev);
1393*bfec6d7fSVivian Wang 	u64 *rx_stats = (u64 *)&priv->rx_stats;
1394*bfec6d7fSVivian Wang 	int i;
1395*bfec6d7fSVivian Wang 
1396*bfec6d7fSVivian Wang 	spin_lock(&priv->stats_lock);
1397*bfec6d7fSVivian Wang 
1398*bfec6d7fSVivian Wang 	emac_stats_update(priv);
1399*bfec6d7fSVivian Wang 
1400*bfec6d7fSVivian Wang 	for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
1401*bfec6d7fSVivian Wang 		data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
1402*bfec6d7fSVivian Wang 
1403*bfec6d7fSVivian Wang 	spin_unlock(&priv->stats_lock);
1404*bfec6d7fSVivian Wang }
1405*bfec6d7fSVivian Wang 
1406*bfec6d7fSVivian Wang static int emac_ethtool_get_regs_len(struct net_device *dev)
1407*bfec6d7fSVivian Wang {
1408*bfec6d7fSVivian Wang 	return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
1409*bfec6d7fSVivian Wang }
1410*bfec6d7fSVivian Wang 
1411*bfec6d7fSVivian Wang static void emac_ethtool_get_regs(struct net_device *dev,
1412*bfec6d7fSVivian Wang 				  struct ethtool_regs *regs, void *space)
1413*bfec6d7fSVivian Wang {
1414*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(dev);
1415*bfec6d7fSVivian Wang 	u32 *reg_space = space;
1416*bfec6d7fSVivian Wang 	int i;
1417*bfec6d7fSVivian Wang 
1418*bfec6d7fSVivian Wang 	regs->version = 1;
1419*bfec6d7fSVivian Wang 
1420*bfec6d7fSVivian Wang 	for (i = 0; i < EMAC_DMA_REG_CNT; i++)
1421*bfec6d7fSVivian Wang 		reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
1422*bfec6d7fSVivian Wang 
1423*bfec6d7fSVivian Wang 	for (i = 0; i < EMAC_MAC_REG_CNT; i++)
1424*bfec6d7fSVivian Wang 		reg_space[i + EMAC_DMA_REG_CNT] =
1425*bfec6d7fSVivian Wang 			emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
1426*bfec6d7fSVivian Wang }
1427*bfec6d7fSVivian Wang 
1428*bfec6d7fSVivian Wang static void emac_get_pauseparam(struct net_device *dev,
1429*bfec6d7fSVivian Wang 				struct ethtool_pauseparam *pause)
1430*bfec6d7fSVivian Wang {
1431*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(dev);
1432*bfec6d7fSVivian Wang 
1433*bfec6d7fSVivian Wang 	pause->autoneg = priv->flow_control_autoneg;
1434*bfec6d7fSVivian Wang 	pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
1435*bfec6d7fSVivian Wang 	pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
1436*bfec6d7fSVivian Wang }
1437*bfec6d7fSVivian Wang 
1438*bfec6d7fSVivian Wang static int emac_set_pauseparam(struct net_device *dev,
1439*bfec6d7fSVivian Wang 			       struct ethtool_pauseparam *pause)
1440*bfec6d7fSVivian Wang {
1441*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(dev);
1442*bfec6d7fSVivian Wang 	u8 fc = 0;
1443*bfec6d7fSVivian Wang 
1444*bfec6d7fSVivian Wang 	priv->flow_control_autoneg = pause->autoneg;
1445*bfec6d7fSVivian Wang 
1446*bfec6d7fSVivian Wang 	if (pause->autoneg) {
1447*bfec6d7fSVivian Wang 		emac_set_fc_autoneg(priv);
1448*bfec6d7fSVivian Wang 	} else {
1449*bfec6d7fSVivian Wang 		if (pause->tx_pause)
1450*bfec6d7fSVivian Wang 			fc |= FLOW_CTRL_TX;
1451*bfec6d7fSVivian Wang 
1452*bfec6d7fSVivian Wang 		if (pause->rx_pause)
1453*bfec6d7fSVivian Wang 			fc |= FLOW_CTRL_RX;
1454*bfec6d7fSVivian Wang 
1455*bfec6d7fSVivian Wang 		emac_set_fc(priv, fc);
1456*bfec6d7fSVivian Wang 	}
1457*bfec6d7fSVivian Wang 
1458*bfec6d7fSVivian Wang 	return 0;
1459*bfec6d7fSVivian Wang }
1460*bfec6d7fSVivian Wang 
1461*bfec6d7fSVivian Wang static void emac_get_drvinfo(struct net_device *dev,
1462*bfec6d7fSVivian Wang 			     struct ethtool_drvinfo *info)
1463*bfec6d7fSVivian Wang {
1464*bfec6d7fSVivian Wang 	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1465*bfec6d7fSVivian Wang 	info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
1466*bfec6d7fSVivian Wang }
1467*bfec6d7fSVivian Wang 
1468*bfec6d7fSVivian Wang static void emac_tx_timeout_task(struct work_struct *work)
1469*bfec6d7fSVivian Wang {
1470*bfec6d7fSVivian Wang 	struct net_device *ndev;
1471*bfec6d7fSVivian Wang 	struct emac_priv *priv;
1472*bfec6d7fSVivian Wang 
1473*bfec6d7fSVivian Wang 	priv = container_of(work, struct emac_priv, tx_timeout_task);
1474*bfec6d7fSVivian Wang 	ndev = priv->ndev;
1475*bfec6d7fSVivian Wang 
1476*bfec6d7fSVivian Wang 	rtnl_lock();
1477*bfec6d7fSVivian Wang 
1478*bfec6d7fSVivian Wang 	/* No need to reset if already down */
1479*bfec6d7fSVivian Wang 	if (!netif_running(ndev)) {
1480*bfec6d7fSVivian Wang 		rtnl_unlock();
1481*bfec6d7fSVivian Wang 		return;
1482*bfec6d7fSVivian Wang 	}
1483*bfec6d7fSVivian Wang 
1484*bfec6d7fSVivian Wang 	netdev_err(ndev, "MAC reset due to TX timeout\n");
1485*bfec6d7fSVivian Wang 
1486*bfec6d7fSVivian Wang 	netif_trans_update(ndev); /* prevent tx timeout */
1487*bfec6d7fSVivian Wang 	dev_close(ndev);
1488*bfec6d7fSVivian Wang 	dev_open(ndev, NULL);
1489*bfec6d7fSVivian Wang 
1490*bfec6d7fSVivian Wang 	rtnl_unlock();
1491*bfec6d7fSVivian Wang }
1492*bfec6d7fSVivian Wang 
1493*bfec6d7fSVivian Wang static void emac_sw_init(struct emac_priv *priv)
1494*bfec6d7fSVivian Wang {
1495*bfec6d7fSVivian Wang 	priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
1496*bfec6d7fSVivian Wang 
1497*bfec6d7fSVivian Wang 	priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
1498*bfec6d7fSVivian Wang 	priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;
1499*bfec6d7fSVivian Wang 
1500*bfec6d7fSVivian Wang 	spin_lock_init(&priv->stats_lock);
1501*bfec6d7fSVivian Wang 
1502*bfec6d7fSVivian Wang 	INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
1503*bfec6d7fSVivian Wang 
1504*bfec6d7fSVivian Wang 	priv->tx_coal_frames = EMAC_TX_FRAMES;
1505*bfec6d7fSVivian Wang 	priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;
1506*bfec6d7fSVivian Wang 
1507*bfec6d7fSVivian Wang 	timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
1508*bfec6d7fSVivian Wang 	timer_setup(&priv->stats_timer, emac_stats_timer, 0);
1509*bfec6d7fSVivian Wang }
1510*bfec6d7fSVivian Wang 
1511*bfec6d7fSVivian Wang static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
1512*bfec6d7fSVivian Wang {
1513*bfec6d7fSVivian Wang 	struct net_device *ndev = (struct net_device *)dev_id;
1514*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(ndev);
1515*bfec6d7fSVivian Wang 	bool should_schedule = false;
1516*bfec6d7fSVivian Wang 	u32 clr = 0;
1517*bfec6d7fSVivian Wang 	u32 status;
1518*bfec6d7fSVivian Wang 
1519*bfec6d7fSVivian Wang 	status = emac_rd(priv, DMA_STATUS_IRQ);
1520*bfec6d7fSVivian Wang 
1521*bfec6d7fSVivian Wang 	if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
1522*bfec6d7fSVivian Wang 		clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
1523*bfec6d7fSVivian Wang 		should_schedule = true;
1524*bfec6d7fSVivian Wang 	}
1525*bfec6d7fSVivian Wang 
1526*bfec6d7fSVivian Wang 	if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
1527*bfec6d7fSVivian Wang 		clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
1528*bfec6d7fSVivian Wang 
1529*bfec6d7fSVivian Wang 	if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
1530*bfec6d7fSVivian Wang 		clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
1531*bfec6d7fSVivian Wang 
1532*bfec6d7fSVivian Wang 	if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
1533*bfec6d7fSVivian Wang 		clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
1534*bfec6d7fSVivian Wang 		should_schedule = true;
1535*bfec6d7fSVivian Wang 	}
1536*bfec6d7fSVivian Wang 
1537*bfec6d7fSVivian Wang 	if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
1538*bfec6d7fSVivian Wang 		clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
1539*bfec6d7fSVivian Wang 
1540*bfec6d7fSVivian Wang 	if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
1541*bfec6d7fSVivian Wang 		clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
1542*bfec6d7fSVivian Wang 
1543*bfec6d7fSVivian Wang 	if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
1544*bfec6d7fSVivian Wang 		clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
1545*bfec6d7fSVivian Wang 
1546*bfec6d7fSVivian Wang 	if (should_schedule) {
1547*bfec6d7fSVivian Wang 		if (napi_schedule_prep(&priv->napi)) {
1548*bfec6d7fSVivian Wang 			emac_disable_interrupt(priv);
1549*bfec6d7fSVivian Wang 			__napi_schedule_irqoff(&priv->napi);
1550*bfec6d7fSVivian Wang 		}
1551*bfec6d7fSVivian Wang 	}
1552*bfec6d7fSVivian Wang 
1553*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_STATUS_IRQ, clr);
1554*bfec6d7fSVivian Wang 
1555*bfec6d7fSVivian Wang 	return IRQ_HANDLED;
1556*bfec6d7fSVivian Wang }
1557*bfec6d7fSVivian Wang 
1558*bfec6d7fSVivian Wang static void emac_configure_tx(struct emac_priv *priv)
1559*bfec6d7fSVivian Wang {
1560*bfec6d7fSVivian Wang 	u32 val;
1561*bfec6d7fSVivian Wang 
1562*bfec6d7fSVivian Wang 	/* Set base address */
1563*bfec6d7fSVivian Wang 	val = (u32)priv->tx_ring.desc_dma_addr;
1564*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
1565*bfec6d7fSVivian Wang 
1566*bfec6d7fSVivian Wang 	/* Set TX inter-frame gap value, enable transmit */
1567*bfec6d7fSVivian Wang 	val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
1568*bfec6d7fSVivian Wang 	val &= ~MREGBIT_IFG_LEN;
1569*bfec6d7fSVivian Wang 	val |= MREGBIT_TRANSMIT_ENABLE;
1570*bfec6d7fSVivian Wang 	val |= MREGBIT_TRANSMIT_AUTO_RETRY;
1571*bfec6d7fSVivian Wang 	emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
1572*bfec6d7fSVivian Wang 
1573*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);
1574*bfec6d7fSVivian Wang 
1575*bfec6d7fSVivian Wang 	/* Start TX DMA */
1576*bfec6d7fSVivian Wang 	val = emac_rd(priv, DMA_CONTROL);
1577*bfec6d7fSVivian Wang 	val |= MREGBIT_START_STOP_TRANSMIT_DMA;
1578*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_CONTROL, val);
1579*bfec6d7fSVivian Wang }
1580*bfec6d7fSVivian Wang 
1581*bfec6d7fSVivian Wang static void emac_configure_rx(struct emac_priv *priv)
1582*bfec6d7fSVivian Wang {
1583*bfec6d7fSVivian Wang 	u32 val;
1584*bfec6d7fSVivian Wang 
1585*bfec6d7fSVivian Wang 	/* Set base address */
1586*bfec6d7fSVivian Wang 	val = (u32)priv->rx_ring.desc_dma_addr;
1587*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
1588*bfec6d7fSVivian Wang 
1589*bfec6d7fSVivian Wang 	/* Enable receive */
1590*bfec6d7fSVivian Wang 	val = emac_rd(priv, MAC_RECEIVE_CONTROL);
1591*bfec6d7fSVivian Wang 	val |= MREGBIT_RECEIVE_ENABLE;
1592*bfec6d7fSVivian Wang 	val |= MREGBIT_STORE_FORWARD;
1593*bfec6d7fSVivian Wang 	emac_wr(priv, MAC_RECEIVE_CONTROL, val);
1594*bfec6d7fSVivian Wang 
1595*bfec6d7fSVivian Wang 	/* Start RX DMA */
1596*bfec6d7fSVivian Wang 	val = emac_rd(priv, DMA_CONTROL);
1597*bfec6d7fSVivian Wang 	val |= MREGBIT_START_STOP_RECEIVE_DMA;
1598*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_CONTROL, val);
1599*bfec6d7fSVivian Wang }
1600*bfec6d7fSVivian Wang 
1601*bfec6d7fSVivian Wang static void emac_adjust_link(struct net_device *dev)
1602*bfec6d7fSVivian Wang {
1603*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(dev);
1604*bfec6d7fSVivian Wang 	struct phy_device *phydev = dev->phydev;
1605*bfec6d7fSVivian Wang 	u32 ctrl;
1606*bfec6d7fSVivian Wang 
1607*bfec6d7fSVivian Wang 	if (phydev->link) {
1608*bfec6d7fSVivian Wang 		ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
1609*bfec6d7fSVivian Wang 
1610*bfec6d7fSVivian Wang 		/* Update duplex and speed from PHY */
1611*bfec6d7fSVivian Wang 
1612*bfec6d7fSVivian Wang 		FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
1613*bfec6d7fSVivian Wang 			     phydev->duplex == DUPLEX_FULL);
1614*bfec6d7fSVivian Wang 
1615*bfec6d7fSVivian Wang 		ctrl &= ~MREGBIT_SPEED;
1616*bfec6d7fSVivian Wang 
1617*bfec6d7fSVivian Wang 		switch (phydev->speed) {
1618*bfec6d7fSVivian Wang 		case SPEED_1000:
1619*bfec6d7fSVivian Wang 			ctrl |= MREGBIT_SPEED_1000M;
1620*bfec6d7fSVivian Wang 			break;
1621*bfec6d7fSVivian Wang 		case SPEED_100:
1622*bfec6d7fSVivian Wang 			ctrl |= MREGBIT_SPEED_100M;
1623*bfec6d7fSVivian Wang 			break;
1624*bfec6d7fSVivian Wang 		case SPEED_10:
1625*bfec6d7fSVivian Wang 			ctrl |= MREGBIT_SPEED_10M;
1626*bfec6d7fSVivian Wang 			break;
1627*bfec6d7fSVivian Wang 		default:
1628*bfec6d7fSVivian Wang 			netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
1629*bfec6d7fSVivian Wang 			phydev->speed = SPEED_UNKNOWN;
1630*bfec6d7fSVivian Wang 			break;
1631*bfec6d7fSVivian Wang 		}
1632*bfec6d7fSVivian Wang 
1633*bfec6d7fSVivian Wang 		emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
1634*bfec6d7fSVivian Wang 
1635*bfec6d7fSVivian Wang 		emac_set_fc_autoneg(priv);
1636*bfec6d7fSVivian Wang 	}
1637*bfec6d7fSVivian Wang 
1638*bfec6d7fSVivian Wang 	phy_print_status(phydev);
1639*bfec6d7fSVivian Wang }
1640*bfec6d7fSVivian Wang 
1641*bfec6d7fSVivian Wang static void emac_update_delay_line(struct emac_priv *priv)
1642*bfec6d7fSVivian Wang {
1643*bfec6d7fSVivian Wang 	u32 mask = 0, val = 0;
1644*bfec6d7fSVivian Wang 
1645*bfec6d7fSVivian Wang 	mask |= EMAC_RX_DLINE_EN;
1646*bfec6d7fSVivian Wang 	mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
1647*bfec6d7fSVivian Wang 	mask |= EMAC_TX_DLINE_EN;
1648*bfec6d7fSVivian Wang 	mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
1649*bfec6d7fSVivian Wang 
1650*bfec6d7fSVivian Wang 	if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
1651*bfec6d7fSVivian Wang 		val |= EMAC_RX_DLINE_EN;
1652*bfec6d7fSVivian Wang 		val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
1653*bfec6d7fSVivian Wang 				  EMAC_DLINE_STEP_15P6);
1654*bfec6d7fSVivian Wang 		val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
1655*bfec6d7fSVivian Wang 
1656*bfec6d7fSVivian Wang 		val |= EMAC_TX_DLINE_EN;
1657*bfec6d7fSVivian Wang 		val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
1658*bfec6d7fSVivian Wang 				  EMAC_DLINE_STEP_15P6);
1659*bfec6d7fSVivian Wang 		val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
1660*bfec6d7fSVivian Wang 	}
1661*bfec6d7fSVivian Wang 
1662*bfec6d7fSVivian Wang 	regmap_update_bits(priv->regmap_apmu,
1663*bfec6d7fSVivian Wang 			   priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
1664*bfec6d7fSVivian Wang 			   mask, val);
1665*bfec6d7fSVivian Wang }
1666*bfec6d7fSVivian Wang 
1667*bfec6d7fSVivian Wang static int emac_phy_connect(struct net_device *ndev)
1668*bfec6d7fSVivian Wang {
1669*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(ndev);
1670*bfec6d7fSVivian Wang 	struct device *dev = &priv->pdev->dev;
1671*bfec6d7fSVivian Wang 	struct phy_device *phydev;
1672*bfec6d7fSVivian Wang 	struct device_node *np;
1673*bfec6d7fSVivian Wang 	int ret;
1674*bfec6d7fSVivian Wang 
1675*bfec6d7fSVivian Wang 	ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
1676*bfec6d7fSVivian Wang 	if (ret) {
1677*bfec6d7fSVivian Wang 		netdev_err(ndev, "No phy-mode found");
1678*bfec6d7fSVivian Wang 		return ret;
1679*bfec6d7fSVivian Wang 	}
1680*bfec6d7fSVivian Wang 
1681*bfec6d7fSVivian Wang 	switch (priv->phy_interface) {
1682*bfec6d7fSVivian Wang 	case PHY_INTERFACE_MODE_RMII:
1683*bfec6d7fSVivian Wang 	case PHY_INTERFACE_MODE_RGMII:
1684*bfec6d7fSVivian Wang 	case PHY_INTERFACE_MODE_RGMII_ID:
1685*bfec6d7fSVivian Wang 	case PHY_INTERFACE_MODE_RGMII_RXID:
1686*bfec6d7fSVivian Wang 	case PHY_INTERFACE_MODE_RGMII_TXID:
1687*bfec6d7fSVivian Wang 		break;
1688*bfec6d7fSVivian Wang 	default:
1689*bfec6d7fSVivian Wang 		netdev_err(ndev, "Unsupported PHY interface %s",
1690*bfec6d7fSVivian Wang 			   phy_modes(priv->phy_interface));
1691*bfec6d7fSVivian Wang 		return -EINVAL;
1692*bfec6d7fSVivian Wang 	}
1693*bfec6d7fSVivian Wang 
1694*bfec6d7fSVivian Wang 	np = of_parse_phandle(dev->of_node, "phy-handle", 0);
1695*bfec6d7fSVivian Wang 	if (!np && of_phy_is_fixed_link(dev->of_node))
1696*bfec6d7fSVivian Wang 		np = of_node_get(dev->of_node);
1697*bfec6d7fSVivian Wang 
1698*bfec6d7fSVivian Wang 	if (!np) {
1699*bfec6d7fSVivian Wang 		netdev_err(ndev, "No PHY specified");
1700*bfec6d7fSVivian Wang 		return -ENODEV;
1701*bfec6d7fSVivian Wang 	}
1702*bfec6d7fSVivian Wang 
1703*bfec6d7fSVivian Wang 	ret = emac_phy_interface_config(priv);
1704*bfec6d7fSVivian Wang 	if (ret)
1705*bfec6d7fSVivian Wang 		goto err_node_put;
1706*bfec6d7fSVivian Wang 
1707*bfec6d7fSVivian Wang 	phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
1708*bfec6d7fSVivian Wang 				priv->phy_interface);
1709*bfec6d7fSVivian Wang 	if (!phydev) {
1710*bfec6d7fSVivian Wang 		netdev_err(ndev, "Could not attach to PHY\n");
1711*bfec6d7fSVivian Wang 		ret = -ENODEV;
1712*bfec6d7fSVivian Wang 		goto err_node_put;
1713*bfec6d7fSVivian Wang 	}
1714*bfec6d7fSVivian Wang 
1715*bfec6d7fSVivian Wang 	phy_support_asym_pause(phydev);
1716*bfec6d7fSVivian Wang 
1717*bfec6d7fSVivian Wang 	phydev->mac_managed_pm = true;
1718*bfec6d7fSVivian Wang 
1719*bfec6d7fSVivian Wang 	emac_update_delay_line(priv);
1720*bfec6d7fSVivian Wang 
1721*bfec6d7fSVivian Wang err_node_put:
1722*bfec6d7fSVivian Wang 	of_node_put(np);
1723*bfec6d7fSVivian Wang 	return ret;
1724*bfec6d7fSVivian Wang }
1725*bfec6d7fSVivian Wang 
1726*bfec6d7fSVivian Wang static int emac_up(struct emac_priv *priv)
1727*bfec6d7fSVivian Wang {
1728*bfec6d7fSVivian Wang 	struct platform_device *pdev = priv->pdev;
1729*bfec6d7fSVivian Wang 	struct net_device *ndev = priv->ndev;
1730*bfec6d7fSVivian Wang 	int ret;
1731*bfec6d7fSVivian Wang 
1732*bfec6d7fSVivian Wang 	pm_runtime_get_sync(&pdev->dev);
1733*bfec6d7fSVivian Wang 
1734*bfec6d7fSVivian Wang 	ret = emac_phy_connect(ndev);
1735*bfec6d7fSVivian Wang 	if (ret) {
1736*bfec6d7fSVivian Wang 		dev_err(&pdev->dev, "emac_phy_connect failed\n");
1737*bfec6d7fSVivian Wang 		goto err_pm_put;
1738*bfec6d7fSVivian Wang 	}
1739*bfec6d7fSVivian Wang 
1740*bfec6d7fSVivian Wang 	emac_init_hw(priv);
1741*bfec6d7fSVivian Wang 
1742*bfec6d7fSVivian Wang 	emac_set_mac_addr(priv, ndev->dev_addr);
1743*bfec6d7fSVivian Wang 	emac_configure_tx(priv);
1744*bfec6d7fSVivian Wang 	emac_configure_rx(priv);
1745*bfec6d7fSVivian Wang 
1746*bfec6d7fSVivian Wang 	emac_alloc_rx_desc_buffers(priv);
1747*bfec6d7fSVivian Wang 
1748*bfec6d7fSVivian Wang 	phy_start(ndev->phydev);
1749*bfec6d7fSVivian Wang 
1750*bfec6d7fSVivian Wang 	ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
1751*bfec6d7fSVivian Wang 			  ndev->name, ndev);
1752*bfec6d7fSVivian Wang 	if (ret) {
1753*bfec6d7fSVivian Wang 		dev_err(&pdev->dev, "request_irq failed\n");
1754*bfec6d7fSVivian Wang 		goto err_reset_disconnect_phy;
1755*bfec6d7fSVivian Wang 	}
1756*bfec6d7fSVivian Wang 
1757*bfec6d7fSVivian Wang 	/* Don't enable MAC interrupts */
1758*bfec6d7fSVivian Wang 	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
1759*bfec6d7fSVivian Wang 
1760*bfec6d7fSVivian Wang 	/* Enable DMA interrupts */
1761*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_INTERRUPT_ENABLE,
1762*bfec6d7fSVivian Wang 		MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
1763*bfec6d7fSVivian Wang 			MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
1764*bfec6d7fSVivian Wang 			MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
1765*bfec6d7fSVivian Wang 			MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
1766*bfec6d7fSVivian Wang 			MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
1767*bfec6d7fSVivian Wang 
1768*bfec6d7fSVivian Wang 	napi_enable(&priv->napi);
1769*bfec6d7fSVivian Wang 
1770*bfec6d7fSVivian Wang 	netif_start_queue(ndev);
1771*bfec6d7fSVivian Wang 
1772*bfec6d7fSVivian Wang 	emac_stats_timer(&priv->stats_timer);
1773*bfec6d7fSVivian Wang 
1774*bfec6d7fSVivian Wang 	return 0;
1775*bfec6d7fSVivian Wang 
1776*bfec6d7fSVivian Wang err_reset_disconnect_phy:
1777*bfec6d7fSVivian Wang 	emac_reset_hw(priv);
1778*bfec6d7fSVivian Wang 	phy_disconnect(ndev->phydev);
1779*bfec6d7fSVivian Wang 
1780*bfec6d7fSVivian Wang err_pm_put:
1781*bfec6d7fSVivian Wang 	pm_runtime_put_sync(&pdev->dev);
1782*bfec6d7fSVivian Wang 	return ret;
1783*bfec6d7fSVivian Wang }
1784*bfec6d7fSVivian Wang 
1785*bfec6d7fSVivian Wang static int emac_down(struct emac_priv *priv)
1786*bfec6d7fSVivian Wang {
1787*bfec6d7fSVivian Wang 	struct platform_device *pdev = priv->pdev;
1788*bfec6d7fSVivian Wang 	struct net_device *ndev = priv->ndev;
1789*bfec6d7fSVivian Wang 
1790*bfec6d7fSVivian Wang 	netif_stop_queue(ndev);
1791*bfec6d7fSVivian Wang 
1792*bfec6d7fSVivian Wang 	phy_disconnect(ndev->phydev);
1793*bfec6d7fSVivian Wang 
1794*bfec6d7fSVivian Wang 	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
1795*bfec6d7fSVivian Wang 	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
1796*bfec6d7fSVivian Wang 
1797*bfec6d7fSVivian Wang 	free_irq(priv->irq, ndev);
1798*bfec6d7fSVivian Wang 
1799*bfec6d7fSVivian Wang 	napi_disable(&priv->napi);
1800*bfec6d7fSVivian Wang 
1801*bfec6d7fSVivian Wang 	timer_delete_sync(&priv->txtimer);
1802*bfec6d7fSVivian Wang 	cancel_work_sync(&priv->tx_timeout_task);
1803*bfec6d7fSVivian Wang 
1804*bfec6d7fSVivian Wang 	timer_delete_sync(&priv->stats_timer);
1805*bfec6d7fSVivian Wang 
1806*bfec6d7fSVivian Wang 	emac_reset_hw(priv);
1807*bfec6d7fSVivian Wang 
1808*bfec6d7fSVivian Wang 	/* Update and save current stats, see emac_stats_update() for usage */
1809*bfec6d7fSVivian Wang 
1810*bfec6d7fSVivian Wang 	spin_lock(&priv->stats_lock);
1811*bfec6d7fSVivian Wang 
1812*bfec6d7fSVivian Wang 	emac_stats_update(priv);
1813*bfec6d7fSVivian Wang 
1814*bfec6d7fSVivian Wang 	priv->tx_stats_off = priv->tx_stats;
1815*bfec6d7fSVivian Wang 	priv->rx_stats_off = priv->rx_stats;
1816*bfec6d7fSVivian Wang 
1817*bfec6d7fSVivian Wang 	spin_unlock(&priv->stats_lock);
1818*bfec6d7fSVivian Wang 
1819*bfec6d7fSVivian Wang 	pm_runtime_put_sync(&pdev->dev);
1820*bfec6d7fSVivian Wang 	return 0;
1821*bfec6d7fSVivian Wang }
1822*bfec6d7fSVivian Wang 
1823*bfec6d7fSVivian Wang /* Called when net interface is brought up. */
1824*bfec6d7fSVivian Wang static int emac_open(struct net_device *ndev)
1825*bfec6d7fSVivian Wang {
1826*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(ndev);
1827*bfec6d7fSVivian Wang 	struct device *dev = &priv->pdev->dev;
1828*bfec6d7fSVivian Wang 	int ret;
1829*bfec6d7fSVivian Wang 
1830*bfec6d7fSVivian Wang 	ret = emac_alloc_tx_resources(priv);
1831*bfec6d7fSVivian Wang 	if (ret) {
1832*bfec6d7fSVivian Wang 		dev_err(dev, "Cannot allocate TX resources\n");
1833*bfec6d7fSVivian Wang 		return ret;
1834*bfec6d7fSVivian Wang 	}
1835*bfec6d7fSVivian Wang 
1836*bfec6d7fSVivian Wang 	ret = emac_alloc_rx_resources(priv);
1837*bfec6d7fSVivian Wang 	if (ret) {
1838*bfec6d7fSVivian Wang 		dev_err(dev, "Cannot allocate RX resources\n");
1839*bfec6d7fSVivian Wang 		goto err_free_tx;
1840*bfec6d7fSVivian Wang 	}
1841*bfec6d7fSVivian Wang 
1842*bfec6d7fSVivian Wang 	ret = emac_up(priv);
1843*bfec6d7fSVivian Wang 	if (ret) {
1844*bfec6d7fSVivian Wang 		dev_err(dev, "Error when bringing interface up\n");
1845*bfec6d7fSVivian Wang 		goto err_free_rx;
1846*bfec6d7fSVivian Wang 	}
1847*bfec6d7fSVivian Wang 	return 0;
1848*bfec6d7fSVivian Wang 
1849*bfec6d7fSVivian Wang err_free_rx:
1850*bfec6d7fSVivian Wang 	emac_free_rx_resources(priv);
1851*bfec6d7fSVivian Wang err_free_tx:
1852*bfec6d7fSVivian Wang 	emac_free_tx_resources(priv);
1853*bfec6d7fSVivian Wang 
1854*bfec6d7fSVivian Wang 	return ret;
1855*bfec6d7fSVivian Wang }
1856*bfec6d7fSVivian Wang 
1857*bfec6d7fSVivian Wang /* Called when interface is brought down. */
1858*bfec6d7fSVivian Wang static int emac_stop(struct net_device *ndev)
1859*bfec6d7fSVivian Wang {
1860*bfec6d7fSVivian Wang 	struct emac_priv *priv = netdev_priv(ndev);
1861*bfec6d7fSVivian Wang 
1862*bfec6d7fSVivian Wang 	emac_down(priv);
1863*bfec6d7fSVivian Wang 	emac_free_tx_resources(priv);
1864*bfec6d7fSVivian Wang 	emac_free_rx_resources(priv);
1865*bfec6d7fSVivian Wang 
1866*bfec6d7fSVivian Wang 	return 0;
1867*bfec6d7fSVivian Wang }
1868*bfec6d7fSVivian Wang 
1869*bfec6d7fSVivian Wang static const struct ethtool_ops emac_ethtool_ops = {
1870*bfec6d7fSVivian Wang 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
1871*bfec6d7fSVivian Wang 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
1872*bfec6d7fSVivian Wang 	.nway_reset		= phy_ethtool_nway_reset,
1873*bfec6d7fSVivian Wang 	.get_drvinfo		= emac_get_drvinfo,
1874*bfec6d7fSVivian Wang 	.get_link		= ethtool_op_get_link,
1875*bfec6d7fSVivian Wang 
1876*bfec6d7fSVivian Wang 	.get_regs		= emac_ethtool_get_regs,
1877*bfec6d7fSVivian Wang 	.get_regs_len		= emac_ethtool_get_regs_len,
1878*bfec6d7fSVivian Wang 
1879*bfec6d7fSVivian Wang 	.get_rmon_stats		= emac_get_rmon_stats,
1880*bfec6d7fSVivian Wang 	.get_pause_stats	= emac_get_pause_stats,
1881*bfec6d7fSVivian Wang 	.get_eth_mac_stats	= emac_get_eth_mac_stats,
1882*bfec6d7fSVivian Wang 
1883*bfec6d7fSVivian Wang 	.get_sset_count		= emac_get_sset_count,
1884*bfec6d7fSVivian Wang 	.get_strings		= emac_get_strings,
1885*bfec6d7fSVivian Wang 	.get_ethtool_stats	= emac_get_ethtool_stats,
1886*bfec6d7fSVivian Wang 
1887*bfec6d7fSVivian Wang 	.get_pauseparam		= emac_get_pauseparam,
1888*bfec6d7fSVivian Wang 	.set_pauseparam		= emac_set_pauseparam,
1889*bfec6d7fSVivian Wang };
1890*bfec6d7fSVivian Wang 
1891*bfec6d7fSVivian Wang static const struct net_device_ops emac_netdev_ops = {
1892*bfec6d7fSVivian Wang 	.ndo_open               = emac_open,
1893*bfec6d7fSVivian Wang 	.ndo_stop               = emac_stop,
1894*bfec6d7fSVivian Wang 	.ndo_start_xmit         = emac_start_xmit,
1895*bfec6d7fSVivian Wang 	.ndo_validate_addr	= eth_validate_addr,
1896*bfec6d7fSVivian Wang 	.ndo_set_mac_address    = emac_set_mac_address,
1897*bfec6d7fSVivian Wang 	.ndo_eth_ioctl          = phy_do_ioctl_running,
1898*bfec6d7fSVivian Wang 	.ndo_change_mtu         = emac_change_mtu,
1899*bfec6d7fSVivian Wang 	.ndo_tx_timeout         = emac_tx_timeout,
1900*bfec6d7fSVivian Wang 	.ndo_set_rx_mode        = emac_set_rx_mode,
1901*bfec6d7fSVivian Wang 	.ndo_get_stats64	= emac_get_stats64,
1902*bfec6d7fSVivian Wang };
1903*bfec6d7fSVivian Wang 
1904*bfec6d7fSVivian Wang /* Currently we always use 15.6 ps/step for the delay line */
1905*bfec6d7fSVivian Wang 
1906*bfec6d7fSVivian Wang static u32 delay_ps_to_unit(u32 ps)
1907*bfec6d7fSVivian Wang {
1908*bfec6d7fSVivian Wang 	return DIV_ROUND_CLOSEST(ps * 10, 156);
1909*bfec6d7fSVivian Wang }
1910*bfec6d7fSVivian Wang 
1911*bfec6d7fSVivian Wang static u32 delay_unit_to_ps(u32 unit)
1912*bfec6d7fSVivian Wang {
1913*bfec6d7fSVivian Wang 	return DIV_ROUND_CLOSEST(unit * 156, 10);
1914*bfec6d7fSVivian Wang }
1915*bfec6d7fSVivian Wang 
1916*bfec6d7fSVivian Wang #define EMAC_MAX_DELAY_UNIT	FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
1917*bfec6d7fSVivian Wang 
1918*bfec6d7fSVivian Wang /* Minus one just to be safe from rounding errors */
1919*bfec6d7fSVivian Wang #define EMAC_MAX_DELAY_PS	(delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
1920*bfec6d7fSVivian Wang 
1921*bfec6d7fSVivian Wang static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
1922*bfec6d7fSVivian Wang {
1923*bfec6d7fSVivian Wang 	struct device_node *np = pdev->dev.of_node;
1924*bfec6d7fSVivian Wang 	struct device *dev = &pdev->dev;
1925*bfec6d7fSVivian Wang 	u8 mac_addr[ETH_ALEN] = { 0 };
1926*bfec6d7fSVivian Wang 	int ret;
1927*bfec6d7fSVivian Wang 
1928*bfec6d7fSVivian Wang 	priv->iobase = devm_platform_ioremap_resource(pdev, 0);
1929*bfec6d7fSVivian Wang 	if (IS_ERR(priv->iobase))
1930*bfec6d7fSVivian Wang 		return dev_err_probe(dev, PTR_ERR(priv->iobase),
1931*bfec6d7fSVivian Wang 				     "ioremap failed\n");
1932*bfec6d7fSVivian Wang 
1933*bfec6d7fSVivian Wang 	priv->regmap_apmu =
1934*bfec6d7fSVivian Wang 		syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
1935*bfec6d7fSVivian Wang 						     &priv->regmap_apmu_offset);
1936*bfec6d7fSVivian Wang 
1937*bfec6d7fSVivian Wang 	if (IS_ERR(priv->regmap_apmu))
1938*bfec6d7fSVivian Wang 		return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
1939*bfec6d7fSVivian Wang 				     "failed to get syscon\n");
1940*bfec6d7fSVivian Wang 
1941*bfec6d7fSVivian Wang 	priv->irq = platform_get_irq(pdev, 0);
1942*bfec6d7fSVivian Wang 	if (priv->irq < 0)
1943*bfec6d7fSVivian Wang 		return priv->irq;
1944*bfec6d7fSVivian Wang 
1945*bfec6d7fSVivian Wang 	ret = of_get_mac_address(np, mac_addr);
1946*bfec6d7fSVivian Wang 	if (ret) {
1947*bfec6d7fSVivian Wang 		if (ret == -EPROBE_DEFER)
1948*bfec6d7fSVivian Wang 			return dev_err_probe(dev, ret,
1949*bfec6d7fSVivian Wang 					     "Can't get MAC address\n");
1950*bfec6d7fSVivian Wang 
1951*bfec6d7fSVivian Wang 		dev_info(&pdev->dev, "Using random MAC address\n");
1952*bfec6d7fSVivian Wang 		eth_hw_addr_random(priv->ndev);
1953*bfec6d7fSVivian Wang 	} else {
1954*bfec6d7fSVivian Wang 		eth_hw_addr_set(priv->ndev, mac_addr);
1955*bfec6d7fSVivian Wang 	}
1956*bfec6d7fSVivian Wang 
1957*bfec6d7fSVivian Wang 	priv->tx_delay = 0;
1958*bfec6d7fSVivian Wang 	priv->rx_delay = 0;
1959*bfec6d7fSVivian Wang 
1960*bfec6d7fSVivian Wang 	of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
1961*bfec6d7fSVivian Wang 	of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
1962*bfec6d7fSVivian Wang 
1963*bfec6d7fSVivian Wang 	if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
1964*bfec6d7fSVivian Wang 		dev_err(&pdev->dev,
1965*bfec6d7fSVivian Wang 			"tx-internal-delay-ps too large: max %d, got %d",
1966*bfec6d7fSVivian Wang 			EMAC_MAX_DELAY_PS, priv->tx_delay);
1967*bfec6d7fSVivian Wang 		return -EINVAL;
1968*bfec6d7fSVivian Wang 	}
1969*bfec6d7fSVivian Wang 
1970*bfec6d7fSVivian Wang 	if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
1971*bfec6d7fSVivian Wang 		dev_err(&pdev->dev,
1972*bfec6d7fSVivian Wang 			"rx-internal-delay-ps too large: max %d, got %d",
1973*bfec6d7fSVivian Wang 			EMAC_MAX_DELAY_PS, priv->rx_delay);
1974*bfec6d7fSVivian Wang 		return -EINVAL;
1975*bfec6d7fSVivian Wang 	}
1976*bfec6d7fSVivian Wang 
1977*bfec6d7fSVivian Wang 	priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
1978*bfec6d7fSVivian Wang 	priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
1979*bfec6d7fSVivian Wang 
1980*bfec6d7fSVivian Wang 	return 0;
1981*bfec6d7fSVivian Wang }
1982*bfec6d7fSVivian Wang 
1983*bfec6d7fSVivian Wang static void emac_phy_deregister_fixed_link(void *data)
1984*bfec6d7fSVivian Wang {
1985*bfec6d7fSVivian Wang 	struct device_node *of_node = data;
1986*bfec6d7fSVivian Wang 
1987*bfec6d7fSVivian Wang 	of_phy_deregister_fixed_link(of_node);
1988*bfec6d7fSVivian Wang }
1989*bfec6d7fSVivian Wang 
1990*bfec6d7fSVivian Wang static int emac_probe(struct platform_device *pdev)
1991*bfec6d7fSVivian Wang {
1992*bfec6d7fSVivian Wang 	struct device *dev = &pdev->dev;
1993*bfec6d7fSVivian Wang 	struct reset_control *reset;
1994*bfec6d7fSVivian Wang 	struct net_device *ndev;
1995*bfec6d7fSVivian Wang 	struct emac_priv *priv;
1996*bfec6d7fSVivian Wang 	int ret;
1997*bfec6d7fSVivian Wang 
1998*bfec6d7fSVivian Wang 	ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
1999*bfec6d7fSVivian Wang 	if (!ndev)
2000*bfec6d7fSVivian Wang 		return -ENOMEM;
2001*bfec6d7fSVivian Wang 
2002*bfec6d7fSVivian Wang 	ndev->hw_features = NETIF_F_SG;
2003*bfec6d7fSVivian Wang 	ndev->features |= ndev->hw_features;
2004*bfec6d7fSVivian Wang 
2005*bfec6d7fSVivian Wang 	ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
2006*bfec6d7fSVivian Wang 	ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
2007*bfec6d7fSVivian Wang 
2008*bfec6d7fSVivian Wang 	priv = netdev_priv(ndev);
2009*bfec6d7fSVivian Wang 	priv->ndev = ndev;
2010*bfec6d7fSVivian Wang 	priv->pdev = pdev;
2011*bfec6d7fSVivian Wang 	platform_set_drvdata(pdev, priv);
2012*bfec6d7fSVivian Wang 
2013*bfec6d7fSVivian Wang 	ret = emac_config_dt(pdev, priv);
2014*bfec6d7fSVivian Wang 	if (ret < 0)
2015*bfec6d7fSVivian Wang 		return dev_err_probe(dev, ret, "Configuration failed\n");
2016*bfec6d7fSVivian Wang 
2017*bfec6d7fSVivian Wang 	ndev->watchdog_timeo = 5 * HZ;
2018*bfec6d7fSVivian Wang 	ndev->base_addr = (unsigned long)priv->iobase;
2019*bfec6d7fSVivian Wang 	ndev->irq = priv->irq;
2020*bfec6d7fSVivian Wang 
2021*bfec6d7fSVivian Wang 	ndev->ethtool_ops = &emac_ethtool_ops;
2022*bfec6d7fSVivian Wang 	ndev->netdev_ops = &emac_netdev_ops;
2023*bfec6d7fSVivian Wang 
2024*bfec6d7fSVivian Wang 	devm_pm_runtime_enable(&pdev->dev);
2025*bfec6d7fSVivian Wang 
2026*bfec6d7fSVivian Wang 	priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
2027*bfec6d7fSVivian Wang 	if (IS_ERR(priv->bus_clk))
2028*bfec6d7fSVivian Wang 		return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
2029*bfec6d7fSVivian Wang 				     "Failed to get clock\n");
2030*bfec6d7fSVivian Wang 
2031*bfec6d7fSVivian Wang 	reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
2032*bfec6d7fSVivian Wang 								     NULL);
2033*bfec6d7fSVivian Wang 	if (IS_ERR(reset))
2034*bfec6d7fSVivian Wang 		return dev_err_probe(dev, PTR_ERR(reset),
2035*bfec6d7fSVivian Wang 				     "Failed to get reset\n");
2036*bfec6d7fSVivian Wang 
2037*bfec6d7fSVivian Wang 	if (of_phy_is_fixed_link(dev->of_node)) {
2038*bfec6d7fSVivian Wang 		ret = of_phy_register_fixed_link(dev->of_node);
2039*bfec6d7fSVivian Wang 		if (ret)
2040*bfec6d7fSVivian Wang 			return dev_err_probe(dev, ret,
2041*bfec6d7fSVivian Wang 					     "Failed to register fixed-link\n");
2042*bfec6d7fSVivian Wang 
2043*bfec6d7fSVivian Wang 		ret = devm_add_action_or_reset(dev,
2044*bfec6d7fSVivian Wang 					       emac_phy_deregister_fixed_link,
2045*bfec6d7fSVivian Wang 					       dev->of_node);
2046*bfec6d7fSVivian Wang 
2047*bfec6d7fSVivian Wang 		if (ret) {
2048*bfec6d7fSVivian Wang 			dev_err(dev, "devm_add_action_or_reset failed\n");
2049*bfec6d7fSVivian Wang 			return ret;
2050*bfec6d7fSVivian Wang 		}
2051*bfec6d7fSVivian Wang 	}
2052*bfec6d7fSVivian Wang 
2053*bfec6d7fSVivian Wang 	emac_sw_init(priv);
2054*bfec6d7fSVivian Wang 
2055*bfec6d7fSVivian Wang 	ret = emac_mdio_init(priv);
2056*bfec6d7fSVivian Wang 	if (ret)
2057*bfec6d7fSVivian Wang 		goto err_timer_delete;
2058*bfec6d7fSVivian Wang 
2059*bfec6d7fSVivian Wang 	SET_NETDEV_DEV(ndev, &pdev->dev);
2060*bfec6d7fSVivian Wang 
2061*bfec6d7fSVivian Wang 	ret = devm_register_netdev(dev, ndev);
2062*bfec6d7fSVivian Wang 	if (ret) {
2063*bfec6d7fSVivian Wang 		dev_err(dev, "devm_register_netdev failed\n");
2064*bfec6d7fSVivian Wang 		goto err_timer_delete;
2065*bfec6d7fSVivian Wang 	}
2066*bfec6d7fSVivian Wang 
2067*bfec6d7fSVivian Wang 	netif_napi_add(ndev, &priv->napi, emac_rx_poll);
2068*bfec6d7fSVivian Wang 	netif_carrier_off(ndev);
2069*bfec6d7fSVivian Wang 
2070*bfec6d7fSVivian Wang 	return 0;
2071*bfec6d7fSVivian Wang 
2072*bfec6d7fSVivian Wang err_timer_delete:
2073*bfec6d7fSVivian Wang 	timer_delete_sync(&priv->txtimer);
2074*bfec6d7fSVivian Wang 	timer_delete_sync(&priv->stats_timer);
2075*bfec6d7fSVivian Wang 
2076*bfec6d7fSVivian Wang 	return ret;
2077*bfec6d7fSVivian Wang }
2078*bfec6d7fSVivian Wang 
2079*bfec6d7fSVivian Wang static void emac_remove(struct platform_device *pdev)
2080*bfec6d7fSVivian Wang {
2081*bfec6d7fSVivian Wang 	struct emac_priv *priv = platform_get_drvdata(pdev);
2082*bfec6d7fSVivian Wang 
2083*bfec6d7fSVivian Wang 	timer_shutdown_sync(&priv->txtimer);
2084*bfec6d7fSVivian Wang 	cancel_work_sync(&priv->tx_timeout_task);
2085*bfec6d7fSVivian Wang 
2086*bfec6d7fSVivian Wang 	timer_shutdown_sync(&priv->stats_timer);
2087*bfec6d7fSVivian Wang 
2088*bfec6d7fSVivian Wang 	emac_reset_hw(priv);
2089*bfec6d7fSVivian Wang }
2090*bfec6d7fSVivian Wang 
2091*bfec6d7fSVivian Wang static int emac_resume(struct device *dev)
2092*bfec6d7fSVivian Wang {
2093*bfec6d7fSVivian Wang 	struct emac_priv *priv = dev_get_drvdata(dev);
2094*bfec6d7fSVivian Wang 	struct net_device *ndev = priv->ndev;
2095*bfec6d7fSVivian Wang 	int ret;
2096*bfec6d7fSVivian Wang 
2097*bfec6d7fSVivian Wang 	ret = clk_prepare_enable(priv->bus_clk);
2098*bfec6d7fSVivian Wang 	if (ret < 0) {
2099*bfec6d7fSVivian Wang 		dev_err(dev, "Failed to enable bus clock: %d\n", ret);
2100*bfec6d7fSVivian Wang 		return ret;
2101*bfec6d7fSVivian Wang 	}
2102*bfec6d7fSVivian Wang 
2103*bfec6d7fSVivian Wang 	if (!netif_running(ndev))
2104*bfec6d7fSVivian Wang 		return 0;
2105*bfec6d7fSVivian Wang 
2106*bfec6d7fSVivian Wang 	ret = emac_open(ndev);
2107*bfec6d7fSVivian Wang 	if (ret) {
2108*bfec6d7fSVivian Wang 		clk_disable_unprepare(priv->bus_clk);
2109*bfec6d7fSVivian Wang 		return ret;
2110*bfec6d7fSVivian Wang 	}
2111*bfec6d7fSVivian Wang 
2112*bfec6d7fSVivian Wang 	netif_device_attach(ndev);
2113*bfec6d7fSVivian Wang 
2114*bfec6d7fSVivian Wang 	emac_stats_timer(&priv->stats_timer);
2115*bfec6d7fSVivian Wang 
2116*bfec6d7fSVivian Wang 	return 0;
2117*bfec6d7fSVivian Wang }
2118*bfec6d7fSVivian Wang 
2119*bfec6d7fSVivian Wang static int emac_suspend(struct device *dev)
2120*bfec6d7fSVivian Wang {
2121*bfec6d7fSVivian Wang 	struct emac_priv *priv = dev_get_drvdata(dev);
2122*bfec6d7fSVivian Wang 	struct net_device *ndev = priv->ndev;
2123*bfec6d7fSVivian Wang 
2124*bfec6d7fSVivian Wang 	if (!ndev || !netif_running(ndev)) {
2125*bfec6d7fSVivian Wang 		clk_disable_unprepare(priv->bus_clk);
2126*bfec6d7fSVivian Wang 		return 0;
2127*bfec6d7fSVivian Wang 	}
2128*bfec6d7fSVivian Wang 
2129*bfec6d7fSVivian Wang 	emac_stop(ndev);
2130*bfec6d7fSVivian Wang 
2131*bfec6d7fSVivian Wang 	clk_disable_unprepare(priv->bus_clk);
2132*bfec6d7fSVivian Wang 	netif_device_detach(ndev);
2133*bfec6d7fSVivian Wang 	return 0;
2134*bfec6d7fSVivian Wang }
2135*bfec6d7fSVivian Wang 
2136*bfec6d7fSVivian Wang static const struct dev_pm_ops emac_pm_ops = {
2137*bfec6d7fSVivian Wang 	SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
2138*bfec6d7fSVivian Wang };
2139*bfec6d7fSVivian Wang 
2140*bfec6d7fSVivian Wang static const struct of_device_id emac_of_match[] = {
2141*bfec6d7fSVivian Wang 	{ .compatible = "spacemit,k1-emac" },
2142*bfec6d7fSVivian Wang 	{ /* sentinel */ },
2143*bfec6d7fSVivian Wang };
2144*bfec6d7fSVivian Wang MODULE_DEVICE_TABLE(of, emac_of_match);
2145*bfec6d7fSVivian Wang 
2146*bfec6d7fSVivian Wang static struct platform_driver emac_driver = {
2147*bfec6d7fSVivian Wang 	.probe = emac_probe,
2148*bfec6d7fSVivian Wang 	.remove = emac_remove,
2149*bfec6d7fSVivian Wang 	.driver = {
2150*bfec6d7fSVivian Wang 		.name = DRIVER_NAME,
2151*bfec6d7fSVivian Wang 		.of_match_table = of_match_ptr(emac_of_match),
2152*bfec6d7fSVivian Wang 		.pm = &emac_pm_ops,
2153*bfec6d7fSVivian Wang 	},
2154*bfec6d7fSVivian Wang };
2155*bfec6d7fSVivian Wang module_platform_driver(emac_driver);
2156*bfec6d7fSVivian Wang 
2157*bfec6d7fSVivian Wang MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
2158*bfec6d7fSVivian Wang MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
2159*bfec6d7fSVivian Wang MODULE_LICENSE("GPL");
2160