// SPDX-License-Identifier: GPL-2.0
/*
 * SpacemiT K1 Ethernet driver
 *
 * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
 * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/rtnetlink.h>
#include <linux/timer.h>
#include <linux/types.h>

#include "k1_emac.h"

#define DRIVER_NAME			"k1_emac"

#define EMAC_DEFAULT_BUFSIZE		1536
#define EMAC_RX_BUF_2K			2048
#define EMAC_RX_BUF_4K			4096

/* Tuning parameters from SpacemiT */
#define EMAC_TX_FRAMES			64
#define EMAC_TX_COAL_TIMEOUT		40000
#define EMAC_RX_FRAMES			64
#define EMAC_RX_COAL_TIMEOUT		(600 * 312)

#define DEFAULT_FC_PAUSE_TIME		0xffff
#define DEFAULT_FC_FIFO_HIGH		1600
#define DEFAULT_TX_ALMOST_FULL		0x1f8
#define DEFAULT_TX_THRESHOLD		1518
#define DEFAULT_RX_THRESHOLD		12
#define DEFAULT_TX_RING_NUM		1024
#define DEFAULT_RX_RING_NUM		1024
#define DEFAULT_DMA_BURST		MREGBIT_BURST_16WORD
#define HASH_TABLE_SIZE			64

struct desc_buf {
	u64 dma_addr;
	void *buff_addr;
	u16 dma_len;
	u8 map_as_page;
};

struct emac_tx_desc_buffer {
	struct sk_buff *skb;
	struct desc_buf buf[2];
};

struct emac_rx_desc_buffer {
	struct sk_buff *skb;
	u64 dma_addr;
	void *buff_addr;
	u16 dma_len;
	u8 map_as_page;
};

/**
 * struct emac_desc_ring - Software-side information for one descriptor ring
 * Same structure used for both RX and TX
 * @desc_addr: Virtual address to the descriptor ring memory
 * @desc_dma_addr: DMA address of the descriptor ring
 * @total_size: Size of ring in bytes
 * @total_cnt: Number of descriptors
 * @head: Next descriptor to associate a buffer with
 * @tail: Next descriptor to check status bit
 * @rx_desc_buf: Array of descriptors for RX
 * @tx_desc_buf: Array of descriptors for TX, with max of two buffers each
 */
struct emac_desc_ring {
	void *desc_addr;
	dma_addr_t desc_dma_addr;
	u32 total_size;
	u32 total_cnt;
	u32 head;
	u32 tail;
	union {
		struct emac_rx_desc_buffer *rx_desc_buf;
		struct emac_tx_desc_buffer *tx_desc_buf;
	};
};

struct emac_priv {
	void __iomem *iobase;
	u32 dma_buf_sz;
	struct emac_desc_ring tx_ring;
	struct emac_desc_ring rx_ring;

	struct net_device *ndev;
	struct napi_struct napi;
	struct platform_device *pdev;
	struct clk *bus_clk;
	struct clk *ref_clk;
	struct regmap *regmap_apmu;
	u32 regmap_apmu_offset;
	int irq;

	phy_interface_t phy_interface;

	union emac_hw_tx_stats tx_stats, tx_stats_off;
	union emac_hw_rx_stats rx_stats, rx_stats_off;

	u32 tx_count_frames;
	u32 tx_coal_frames;
	u32 tx_coal_timeout;
	struct work_struct tx_timeout_task;

	struct timer_list txtimer;
	struct timer_list stats_timer;

	u32 tx_delay;
	u32 rx_delay;

	bool flow_control_autoneg;
	u8 flow_control;

	/* Softirq-safe, hold while touching hardware statistics */
	spinlock_t stats_lock;
};
static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->iobase + reg);
}

static u32 emac_rd(struct emac_priv *priv, u32 reg)
{
	return readl(priv->iobase + reg);
}

static int emac_phy_interface_config(struct emac_priv *priv)
{
	u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		val |= PHY_INTF_RGMII;

	regmap_update_bits(priv->regmap_apmu,
			   priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
			   mask, val);

	return 0;
}

/*
 * Where the hardware expects a MAC address, it is laid out across three
 * consecutive registers in high, med, low order, two octets per register,
 * in the format written below.
 */
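/*
 * For example, programming 00:11:22:33:44:55 writes 0x1100 to the "high"
 * register, 0x3322 to the next one, and 0x5544 to the last one, each octet
 * pair packed with the earlier octet in the low byte, as done by
 * emac_set_mac_addr_reg() below.
 */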

static void emac_set_mac_addr_reg(struct emac_priv *priv,
				  const unsigned char *addr,
				  u32 reg)
{
	emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
	emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
	emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
}

static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
{
	/* We use only one address, so set the same for flow control as well */
	emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
	emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
}

static void emac_reset_hw(struct emac_priv *priv)
{
	/* Disable all interrupts */
	emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
	emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Disable DMA */
	emac_wr(priv, DMA_CONTROL, 0x0);
}

static void emac_init_hw(struct emac_priv *priv)
{
	/* Destination address for 802.3x Ethernet flow control */
	u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };

	u32 rxirq = 0, dma = 0;

	regmap_set_bits(priv->regmap_apmu,
			priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
			AXI_SINGLE_ID);

	/* Disable transmit and receive units */
	emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
	emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);

	/* Enable MAC address 1 filtering */
	emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);

	/* Zero initialize the multicast hash table */
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);

	/* Configure thresholds */
	emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
	emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
		DEFAULT_TX_THRESHOLD);
	emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);

	/* Configure flow control (enabled in emac_adjust_link() later) */
	emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
	emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
	emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
	emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);

	/* RX IRQ mitigation */
	rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
			   EMAC_RX_FRAMES);
	rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
			    EMAC_RX_COAL_TIMEOUT);
	rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
	emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);

	/* Disable and set DMA config */
	emac_wr(priv, DMA_CONTROL, 0x0);

	emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
	usleep_range(9000, 10000);
	emac_wr(priv, DMA_CONFIGURATION, 0x0);
	usleep_range(9000, 10000);

	dma |= MREGBIT_STRICT_BURST;
	dma |= MREGBIT_DMA_64BIT_MODE;
	dma |= DEFAULT_DMA_BURST;

	emac_wr(priv, DMA_CONFIGURATION, dma);
}

static void emac_dma_start_transmit(struct emac_priv *priv)
{
	/* The actual value written does not matter */
	emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
}

static void emac_enable_interrupt(struct emac_priv *priv)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
	val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}

static void emac_disable_interrupt(struct emac_priv *priv)
{
	u32 val;

	val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
	val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
	val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
	emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
}

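/*
 * Number of free slots in the TX ring, keeping one slot empty so that
 * head == tail unambiguously means "empty". For example, with
 * total_cnt == 1024 and head == tail, 1023 descriptors are reported free.
 */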
emac_tx_avail(struct emac_priv * priv)286bfec6d7fSVivian Wang static u32 emac_tx_avail(struct emac_priv *priv)
287bfec6d7fSVivian Wang {
288bfec6d7fSVivian Wang struct emac_desc_ring *tx_ring = &priv->tx_ring;
289bfec6d7fSVivian Wang u32 avail;
290bfec6d7fSVivian Wang
291bfec6d7fSVivian Wang if (tx_ring->tail > tx_ring->head)
292bfec6d7fSVivian Wang avail = tx_ring->tail - tx_ring->head - 1;
293bfec6d7fSVivian Wang else
294bfec6d7fSVivian Wang avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
295bfec6d7fSVivian Wang
296bfec6d7fSVivian Wang return avail;
297bfec6d7fSVivian Wang }
298bfec6d7fSVivian Wang
emac_tx_coal_timer_resched(struct emac_priv * priv)299bfec6d7fSVivian Wang static void emac_tx_coal_timer_resched(struct emac_priv *priv)
300bfec6d7fSVivian Wang {
301bfec6d7fSVivian Wang mod_timer(&priv->txtimer,
302bfec6d7fSVivian Wang jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
303bfec6d7fSVivian Wang }
304bfec6d7fSVivian Wang
emac_tx_coal_timer(struct timer_list * t)305bfec6d7fSVivian Wang static void emac_tx_coal_timer(struct timer_list *t)
306bfec6d7fSVivian Wang {
307bfec6d7fSVivian Wang struct emac_priv *priv = timer_container_of(priv, t, txtimer);
308bfec6d7fSVivian Wang
309bfec6d7fSVivian Wang napi_schedule(&priv->napi);
310bfec6d7fSVivian Wang }
311bfec6d7fSVivian Wang
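/*
 * TX interrupt coalescing: only request an interrupt on the last descriptor
 * once tx_coal_frames frames have accumulated; in between, the tx_coal_timeout
 * (microseconds) timer above schedules NAPI so completed buffers still get
 * reclaimed promptly.
 */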
static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
{
	priv->tx_count_frames += pkt_num;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		emac_tx_coal_timer_resched(priv);
		return false;
	}

	priv->tx_count_frames = 0;
	return true;
}

static void emac_free_tx_buf(struct emac_priv *priv, int i)
{
	struct emac_tx_desc_buffer *tx_buf;
	struct emac_desc_ring *tx_ring;
	struct desc_buf *buf;
	int j;

	tx_ring = &priv->tx_ring;
	tx_buf = &tx_ring->tx_desc_buf[i];

	for (j = 0; j < 2; j++) {
		buf = &tx_buf->buf[j];
		if (!buf->dma_addr)
			continue;

		if (buf->map_as_page)
			dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
				       buf->dma_len, DMA_TO_DEVICE);
		else
			dma_unmap_single(&priv->pdev->dev,
					 buf->dma_addr, buf->dma_len,
					 DMA_TO_DEVICE);

		buf->dma_addr = 0;
		buf->map_as_page = false;
		buf->buff_addr = NULL;
	}

	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		tx_buf->skb = NULL;
	}
}

static void emac_clean_tx_desc_ring(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	u32 i;

	for (i = 0; i < tx_ring->total_cnt; i++)
		emac_free_tx_buf(priv, i);

	tx_ring->head = 0;
	tx_ring->tail = 0;
}

static void emac_clean_rx_desc_ring(struct emac_priv *priv)
{
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	u32 i;

	rx_ring = &priv->rx_ring;

	for (i = 0; i < rx_ring->total_cnt; i++) {
		rx_buf = &rx_ring->rx_desc_buf[i];

		if (!rx_buf->skb)
			continue;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		dev_kfree_skb(rx_buf->skb);
		rx_buf->skb = NULL;
	}

	rx_ring->tail = 0;
	rx_ring->head = 0;
}

static int emac_alloc_tx_resources(struct emac_priv *priv)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct platform_device *pdev = priv->pdev;

	tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
				       sizeof(*tx_ring->tx_desc_buf),
				       GFP_KERNEL);

	if (!tx_ring->tx_desc_buf)
		return -ENOMEM;

	tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
	tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);

	tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
						&tx_ring->desc_dma_addr,
						GFP_KERNEL);
	if (!tx_ring->desc_addr) {
		kfree(tx_ring->tx_desc_buf);
		return -ENOMEM;
	}

	tx_ring->head = 0;
	tx_ring->tail = 0;

	return 0;
}

static int emac_alloc_rx_resources(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct platform_device *pdev = priv->pdev;

	rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
				       sizeof(*rx_ring->rx_desc_buf),
				       GFP_KERNEL);
	if (!rx_ring->rx_desc_buf)
		return -ENOMEM;

	rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);

	rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);

	rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
						&rx_ring->desc_dma_addr,
						GFP_KERNEL);
	if (!rx_ring->desc_addr) {
		kfree(rx_ring->rx_desc_buf);
		return -ENOMEM;
	}

	rx_ring->head = 0;
	rx_ring->tail = 0;

	return 0;
}

static void emac_free_tx_resources(struct emac_priv *priv)
{
	struct emac_desc_ring *tr = &priv->tx_ring;
	struct device *dev = &priv->pdev->dev;

	emac_clean_tx_desc_ring(priv);

	kfree(tr->tx_desc_buf);
	tr->tx_desc_buf = NULL;

	dma_free_coherent(dev, tr->total_size, tr->desc_addr,
			  tr->desc_dma_addr);
	tr->desc_addr = NULL;
}

static void emac_free_rx_resources(struct emac_priv *priv)
{
	struct emac_desc_ring *rr = &priv->rx_ring;
	struct device *dev = &priv->pdev->dev;

	emac_clean_rx_desc_ring(priv);

	kfree(rr->rx_desc_buf);
	rr->rx_desc_buf = NULL;

	dma_free_coherent(dev, rr->total_size, rr->desc_addr,
			  rr->desc_dma_addr);
	rr->desc_addr = NULL;
}

static int emac_tx_clean_desc(struct emac_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	struct emac_desc_ring *tx_ring;
	struct emac_desc *tx_desc;
	u32 i;

	netif_tx_lock(ndev);

	tx_ring = &priv->tx_ring;

	i = tx_ring->tail;

	while (i != tx_ring->head) {
		tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];

		/* Stop checking if desc is still owned by DMA */
		if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
			break;

		emac_free_tx_buf(priv, i);
		memset(tx_desc, 0, sizeof(struct emac_desc));

		if (++i == tx_ring->total_cnt)
			i = 0;
	}

	tx_ring->tail = i;

	if (unlikely(netif_queue_stopped(ndev) &&
		     emac_tx_avail(priv) > tx_ring->total_cnt / 4))
		netif_wake_queue(ndev);

	netif_tx_unlock(ndev);

	return 0;
}

static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
{
	const char *msg;
	u32 len;

	len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);

	if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
		msg = "Not last descriptor"; /* This would be a bug */
	else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
		msg = "Runt frame";
	else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
		msg = "Frame CRC error";
	else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
		msg = "Frame exceeds max length";
	else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
		msg = "Frame jabber error";
	else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
		msg = "Frame length error";
	else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
		msg = "Frame length unacceptable";
	else
		return true; /* All good */

	dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);

	return false;
}

static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
{
	struct emac_desc_ring *rx_ring = &priv->rx_ring;
	struct emac_desc rx_desc, *rx_desc_addr;
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct sk_buff *skb;
	u32 i;

	i = rx_ring->head;
	rx_buf = &rx_ring->rx_desc_buf[i];

	while (!rx_buf->skb) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
		if (!skb)
			break;

		skb->dev = ndev;

		rx_buf->skb = skb;
		rx_buf->dma_len = priv->dma_buf_sz;
		rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
						  priv->dma_buf_sz,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
			dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
			goto err_free_skb;
		}

		rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];

		memset(&rx_desc, 0, sizeof(rx_desc));

		rx_desc.buffer_addr_1 = rx_buf->dma_addr;
		rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
					   rx_buf->dma_len);

		if (++i == rx_ring->total_cnt) {
			rx_desc.desc1 |= RX_DESC_1_END_RING;
			i = 0;
		}

		*rx_desc_addr = rx_desc;
		dma_wmb();
		WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);

		rx_buf = &rx_ring->rx_desc_buf[i];
	}

	rx_ring->head = i;
	return;

err_free_skb:
	dev_kfree_skb_any(skb);
	rx_buf->skb = NULL;
}

/* Returns number of packets received */
static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct emac_rx_desc_buffer *rx_buf;
	struct emac_desc_ring *rx_ring;
	struct sk_buff *skb = NULL;
	struct emac_desc *rx_desc;
	u32 got = 0, skb_len, i;

	rx_ring = &priv->rx_ring;

	i = rx_ring->tail;

	while (budget--) {
		rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];

		/* Stop checking if rx_desc still owned by DMA */
		if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
			break;

		dma_rmb();

		rx_buf = &rx_ring->rx_desc_buf[i];

		if (!rx_buf->skb)
			break;

		got++;

		dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
				 rx_buf->dma_len, DMA_FROM_DEVICE);

		if (likely(emac_rx_frame_good(priv, rx_desc))) {
			skb = rx_buf->skb;

			skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
					    rx_desc->desc0);
			skb_len -= ETH_FCS_LEN;

			skb_put(skb, skb_len);
			skb->dev = ndev;
			ndev->hard_header_len = ETH_HLEN;

			skb->protocol = eth_type_trans(skb, ndev);

			skb->ip_summed = CHECKSUM_NONE;

			napi_gro_receive(&priv->napi, skb);

			memset(rx_desc, 0, sizeof(struct emac_desc));
			rx_buf->skb = NULL;
		} else {
			dev_kfree_skb_irq(rx_buf->skb);
			rx_buf->skb = NULL;
		}

		if (++i == rx_ring->total_cnt)
			i = 0;
	}

	rx_ring->tail = i;

	emac_alloc_rx_desc_buffers(priv);

	return got;
}

static int emac_rx_poll(struct napi_struct *napi, int budget)
{
	struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
	int work_done;

	emac_tx_clean_desc(priv);

	work_done = emac_rx_clean_desc(priv, budget);
	if (work_done < budget && napi_complete_done(napi, work_done))
		emac_enable_interrupt(priv);

	return work_done;
}

/*
 * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
 *
 * Each descriptor can hold up to two fragments, called buffers 1 and 2. For
 * each fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses
 * buffer 2.
 */
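/*
 * For example, an skb with a linear head and three page fragments (four
 * fragments total) is spread over two descriptors: the first carries the
 * head in buffer 1 and frags[0] in buffer 2, the second carries frags[1] in
 * buffer 1 and frags[2] in buffer 2.
 */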

static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
			    struct emac_tx_desc_buffer *tx_buf,
			    struct sk_buff *skb, u32 frag_idx)
{
	bool map_as_page, buf_idx;
	const skb_frag_t *frag;
	phys_addr_t addr;
	u32 len;
	int ret;

	buf_idx = frag_idx % 2;

	if (frag_idx == 0) {
		/* Non-fragmented part */
		len = skb_headlen(skb);
		addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		map_as_page = false;
	} else {
		/* Fragment */
		frag = &skb_shinfo(skb)->frags[frag_idx - 1];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		map_as_page = true;
	}

	ret = dma_mapping_error(dev, addr);
	if (ret)
		return ret;

	tx_buf->buf[buf_idx].dma_addr = addr;
	tx_buf->buf[buf_idx].dma_len = len;
	tx_buf->buf[buf_idx].map_as_page = map_as_page;

	if (buf_idx == 0) {
		tx_desc->buffer_addr_1 = addr;
		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
	} else {
		tx_desc->buffer_addr_2 = addr;
		tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
	}

	return 0;
}

static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
{
	struct emac_desc_ring *tx_ring = &priv->tx_ring;
	struct emac_desc tx_desc, *tx_desc_addr;
	struct device *dev = &priv->pdev->dev;
	struct emac_tx_desc_buffer *tx_buf;
	u32 head, old_head, frag_num, f;
	bool buf_idx;

	frag_num = skb_shinfo(skb)->nr_frags;
	head = tx_ring->head;
	old_head = head;

	for (f = 0; f < frag_num + 1; f++) {
		buf_idx = f % 2;

		/*
		 * If using buffer 1, initialize a new desc. Otherwise, use
		 * buffer 2 of previous fragment's desc.
		 */
		if (!buf_idx) {
			tx_buf = &tx_ring->tx_desc_buf[head];
			tx_desc_addr =
				&((struct emac_desc *)tx_ring->desc_addr)[head];
			memset(&tx_desc, 0, sizeof(tx_desc));

			/*
			 * Give ownership for all but first desc initially. For
			 * first desc, give at the end so DMA cannot start
			 * reading uninitialized descs.
			 */
			if (head != old_head)
				tx_desc.desc0 |= TX_DESC_0_OWN;

			if (++head == tx_ring->total_cnt) {
				/* Just used last desc in ring */
				tx_desc.desc1 |= TX_DESC_1_END_RING;
				head = 0;
			}
		}

		if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
			dev_err_ratelimited(&priv->ndev->dev,
					    "Map TX frag %d failed\n", f);
			goto err_free_skb;
		}

		if (f == 0)
			tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;

		if (f == frag_num) {
			tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
			tx_buf->skb = skb;
			if (emac_tx_should_interrupt(priv, frag_num + 1))
				tx_desc.desc1 |=
					TX_DESC_1_INTERRUPT_ON_COMPLETION;
		}

		*tx_desc_addr = tx_desc;
	}

	/* All descriptors are ready, give ownership for first desc */
	tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
	dma_wmb();
	WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);

	emac_dma_start_transmit(priv);

	tx_ring->head = head;

	return;

err_free_skb:
	dev_dstats_tx_dropped(priv->ndev);
	dev_kfree_skb_any(skb);
}

static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct device *dev = &priv->pdev->dev;

	if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
		}
		return NETDEV_TX_BUSY;
	}

	emac_tx_mem_map(priv, skb);

	/* Make sure there is space in the ring for the next TX. */
	if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

static int emac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int ret = eth_mac_addr(ndev, addr);

	if (ret)
		return ret;

	/* If running, set now; if not running it will be set in emac_up. */
	if (netif_running(ndev))
		emac_set_mac_addr(priv, ndev->dev_addr);

	return 0;
}

static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
{
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
	emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
}

/*
 * The upper 6 bits of the Ethernet CRC of the MAC address are used as the
 * hash when matching multicast addresses.
 */
static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
{
	u32 crc32 = ether_crc(ETH_ALEN, addr);

	return crc32 >> 26;
}

/* Configure Multicast and Promiscuous modes */
static void emac_set_rx_mode(struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	u32 mc_filter[4] = { 0 };
	u32 hash, reg, bit, val;

	val = emac_rd(priv, MAC_ADDRESS_CONTROL);

	val &= ~MREGBIT_PROMISCUOUS_MODE;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promisc mode */
		val |= MREGBIT_PROMISCUOUS_MODE;
	} else if ((ndev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
		/* Accept all multicast frames by setting every bit */
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
	} else if (!netdev_mc_empty(ndev)) {
		emac_mac_multicast_filter_clear(priv);
		netdev_for_each_mc_addr(ha, ndev) {
			/*
			 * The hash table is an array of 4 16-bit registers. It
			 * is treated like an array of 64 bits (bits[hash]).
			 */
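			/*
			 * For example, hash value 37 sets bit 5 of
			 * mc_filter[2], which is then written to
			 * MAC_MULTICAST_HASH_TABLE3 below.
			 */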
			hash = emac_ether_addr_hash(ha->addr);
			reg = hash / 16;
			bit = hash % 16;
			mc_filter[reg] |= BIT(bit);
		}
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
		emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
	}

	emac_wr(priv, MAC_ADDRESS_CONTROL, val);
}

static int emac_change_mtu(struct net_device *ndev, int mtu)
{
	struct emac_priv *priv = netdev_priv(ndev);
	u32 frame_len;

	if (netif_running(ndev)) {
		netdev_err(ndev, "must be stopped to change MTU\n");
		return -EBUSY;
	}

	frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;

	if (frame_len <= EMAC_DEFAULT_BUFSIZE)
		priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
	else if (frame_len <= EMAC_RX_BUF_2K)
		priv->dma_buf_sz = EMAC_RX_BUF_2K;
	else
		priv->dma_buf_sz = EMAC_RX_BUF_4K;

	ndev->mtu = mtu;

	return 0;
}

static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct emac_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}

static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;

	emac_wr(priv, MAC_MDIO_DATA, 0x0);
	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	if (ret)
		return ret;

	val = emac_rd(priv, MAC_MDIO_DATA);
	return FIELD_GET(MREGBIT_MDIO_DATA, val);
}

static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
			  u16 value)
{
	struct emac_priv *priv = bus->priv;
	u32 cmd = 0, val;
	int ret;

	emac_wr(priv, MAC_MDIO_DATA, value);

	cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
	cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
	cmd |= MREGBIT_START_MDIO_TRANS;

	emac_wr(priv, MAC_MDIO_CONTROL, cmd);

	ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
				 !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);

	return ret;
}

static int emac_mdio_init(struct emac_priv *priv)
{
	struct device *dev = &priv->pdev->dev;
	struct device_node *mii_np;
	struct mii_bus *mii;
	int ret;

	mii = devm_mdiobus_alloc(dev);
	if (!mii)
		return -ENOMEM;

	mii->priv = priv;
	mii->name = "k1_emac_mii";
	mii->read = emac_mii_read;
	mii->write = emac_mii_write;
	mii->parent = dev;
	mii->phy_mask = ~0;
	snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);

	mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");

	ret = devm_of_mdiobus_register(dev, mii, mii_np);
	if (ret)
		dev_err_probe(dev, ret, "Failed to register mdio bus\n");

	of_node_put(mii_np);
	return ret;
}

static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
{
	u32 val;

	val = emac_rd(priv, MAC_FC_CONTROL);

	FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
	FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);

	emac_wr(priv, MAC_FC_CONTROL, val);
}

static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
{
	u32 val = emac_rd(priv, MAC_FC_CONTROL);

	FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);

	emac_wr(priv, MAC_FC_CONTROL, val);
}

static void emac_set_fc(struct emac_priv *priv, u8 fc)
{
	emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
	emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
	priv->flow_control = fc;
}

static void emac_set_fc_autoneg(struct emac_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 local_adv, remote_adv;
	u8 fc;

	local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	remote_adv = 0;

	if (phydev->pause)
		remote_adv |= LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_adv |= LPA_PAUSE_ASYM;

	fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);

	priv->flow_control_autoneg = true;

	emac_set_fc(priv, fc);
}

/*
 * Even though this MAC supports gigabit operation, it only provides 32-bit
 * statistics counters. The most overflow-prone counters are the "bytes" ones,
 * which at gigabit overflow about twice a minute.
 *
 * Therefore, we maintain the high 32 bits of counters ourselves, incrementing
 * every time statistics seem to go backwards. Also, update periodically to
 * catch overflows when we are not otherwise checking the statistics often
 * enough.
 */

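/*
 * At gigabit the byte counters advance at roughly 125 MB/s, so a 32-bit
 * counter wraps after about 2^32 / 125e6 ~= 34 seconds; the 20 second refresh
 * period below stays comfortably within that.
 */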
#define EMAC_STATS_TIMER_PERIOD		20

static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
			      u32 control_reg, u32 high_reg, u32 low_reg)
{
	u32 val, high, low;
	int ret;

	/* The "read" bit is the same for TX and RX */

	val = MREGBIT_START_TX_COUNTER_READ | cnt;
	emac_wr(priv, control_reg, val);
	val = emac_rd(priv, control_reg);

	ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
					!(val & MREGBIT_START_TX_COUNTER_READ),
					100, 10000);

	if (ret) {
		netdev_err(priv->ndev, "Read stat timeout\n");
		return ret;
	}

	high = emac_rd(priv, high_reg);
	low = emac_rd(priv, low_reg);
	*res = high << 16 | lower_16_bits(low);

	return 0;
}

static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
				  MAC_TX_STATCTR_DATA_HIGH,
				  MAC_TX_STATCTR_DATA_LOW);
}

static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
{
	return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
				  MAC_RX_STATCTR_DATA_HIGH,
				  MAC_RX_STATCTR_DATA_LOW);
}

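/*
 * Extend a 32-bit hardware reading into the 64-bit software counter. For
 * example, if the counter was 0x1_fffffff0 and the hardware now reports
 * 0x00000010, the low half went backwards, so the high half is bumped and
 * the result is 0x2_00000010.
 */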
static void emac_update_counter(u64 *counter, u32 new_low)
{
	u32 old_low = lower_32_bits(*counter);
	u64 high = upper_32_bits(*counter);

	if (old_low > new_low) {
		/* Overflowed, increment high 32 bits */
		high++;
	}

	*counter = (high << 32) | new_low;
}

static void emac_stats_update(struct emac_priv *priv)
{
	u64 *tx_stats_off = priv->tx_stats_off.array;
	u64 *rx_stats_off = priv->rx_stats_off.array;
	u64 *tx_stats = priv->tx_stats.array;
	u64 *rx_stats = priv->rx_stats.array;
	u32 i, res, offset;

	assert_spin_locked(&priv->stats_lock);

	if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
		/* Not up, don't try to update */
		return;
	}

	for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
		/*
		 * If reading stats times out, everything is broken and there's
		 * nothing we can do. Reading statistics also can't return an
		 * error, so just return without updating and without
		 * rescheduling.
		 */
		if (emac_tx_read_stat_cnt(priv, i, &res))
			return;

		/*
		 * Re-initializing while bringing interface up resets counters
		 * to zero, so to provide continuity, we add the values saved
		 * last time we did emac_down() to the new hardware-provided
		 * value.
		 */
		offset = lower_32_bits(tx_stats_off[i]);
		emac_update_counter(&tx_stats[i], res + offset);
	}

1175bfec6d7fSVivian Wang /* The same considerations as for the TX stats above apply here */
1176bfec6d7fSVivian Wang for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
1177bfec6d7fSVivian Wang if (emac_rx_read_stat_cnt(priv, i, &res))
1178bfec6d7fSVivian Wang return;
1179bfec6d7fSVivian Wang offset = lower_32_bits(rx_stats_off[i]);
1180bfec6d7fSVivian Wang emac_update_counter(&rx_stats[i], res + offset);
1181bfec6d7fSVivian Wang }
1182bfec6d7fSVivian Wang
1183bfec6d7fSVivian Wang mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
1184bfec6d7fSVivian Wang }
1185bfec6d7fSVivian Wang
1186bfec6d7fSVivian Wang static void emac_stats_timer(struct timer_list *t)
1187bfec6d7fSVivian Wang {
1188bfec6d7fSVivian Wang struct emac_priv *priv = timer_container_of(priv, t, stats_timer);
1189bfec6d7fSVivian Wang
1190bfec6d7fSVivian Wang spin_lock(&priv->stats_lock);
1191bfec6d7fSVivian Wang
1192bfec6d7fSVivian Wang emac_stats_update(priv);
1193bfec6d7fSVivian Wang
1194bfec6d7fSVivian Wang spin_unlock(&priv->stats_lock);
1195bfec6d7fSVivian Wang }
1196bfec6d7fSVivian Wang
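/*
 * Buckets correspond one-to-one to the hardware RX frame-size counters
 * reported in hist[] by emac_get_rmon_stats() below.
 */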
1197bfec6d7fSVivian Wang static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
1198bfec6d7fSVivian Wang { 64, 64 },
1199bfec6d7fSVivian Wang { 65, 127 },
1200bfec6d7fSVivian Wang { 128, 255 },
1201bfec6d7fSVivian Wang { 256, 511 },
1202bfec6d7fSVivian Wang { 512, 1023 },
1203bfec6d7fSVivian Wang { 1024, 1518 },
1204bfec6d7fSVivian Wang { 1519, 4096 },
1205bfec6d7fSVivian Wang { /* sentinel */ },
1206bfec6d7fSVivian Wang };
1207bfec6d7fSVivian Wang
1208bfec6d7fSVivian Wang /* Like dev_fetch_dstats(), but we only use tx_drops */
1209bfec6d7fSVivian Wang static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
1210bfec6d7fSVivian Wang {
1211bfec6d7fSVivian Wang const struct pcpu_dstats *stats;
1212bfec6d7fSVivian Wang u64 tx_drops, total = 0;
1213bfec6d7fSVivian Wang unsigned int start;
1214bfec6d7fSVivian Wang int cpu;
1215bfec6d7fSVivian Wang
1216bfec6d7fSVivian Wang for_each_possible_cpu(cpu) {
1217bfec6d7fSVivian Wang stats = per_cpu_ptr(priv->ndev->dstats, cpu);
1218bfec6d7fSVivian Wang do {
1219bfec6d7fSVivian Wang start = u64_stats_fetch_begin(&stats->syncp);
1220bfec6d7fSVivian Wang tx_drops = u64_stats_read(&stats->tx_drops);
1221bfec6d7fSVivian Wang } while (u64_stats_fetch_retry(&stats->syncp, start));
1222bfec6d7fSVivian Wang
1223bfec6d7fSVivian Wang total += tx_drops;
1224bfec6d7fSVivian Wang }
1225bfec6d7fSVivian Wang
1226bfec6d7fSVivian Wang return total;
1227bfec6d7fSVivian Wang }
1228bfec6d7fSVivian Wang
1229bfec6d7fSVivian Wang static void emac_get_stats64(struct net_device *dev,
1230bfec6d7fSVivian Wang struct rtnl_link_stats64 *storage)
1231bfec6d7fSVivian Wang {
1232bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1233bfec6d7fSVivian Wang union emac_hw_tx_stats *tx_stats;
1234bfec6d7fSVivian Wang union emac_hw_rx_stats *rx_stats;
1235bfec6d7fSVivian Wang
1236bfec6d7fSVivian Wang tx_stats = &priv->tx_stats;
1237bfec6d7fSVivian Wang rx_stats = &priv->rx_stats;
1238bfec6d7fSVivian Wang
1239bfec6d7fSVivian Wang /* This is the only software counter */
1240bfec6d7fSVivian Wang storage->tx_dropped = emac_get_stat_tx_drops(priv);
1241bfec6d7fSVivian Wang
124235626012SVivian Wang spin_lock_bh(&priv->stats_lock);
1243bfec6d7fSVivian Wang
1244bfec6d7fSVivian Wang emac_stats_update(priv);
1245bfec6d7fSVivian Wang
1246bfec6d7fSVivian Wang storage->tx_packets = tx_stats->stats.tx_ok_pkts;
1247bfec6d7fSVivian Wang storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
1248bfec6d7fSVivian Wang storage->tx_errors = tx_stats->stats.tx_err_pkts;
1249bfec6d7fSVivian Wang
1250bfec6d7fSVivian Wang storage->rx_packets = rx_stats->stats.rx_ok_pkts;
1251bfec6d7fSVivian Wang storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
1252bfec6d7fSVivian Wang storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
1253bfec6d7fSVivian Wang storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
1254bfec6d7fSVivian Wang storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
1255bfec6d7fSVivian Wang storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;
1256bfec6d7fSVivian Wang
1257bfec6d7fSVivian Wang storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
1258bfec6d7fSVivian Wang storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
1259bfec6d7fSVivian Wang storage->collisions += tx_stats->stats.tx_excessclsn_pkts;
1260bfec6d7fSVivian Wang
1261bfec6d7fSVivian Wang storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
1262bfec6d7fSVivian Wang storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;
1263bfec6d7fSVivian Wang
126435626012SVivian Wang spin_unlock_bh(&priv->stats_lock);
1265bfec6d7fSVivian Wang }
1266bfec6d7fSVivian Wang
1267bfec6d7fSVivian Wang static void emac_get_rmon_stats(struct net_device *dev,
1268bfec6d7fSVivian Wang struct ethtool_rmon_stats *rmon_stats,
1269bfec6d7fSVivian Wang const struct ethtool_rmon_hist_range **ranges)
1270bfec6d7fSVivian Wang {
1271bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1272bfec6d7fSVivian Wang union emac_hw_rx_stats *rx_stats;
1273bfec6d7fSVivian Wang
1274bfec6d7fSVivian Wang rx_stats = &priv->rx_stats;
1275bfec6d7fSVivian Wang
1276bfec6d7fSVivian Wang *ranges = emac_rmon_hist_ranges;
1277bfec6d7fSVivian Wang
127835626012SVivian Wang spin_lock_bh(&priv->stats_lock);
1279bfec6d7fSVivian Wang
1280bfec6d7fSVivian Wang emac_stats_update(priv);
1281bfec6d7fSVivian Wang
1282bfec6d7fSVivian Wang rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
1283bfec6d7fSVivian Wang rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
1284bfec6d7fSVivian Wang rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
1285bfec6d7fSVivian Wang rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;
1286bfec6d7fSVivian Wang
1287bfec6d7fSVivian Wang /* Only RX has histogram stats */
1288bfec6d7fSVivian Wang
1289bfec6d7fSVivian Wang rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
1290bfec6d7fSVivian Wang rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
1291bfec6d7fSVivian Wang rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
1292bfec6d7fSVivian Wang rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
1293bfec6d7fSVivian Wang rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
1294bfec6d7fSVivian Wang rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
1295bfec6d7fSVivian Wang rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;
1296bfec6d7fSVivian Wang
129735626012SVivian Wang spin_unlock_bh(&priv->stats_lock);
1298bfec6d7fSVivian Wang }
1299bfec6d7fSVivian Wang
1300bfec6d7fSVivian Wang static void emac_get_eth_mac_stats(struct net_device *dev,
1301bfec6d7fSVivian Wang struct ethtool_eth_mac_stats *mac_stats)
1302bfec6d7fSVivian Wang {
1303bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1304bfec6d7fSVivian Wang union emac_hw_tx_stats *tx_stats;
1305bfec6d7fSVivian Wang union emac_hw_rx_stats *rx_stats;
1306bfec6d7fSVivian Wang
1307bfec6d7fSVivian Wang tx_stats = &priv->tx_stats;
1308bfec6d7fSVivian Wang rx_stats = &priv->rx_stats;
1309bfec6d7fSVivian Wang
131035626012SVivian Wang spin_lock_bh(&priv->stats_lock);
1311bfec6d7fSVivian Wang
1312bfec6d7fSVivian Wang emac_stats_update(priv);
1313bfec6d7fSVivian Wang
1314bfec6d7fSVivian Wang mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
1315bfec6d7fSVivian Wang mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
1316bfec6d7fSVivian Wang
1317bfec6d7fSVivian Wang mac_stats->MulticastFramesReceivedOK =
1318bfec6d7fSVivian Wang rx_stats->stats.rx_multicast_pkts;
1319bfec6d7fSVivian Wang mac_stats->BroadcastFramesReceivedOK =
1320bfec6d7fSVivian Wang rx_stats->stats.rx_broadcast_pkts;
1321bfec6d7fSVivian Wang
1322bfec6d7fSVivian Wang mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
1323bfec6d7fSVivian Wang mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
1324bfec6d7fSVivian Wang mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
1325bfec6d7fSVivian Wang mac_stats->FramesAbortedDueToXSColls =
1326bfec6d7fSVivian Wang tx_stats->stats.tx_excessclsn_pkts;
1327bfec6d7fSVivian Wang
132835626012SVivian Wang spin_unlock_bh(&priv->stats_lock);
1329bfec6d7fSVivian Wang }
1330bfec6d7fSVivian Wang
1331bfec6d7fSVivian Wang static void emac_get_pause_stats(struct net_device *dev,
1332bfec6d7fSVivian Wang struct ethtool_pause_stats *pause_stats)
1333bfec6d7fSVivian Wang {
1334bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1335bfec6d7fSVivian Wang union emac_hw_tx_stats *tx_stats;
1336bfec6d7fSVivian Wang union emac_hw_rx_stats *rx_stats;
1337bfec6d7fSVivian Wang
1338bfec6d7fSVivian Wang tx_stats = &priv->tx_stats;
1339bfec6d7fSVivian Wang rx_stats = &priv->rx_stats;
1340bfec6d7fSVivian Wang
134135626012SVivian Wang spin_lock_bh(&priv->stats_lock);
1342bfec6d7fSVivian Wang
1343bfec6d7fSVivian Wang emac_stats_update(priv);
1344bfec6d7fSVivian Wang
1345bfec6d7fSVivian Wang pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
1346bfec6d7fSVivian Wang pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
1347bfec6d7fSVivian Wang
134835626012SVivian Wang spin_unlock_bh(&priv->stats_lock);
1349bfec6d7fSVivian Wang }
1350bfec6d7fSVivian Wang
1351bfec6d7fSVivian Wang /* Other statistics that are not derivable from standard statistics */
1352bfec6d7fSVivian Wang
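/* Map a hardware counter to its u64 index within the stats union and its name */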
1353bfec6d7fSVivian Wang #define EMAC_ETHTOOL_STAT(type, name) \
1354bfec6d7fSVivian Wang { offsetof(type, stats.name) / sizeof(u64), #name }
1355bfec6d7fSVivian Wang
1356bfec6d7fSVivian Wang static const struct emac_ethtool_stats {
1357bfec6d7fSVivian Wang size_t offset;
1358bfec6d7fSVivian Wang char str[ETH_GSTRING_LEN];
1359bfec6d7fSVivian Wang } emac_ethtool_rx_stats[] = {
1360bfec6d7fSVivian Wang EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
1361bfec6d7fSVivian Wang EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
1362bfec6d7fSVivian Wang };
1363bfec6d7fSVivian Wang
1364bfec6d7fSVivian Wang static int emac_get_sset_count(struct net_device *dev, int sset)
1365bfec6d7fSVivian Wang {
1366bfec6d7fSVivian Wang switch (sset) {
1367bfec6d7fSVivian Wang case ETH_SS_STATS:
1368bfec6d7fSVivian Wang return ARRAY_SIZE(emac_ethtool_rx_stats);
1369bfec6d7fSVivian Wang default:
1370bfec6d7fSVivian Wang return -EOPNOTSUPP;
1371bfec6d7fSVivian Wang }
1372bfec6d7fSVivian Wang }
1373bfec6d7fSVivian Wang
1374bfec6d7fSVivian Wang static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1375bfec6d7fSVivian Wang {
1376bfec6d7fSVivian Wang int i;
1377bfec6d7fSVivian Wang
1378bfec6d7fSVivian Wang switch (stringset) {
1379bfec6d7fSVivian Wang case ETH_SS_STATS:
1380bfec6d7fSVivian Wang for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
1381bfec6d7fSVivian Wang memcpy(data, emac_ethtool_rx_stats[i].str,
1382bfec6d7fSVivian Wang ETH_GSTRING_LEN);
1383bfec6d7fSVivian Wang data += ETH_GSTRING_LEN;
1384bfec6d7fSVivian Wang }
1385bfec6d7fSVivian Wang break;
1386bfec6d7fSVivian Wang }
1387bfec6d7fSVivian Wang }
1388bfec6d7fSVivian Wang
1389bfec6d7fSVivian Wang static void emac_get_ethtool_stats(struct net_device *dev,
1390bfec6d7fSVivian Wang struct ethtool_stats *stats, u64 *data)
1391bfec6d7fSVivian Wang {
1392bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1393bfec6d7fSVivian Wang u64 *rx_stats = (u64 *)&priv->rx_stats;
1394bfec6d7fSVivian Wang int i;
1395bfec6d7fSVivian Wang
139635626012SVivian Wang spin_lock_bh(&priv->stats_lock);
1397bfec6d7fSVivian Wang
1398bfec6d7fSVivian Wang emac_stats_update(priv);
1399bfec6d7fSVivian Wang
1400bfec6d7fSVivian Wang for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
1401bfec6d7fSVivian Wang data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
1402bfec6d7fSVivian Wang
140335626012SVivian Wang spin_unlock_bh(&priv->stats_lock);
1404bfec6d7fSVivian Wang }
1405bfec6d7fSVivian Wang
1406bfec6d7fSVivian Wang static int emac_ethtool_get_regs_len(struct net_device *dev)
1407bfec6d7fSVivian Wang {
1408bfec6d7fSVivian Wang return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
1409bfec6d7fSVivian Wang }
1410bfec6d7fSVivian Wang
1411bfec6d7fSVivian Wang static void emac_ethtool_get_regs(struct net_device *dev,
1412bfec6d7fSVivian Wang struct ethtool_regs *regs, void *space)
1413bfec6d7fSVivian Wang {
1414bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1415bfec6d7fSVivian Wang u32 *reg_space = space;
1416bfec6d7fSVivian Wang int i;
1417bfec6d7fSVivian Wang
1418bfec6d7fSVivian Wang regs->version = 1;
1419bfec6d7fSVivian Wang
1420bfec6d7fSVivian Wang for (i = 0; i < EMAC_DMA_REG_CNT; i++)
1421bfec6d7fSVivian Wang reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
1422bfec6d7fSVivian Wang
1423bfec6d7fSVivian Wang for (i = 0; i < EMAC_MAC_REG_CNT; i++)
1424bfec6d7fSVivian Wang reg_space[i + EMAC_DMA_REG_CNT] =
1425bfec6d7fSVivian Wang emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
1426bfec6d7fSVivian Wang }
1427bfec6d7fSVivian Wang
1428bfec6d7fSVivian Wang static void emac_get_pauseparam(struct net_device *dev,
1429bfec6d7fSVivian Wang struct ethtool_pauseparam *pause)
1430bfec6d7fSVivian Wang {
1431bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1432bfec6d7fSVivian Wang
1433bfec6d7fSVivian Wang pause->autoneg = priv->flow_control_autoneg;
1434bfec6d7fSVivian Wang pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
1435bfec6d7fSVivian Wang pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
1436bfec6d7fSVivian Wang }
1437bfec6d7fSVivian Wang
1438bfec6d7fSVivian Wang static int emac_set_pauseparam(struct net_device *dev,
1439bfec6d7fSVivian Wang struct ethtool_pauseparam *pause)
1440bfec6d7fSVivian Wang {
1441bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1442bfec6d7fSVivian Wang u8 fc = 0;
1443bfec6d7fSVivian Wang
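/* The PHY is only attached while the interface is up */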
1444*5556f234SVivian Wang if (!netif_running(dev))
1445*5556f234SVivian Wang return -ENETDOWN;
1446*5556f234SVivian Wang
1447bfec6d7fSVivian Wang priv->flow_control_autoneg = pause->autoneg;
1448bfec6d7fSVivian Wang
1449bfec6d7fSVivian Wang if (pause->autoneg) {
1450bfec6d7fSVivian Wang emac_set_fc_autoneg(priv);
1451bfec6d7fSVivian Wang } else {
1452bfec6d7fSVivian Wang if (pause->tx_pause)
1453bfec6d7fSVivian Wang fc |= FLOW_CTRL_TX;
1454bfec6d7fSVivian Wang
1455bfec6d7fSVivian Wang if (pause->rx_pause)
1456bfec6d7fSVivian Wang fc |= FLOW_CTRL_RX;
1457bfec6d7fSVivian Wang
1458bfec6d7fSVivian Wang emac_set_fc(priv, fc);
1459bfec6d7fSVivian Wang }
1460bfec6d7fSVivian Wang
1461bfec6d7fSVivian Wang return 0;
1462bfec6d7fSVivian Wang }
1463bfec6d7fSVivian Wang
1464bfec6d7fSVivian Wang static void emac_get_drvinfo(struct net_device *dev,
1465bfec6d7fSVivian Wang struct ethtool_drvinfo *info)
1466bfec6d7fSVivian Wang {
1467bfec6d7fSVivian Wang strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1468bfec6d7fSVivian Wang info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
1469bfec6d7fSVivian Wang }
1470bfec6d7fSVivian Wang
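/* Recover from a TX timeout by restarting the interface under RTNL */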
1471bfec6d7fSVivian Wang static void emac_tx_timeout_task(struct work_struct *work)
1472bfec6d7fSVivian Wang {
1473bfec6d7fSVivian Wang struct net_device *ndev;
1474bfec6d7fSVivian Wang struct emac_priv *priv;
1475bfec6d7fSVivian Wang
1476bfec6d7fSVivian Wang priv = container_of(work, struct emac_priv, tx_timeout_task);
1477bfec6d7fSVivian Wang ndev = priv->ndev;
1478bfec6d7fSVivian Wang
1479bfec6d7fSVivian Wang rtnl_lock();
1480bfec6d7fSVivian Wang
1481bfec6d7fSVivian Wang /* No need to reset if already down */
1482bfec6d7fSVivian Wang if (!netif_running(ndev)) {
1483bfec6d7fSVivian Wang rtnl_unlock();
1484bfec6d7fSVivian Wang return;
1485bfec6d7fSVivian Wang }
1486bfec6d7fSVivian Wang
1487bfec6d7fSVivian Wang netdev_err(ndev, "MAC reset due to TX timeout\n");
1488bfec6d7fSVivian Wang
1489bfec6d7fSVivian Wang netif_trans_update(ndev); /* prevent tx timeout */
1490bfec6d7fSVivian Wang dev_close(ndev);
1491bfec6d7fSVivian Wang dev_open(ndev, NULL);
1492bfec6d7fSVivian Wang
1493bfec6d7fSVivian Wang rtnl_unlock();
1494bfec6d7fSVivian Wang }
1495bfec6d7fSVivian Wang
1496bfec6d7fSVivian Wang static void emac_sw_init(struct emac_priv *priv)
1497bfec6d7fSVivian Wang {
1498bfec6d7fSVivian Wang priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
1499bfec6d7fSVivian Wang
1500bfec6d7fSVivian Wang priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
1501bfec6d7fSVivian Wang priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;
1502bfec6d7fSVivian Wang
1503bfec6d7fSVivian Wang spin_lock_init(&priv->stats_lock);
1504bfec6d7fSVivian Wang
1505bfec6d7fSVivian Wang INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
1506bfec6d7fSVivian Wang
1507bfec6d7fSVivian Wang priv->tx_coal_frames = EMAC_TX_FRAMES;
1508bfec6d7fSVivian Wang priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;
1509bfec6d7fSVivian Wang
1510bfec6d7fSVivian Wang timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
1511bfec6d7fSVivian Wang timer_setup(&priv->stats_timer, emac_stats_timer, 0);
1512bfec6d7fSVivian Wang }
1513bfec6d7fSVivian Wang
1514bfec6d7fSVivian Wang static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
1515bfec6d7fSVivian Wang {
1516bfec6d7fSVivian Wang struct net_device *ndev = (struct net_device *)dev_id;
1517bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(ndev);
1518bfec6d7fSVivian Wang bool should_schedule = false;
1519bfec6d7fSVivian Wang u32 clr = 0;
1520bfec6d7fSVivian Wang u32 status;
1521bfec6d7fSVivian Wang
1522bfec6d7fSVivian Wang status = emac_rd(priv, DMA_STATUS_IRQ);
1523bfec6d7fSVivian Wang
1524bfec6d7fSVivian Wang if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
1525bfec6d7fSVivian Wang clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
1526bfec6d7fSVivian Wang should_schedule = true;
1527bfec6d7fSVivian Wang }
1528bfec6d7fSVivian Wang
1529bfec6d7fSVivian Wang if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
1530bfec6d7fSVivian Wang clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
1531bfec6d7fSVivian Wang
1532bfec6d7fSVivian Wang if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
1533bfec6d7fSVivian Wang clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
1534bfec6d7fSVivian Wang
1535bfec6d7fSVivian Wang if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
1536bfec6d7fSVivian Wang clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
1537bfec6d7fSVivian Wang should_schedule = true;
1538bfec6d7fSVivian Wang }
1539bfec6d7fSVivian Wang
1540bfec6d7fSVivian Wang if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
1541bfec6d7fSVivian Wang clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
1542bfec6d7fSVivian Wang
1543bfec6d7fSVivian Wang if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
1544bfec6d7fSVivian Wang clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
1545bfec6d7fSVivian Wang
1546bfec6d7fSVivian Wang if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
1547bfec6d7fSVivian Wang clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
1548bfec6d7fSVivian Wang
1549bfec6d7fSVivian Wang if (should_schedule) {
1550bfec6d7fSVivian Wang if (napi_schedule_prep(&priv->napi)) {
1551bfec6d7fSVivian Wang emac_disable_interrupt(priv);
1552bfec6d7fSVivian Wang __napi_schedule_irqoff(&priv->napi);
1553bfec6d7fSVivian Wang }
1554bfec6d7fSVivian Wang }
1555bfec6d7fSVivian Wang
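/* Acknowledge the interrupt causes we observed */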
1556bfec6d7fSVivian Wang emac_wr(priv, DMA_STATUS_IRQ, clr);
1557bfec6d7fSVivian Wang
1558bfec6d7fSVivian Wang return IRQ_HANDLED;
1559bfec6d7fSVivian Wang }
1560bfec6d7fSVivian Wang
1561bfec6d7fSVivian Wang static void emac_configure_tx(struct emac_priv *priv)
1562bfec6d7fSVivian Wang {
1563bfec6d7fSVivian Wang u32 val;
1564bfec6d7fSVivian Wang
1565bfec6d7fSVivian Wang /* Set base address */
1566bfec6d7fSVivian Wang val = (u32)priv->tx_ring.desc_dma_addr;
1567bfec6d7fSVivian Wang emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
1568bfec6d7fSVivian Wang
1569bfec6d7fSVivian Wang /* Set TX inter-frame gap value, enable transmit */
1570bfec6d7fSVivian Wang val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
1571bfec6d7fSVivian Wang val &= ~MREGBIT_IFG_LEN;
1572bfec6d7fSVivian Wang val |= MREGBIT_TRANSMIT_ENABLE;
1573bfec6d7fSVivian Wang val |= MREGBIT_TRANSMIT_AUTO_RETRY;
1574bfec6d7fSVivian Wang emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
1575bfec6d7fSVivian Wang
1576bfec6d7fSVivian Wang emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);
1577bfec6d7fSVivian Wang
1578bfec6d7fSVivian Wang /* Start TX DMA */
1579bfec6d7fSVivian Wang val = emac_rd(priv, DMA_CONTROL);
1580bfec6d7fSVivian Wang val |= MREGBIT_START_STOP_TRANSMIT_DMA;
1581bfec6d7fSVivian Wang emac_wr(priv, DMA_CONTROL, val);
1582bfec6d7fSVivian Wang }
1583bfec6d7fSVivian Wang
1584bfec6d7fSVivian Wang static void emac_configure_rx(struct emac_priv *priv)
1585bfec6d7fSVivian Wang {
1586bfec6d7fSVivian Wang u32 val;
1587bfec6d7fSVivian Wang
1588bfec6d7fSVivian Wang /* Set base address */
1589bfec6d7fSVivian Wang val = (u32)priv->rx_ring.desc_dma_addr;
1590bfec6d7fSVivian Wang emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
1591bfec6d7fSVivian Wang
1592bfec6d7fSVivian Wang /* Enable receive */
1593bfec6d7fSVivian Wang val = emac_rd(priv, MAC_RECEIVE_CONTROL);
1594bfec6d7fSVivian Wang val |= MREGBIT_RECEIVE_ENABLE;
1595bfec6d7fSVivian Wang val |= MREGBIT_STORE_FORWARD;
1596bfec6d7fSVivian Wang emac_wr(priv, MAC_RECEIVE_CONTROL, val);
1597bfec6d7fSVivian Wang
1598bfec6d7fSVivian Wang /* Start RX DMA */
1599bfec6d7fSVivian Wang val = emac_rd(priv, DMA_CONTROL);
1600bfec6d7fSVivian Wang val |= MREGBIT_START_STOP_RECEIVE_DMA;
1601bfec6d7fSVivian Wang emac_wr(priv, DMA_CONTROL, val);
1602bfec6d7fSVivian Wang }
1603bfec6d7fSVivian Wang
1604bfec6d7fSVivian Wang static void emac_adjust_link(struct net_device *dev)
1605bfec6d7fSVivian Wang {
1606bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(dev);
1607bfec6d7fSVivian Wang struct phy_device *phydev = dev->phydev;
1608bfec6d7fSVivian Wang u32 ctrl;
1609bfec6d7fSVivian Wang
1610bfec6d7fSVivian Wang if (phydev->link) {
1611bfec6d7fSVivian Wang ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
1612bfec6d7fSVivian Wang
1613bfec6d7fSVivian Wang /* Update duplex and speed from PHY */
1614bfec6d7fSVivian Wang
1615bfec6d7fSVivian Wang FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
1616bfec6d7fSVivian Wang phydev->duplex == DUPLEX_FULL);
1617bfec6d7fSVivian Wang
1618bfec6d7fSVivian Wang ctrl &= ~MREGBIT_SPEED;
1619bfec6d7fSVivian Wang
1620bfec6d7fSVivian Wang switch (phydev->speed) {
1621bfec6d7fSVivian Wang case SPEED_1000:
1622bfec6d7fSVivian Wang ctrl |= MREGBIT_SPEED_1000M;
1623bfec6d7fSVivian Wang break;
1624bfec6d7fSVivian Wang case SPEED_100:
1625bfec6d7fSVivian Wang ctrl |= MREGBIT_SPEED_100M;
1626bfec6d7fSVivian Wang break;
1627bfec6d7fSVivian Wang case SPEED_10:
1628bfec6d7fSVivian Wang ctrl |= MREGBIT_SPEED_10M;
1629bfec6d7fSVivian Wang break;
1630bfec6d7fSVivian Wang default:
1631bfec6d7fSVivian Wang netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
1632bfec6d7fSVivian Wang phydev->speed = SPEED_UNKNOWN;
1633bfec6d7fSVivian Wang break;
1634bfec6d7fSVivian Wang }
1635bfec6d7fSVivian Wang
1636bfec6d7fSVivian Wang emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
1637bfec6d7fSVivian Wang
1638bfec6d7fSVivian Wang emac_set_fc_autoneg(priv);
1639bfec6d7fSVivian Wang }
1640bfec6d7fSVivian Wang
1641bfec6d7fSVivian Wang phy_print_status(phydev);
1642bfec6d7fSVivian Wang }
1643bfec6d7fSVivian Wang
1644bfec6d7fSVivian Wang static void emac_update_delay_line(struct emac_priv *priv)
1645bfec6d7fSVivian Wang {
1646bfec6d7fSVivian Wang u32 mask = 0, val = 0;
1647bfec6d7fSVivian Wang
1648bfec6d7fSVivian Wang mask |= EMAC_RX_DLINE_EN;
1649bfec6d7fSVivian Wang mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
1650bfec6d7fSVivian Wang mask |= EMAC_TX_DLINE_EN;
1651bfec6d7fSVivian Wang mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
1652bfec6d7fSVivian Wang
1653bfec6d7fSVivian Wang if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
1654bfec6d7fSVivian Wang val |= EMAC_RX_DLINE_EN;
1655bfec6d7fSVivian Wang val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
1656bfec6d7fSVivian Wang EMAC_DLINE_STEP_15P6);
1657bfec6d7fSVivian Wang val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
1658bfec6d7fSVivian Wang
1659bfec6d7fSVivian Wang val |= EMAC_TX_DLINE_EN;
1660bfec6d7fSVivian Wang val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
1661bfec6d7fSVivian Wang EMAC_DLINE_STEP_15P6);
1662bfec6d7fSVivian Wang val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
1663bfec6d7fSVivian Wang }
1664bfec6d7fSVivian Wang
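/* For non-RGMII interfaces val stays zero, so both delay lines are disabled */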
1665bfec6d7fSVivian Wang regmap_update_bits(priv->regmap_apmu,
1666bfec6d7fSVivian Wang priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
1667bfec6d7fSVivian Wang mask, val);
1668bfec6d7fSVivian Wang }
1669bfec6d7fSVivian Wang
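/* Look up phy-mode and the PHY (or fixed-link) from DT and attach to it */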
1670bfec6d7fSVivian Wang static int emac_phy_connect(struct net_device *ndev)
1671bfec6d7fSVivian Wang {
1672bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(ndev);
1673bfec6d7fSVivian Wang struct device *dev = &priv->pdev->dev;
1674bfec6d7fSVivian Wang struct phy_device *phydev;
1675bfec6d7fSVivian Wang struct device_node *np;
1676bfec6d7fSVivian Wang int ret;
1677bfec6d7fSVivian Wang
1678bfec6d7fSVivian Wang ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
1679bfec6d7fSVivian Wang if (ret) {
1680bfec6d7fSVivian Wang netdev_err(ndev, "No phy-mode found");
1681bfec6d7fSVivian Wang return ret;
1682bfec6d7fSVivian Wang }
1683bfec6d7fSVivian Wang
1684bfec6d7fSVivian Wang switch (priv->phy_interface) {
1685bfec6d7fSVivian Wang case PHY_INTERFACE_MODE_RMII:
1686bfec6d7fSVivian Wang case PHY_INTERFACE_MODE_RGMII:
1687bfec6d7fSVivian Wang case PHY_INTERFACE_MODE_RGMII_ID:
1688bfec6d7fSVivian Wang case PHY_INTERFACE_MODE_RGMII_RXID:
1689bfec6d7fSVivian Wang case PHY_INTERFACE_MODE_RGMII_TXID:
1690bfec6d7fSVivian Wang break;
1691bfec6d7fSVivian Wang default:
1692bfec6d7fSVivian Wang netdev_err(ndev, "Unsupported PHY interface %s",
1693bfec6d7fSVivian Wang phy_modes(priv->phy_interface));
1694bfec6d7fSVivian Wang return -EINVAL;
1695bfec6d7fSVivian Wang }
1696bfec6d7fSVivian Wang
1697bfec6d7fSVivian Wang np = of_parse_phandle(dev->of_node, "phy-handle", 0);
1698bfec6d7fSVivian Wang if (!np && of_phy_is_fixed_link(dev->of_node))
1699bfec6d7fSVivian Wang np = of_node_get(dev->of_node);
1700bfec6d7fSVivian Wang
1701bfec6d7fSVivian Wang if (!np) {
1702bfec6d7fSVivian Wang netdev_err(ndev, "No PHY specified");
1703bfec6d7fSVivian Wang return -ENODEV;
1704bfec6d7fSVivian Wang }
1705bfec6d7fSVivian Wang
1706bfec6d7fSVivian Wang ret = emac_phy_interface_config(priv);
1707bfec6d7fSVivian Wang if (ret)
1708bfec6d7fSVivian Wang goto err_node_put;
1709bfec6d7fSVivian Wang
1710bfec6d7fSVivian Wang phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
1711bfec6d7fSVivian Wang priv->phy_interface);
1712bfec6d7fSVivian Wang if (!phydev) {
1713bfec6d7fSVivian Wang netdev_err(ndev, "Could not attach to PHY\n");
1714bfec6d7fSVivian Wang ret = -ENODEV;
1715bfec6d7fSVivian Wang goto err_node_put;
1716bfec6d7fSVivian Wang }
1717bfec6d7fSVivian Wang
1718bfec6d7fSVivian Wang phy_support_asym_pause(phydev);
1719bfec6d7fSVivian Wang
1720bfec6d7fSVivian Wang phydev->mac_managed_pm = true;
1721bfec6d7fSVivian Wang
1722bfec6d7fSVivian Wang emac_update_delay_line(priv);
1723bfec6d7fSVivian Wang
1724bfec6d7fSVivian Wang err_node_put:
1725bfec6d7fSVivian Wang of_node_put(np);
1726bfec6d7fSVivian Wang return ret;
1727bfec6d7fSVivian Wang }
1728bfec6d7fSVivian Wang
1729bfec6d7fSVivian Wang static int emac_up(struct emac_priv *priv)
1730bfec6d7fSVivian Wang {
1731bfec6d7fSVivian Wang struct platform_device *pdev = priv->pdev;
1732bfec6d7fSVivian Wang struct net_device *ndev = priv->ndev;
1733bfec6d7fSVivian Wang int ret;
1734bfec6d7fSVivian Wang
1735bfec6d7fSVivian Wang pm_runtime_get_sync(&pdev->dev);
1736bfec6d7fSVivian Wang
1737bfec6d7fSVivian Wang ret = emac_phy_connect(ndev);
1738bfec6d7fSVivian Wang if (ret) {
1739bfec6d7fSVivian Wang dev_err(&pdev->dev, "emac_phy_connect failed\n");
1740bfec6d7fSVivian Wang goto err_pm_put;
1741bfec6d7fSVivian Wang }
1742bfec6d7fSVivian Wang
1743bfec6d7fSVivian Wang emac_init_hw(priv);
1744bfec6d7fSVivian Wang
1745bfec6d7fSVivian Wang emac_set_mac_addr(priv, ndev->dev_addr);
1746bfec6d7fSVivian Wang emac_configure_tx(priv);
1747bfec6d7fSVivian Wang emac_configure_rx(priv);
1748bfec6d7fSVivian Wang
1749bfec6d7fSVivian Wang emac_alloc_rx_desc_buffers(priv);
1750bfec6d7fSVivian Wang
1751bfec6d7fSVivian Wang phy_start(ndev->phydev);
1752bfec6d7fSVivian Wang
1753bfec6d7fSVivian Wang ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
1754bfec6d7fSVivian Wang ndev->name, ndev);
1755bfec6d7fSVivian Wang if (ret) {
1756bfec6d7fSVivian Wang dev_err(&pdev->dev, "request_irq failed\n");
1757bfec6d7fSVivian Wang goto err_reset_disconnect_phy;
1758bfec6d7fSVivian Wang }
1759bfec6d7fSVivian Wang
1760bfec6d7fSVivian Wang /* Don't enable MAC interrupts */
1761bfec6d7fSVivian Wang emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
1762bfec6d7fSVivian Wang
1763bfec6d7fSVivian Wang /* Enable DMA interrupts */
1764bfec6d7fSVivian Wang emac_wr(priv, DMA_INTERRUPT_ENABLE,
1765bfec6d7fSVivian Wang MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
1766bfec6d7fSVivian Wang MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
1767bfec6d7fSVivian Wang MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
1768bfec6d7fSVivian Wang MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
1769bfec6d7fSVivian Wang MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
1770bfec6d7fSVivian Wang
1771bfec6d7fSVivian Wang napi_enable(&priv->napi);
1772bfec6d7fSVivian Wang
1773bfec6d7fSVivian Wang netif_start_queue(ndev);
1774bfec6d7fSVivian Wang
177535626012SVivian Wang mod_timer(&priv->stats_timer, jiffies);
1776bfec6d7fSVivian Wang
1777bfec6d7fSVivian Wang return 0;
1778bfec6d7fSVivian Wang
1779bfec6d7fSVivian Wang err_reset_disconnect_phy:
1780bfec6d7fSVivian Wang emac_reset_hw(priv);
1781bfec6d7fSVivian Wang phy_disconnect(ndev->phydev);
1782bfec6d7fSVivian Wang
1783bfec6d7fSVivian Wang err_pm_put:
1784bfec6d7fSVivian Wang pm_runtime_put_sync(&pdev->dev);
1785bfec6d7fSVivian Wang return ret;
1786bfec6d7fSVivian Wang }
1787bfec6d7fSVivian Wang
1788bfec6d7fSVivian Wang static int emac_down(struct emac_priv *priv)
1789bfec6d7fSVivian Wang {
1790bfec6d7fSVivian Wang struct platform_device *pdev = priv->pdev;
1791bfec6d7fSVivian Wang struct net_device *ndev = priv->ndev;
1792bfec6d7fSVivian Wang
1793bfec6d7fSVivian Wang netif_stop_queue(ndev);
1794bfec6d7fSVivian Wang
1795bfec6d7fSVivian Wang phy_disconnect(ndev->phydev);
1796bfec6d7fSVivian Wang
1797bfec6d7fSVivian Wang emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
1798bfec6d7fSVivian Wang emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
1799bfec6d7fSVivian Wang
1800bfec6d7fSVivian Wang free_irq(priv->irq, ndev);
1801bfec6d7fSVivian Wang
1802bfec6d7fSVivian Wang napi_disable(&priv->napi);
1803bfec6d7fSVivian Wang
1804bfec6d7fSVivian Wang timer_delete_sync(&priv->txtimer);
1805bfec6d7fSVivian Wang cancel_work_sync(&priv->tx_timeout_task);
1806bfec6d7fSVivian Wang
1807bfec6d7fSVivian Wang timer_delete_sync(&priv->stats_timer);
1808bfec6d7fSVivian Wang
1809bfec6d7fSVivian Wang emac_reset_hw(priv);
1810bfec6d7fSVivian Wang
1811bfec6d7fSVivian Wang /* Update and save current stats, see emac_stats_update() for usage */
1812bfec6d7fSVivian Wang
181335626012SVivian Wang spin_lock_bh(&priv->stats_lock);
1814bfec6d7fSVivian Wang
1815bfec6d7fSVivian Wang emac_stats_update(priv);
1816bfec6d7fSVivian Wang
1817bfec6d7fSVivian Wang priv->tx_stats_off = priv->tx_stats;
1818bfec6d7fSVivian Wang priv->rx_stats_off = priv->rx_stats;
1819bfec6d7fSVivian Wang
182035626012SVivian Wang spin_unlock_bh(&priv->stats_lock);
1821bfec6d7fSVivian Wang
1822bfec6d7fSVivian Wang pm_runtime_put_sync(&pdev->dev);
1823bfec6d7fSVivian Wang return 0;
1824bfec6d7fSVivian Wang }
1825bfec6d7fSVivian Wang
1826bfec6d7fSVivian Wang /* Called when net interface is brought up. */
1827bfec6d7fSVivian Wang static int emac_open(struct net_device *ndev)
1828bfec6d7fSVivian Wang {
1829bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(ndev);
1830bfec6d7fSVivian Wang struct device *dev = &priv->pdev->dev;
1831bfec6d7fSVivian Wang int ret;
1832bfec6d7fSVivian Wang
1833bfec6d7fSVivian Wang ret = emac_alloc_tx_resources(priv);
1834bfec6d7fSVivian Wang if (ret) {
1835bfec6d7fSVivian Wang dev_err(dev, "Cannot allocate TX resources\n");
1836bfec6d7fSVivian Wang return ret;
1837bfec6d7fSVivian Wang }
1838bfec6d7fSVivian Wang
1839bfec6d7fSVivian Wang ret = emac_alloc_rx_resources(priv);
1840bfec6d7fSVivian Wang if (ret) {
1841bfec6d7fSVivian Wang dev_err(dev, "Cannot allocate RX resources\n");
1842bfec6d7fSVivian Wang goto err_free_tx;
1843bfec6d7fSVivian Wang }
1844bfec6d7fSVivian Wang
1845bfec6d7fSVivian Wang ret = emac_up(priv);
1846bfec6d7fSVivian Wang if (ret) {
1847bfec6d7fSVivian Wang dev_err(dev, "Error when bringing interface up\n");
1848bfec6d7fSVivian Wang goto err_free_rx;
1849bfec6d7fSVivian Wang }
1850bfec6d7fSVivian Wang return 0;
1851bfec6d7fSVivian Wang
1852bfec6d7fSVivian Wang err_free_rx:
1853bfec6d7fSVivian Wang emac_free_rx_resources(priv);
1854bfec6d7fSVivian Wang err_free_tx:
1855bfec6d7fSVivian Wang emac_free_tx_resources(priv);
1856bfec6d7fSVivian Wang
1857bfec6d7fSVivian Wang return ret;
1858bfec6d7fSVivian Wang }
1859bfec6d7fSVivian Wang
1860bfec6d7fSVivian Wang /* Called when interface is brought down. */
1861bfec6d7fSVivian Wang static int emac_stop(struct net_device *ndev)
1862bfec6d7fSVivian Wang {
1863bfec6d7fSVivian Wang struct emac_priv *priv = netdev_priv(ndev);
1864bfec6d7fSVivian Wang
1865bfec6d7fSVivian Wang emac_down(priv);
1866bfec6d7fSVivian Wang emac_free_tx_resources(priv);
1867bfec6d7fSVivian Wang emac_free_rx_resources(priv);
1868bfec6d7fSVivian Wang
1869bfec6d7fSVivian Wang return 0;
1870bfec6d7fSVivian Wang }
1871bfec6d7fSVivian Wang
1872bfec6d7fSVivian Wang static const struct ethtool_ops emac_ethtool_ops = {
1873bfec6d7fSVivian Wang .get_link_ksettings = phy_ethtool_get_link_ksettings,
1874bfec6d7fSVivian Wang .set_link_ksettings = phy_ethtool_set_link_ksettings,
1875bfec6d7fSVivian Wang .nway_reset = phy_ethtool_nway_reset,
1876bfec6d7fSVivian Wang .get_drvinfo = emac_get_drvinfo,
1877bfec6d7fSVivian Wang .get_link = ethtool_op_get_link,
1878bfec6d7fSVivian Wang
1879bfec6d7fSVivian Wang .get_regs = emac_ethtool_get_regs,
1880bfec6d7fSVivian Wang .get_regs_len = emac_ethtool_get_regs_len,
1881bfec6d7fSVivian Wang
1882bfec6d7fSVivian Wang .get_rmon_stats = emac_get_rmon_stats,
1883bfec6d7fSVivian Wang .get_pause_stats = emac_get_pause_stats,
1884bfec6d7fSVivian Wang .get_eth_mac_stats = emac_get_eth_mac_stats,
1885bfec6d7fSVivian Wang
1886bfec6d7fSVivian Wang .get_sset_count = emac_get_sset_count,
1887bfec6d7fSVivian Wang .get_strings = emac_get_strings,
1888bfec6d7fSVivian Wang .get_ethtool_stats = emac_get_ethtool_stats,
1889bfec6d7fSVivian Wang
1890bfec6d7fSVivian Wang .get_pauseparam = emac_get_pauseparam,
1891bfec6d7fSVivian Wang .set_pauseparam = emac_set_pauseparam,
1892bfec6d7fSVivian Wang };
1893bfec6d7fSVivian Wang
1894bfec6d7fSVivian Wang static const struct net_device_ops emac_netdev_ops = {
1895bfec6d7fSVivian Wang .ndo_open = emac_open,
1896bfec6d7fSVivian Wang .ndo_stop = emac_stop,
1897bfec6d7fSVivian Wang .ndo_start_xmit = emac_start_xmit,
1898bfec6d7fSVivian Wang .ndo_validate_addr = eth_validate_addr,
1899bfec6d7fSVivian Wang .ndo_set_mac_address = emac_set_mac_address,
1900bfec6d7fSVivian Wang .ndo_eth_ioctl = phy_do_ioctl_running,
1901bfec6d7fSVivian Wang .ndo_change_mtu = emac_change_mtu,
1902bfec6d7fSVivian Wang .ndo_tx_timeout = emac_tx_timeout,
1903bfec6d7fSVivian Wang .ndo_set_rx_mode = emac_set_rx_mode,
1904bfec6d7fSVivian Wang .ndo_get_stats64 = emac_get_stats64,
1905bfec6d7fSVivian Wang };
1906bfec6d7fSVivian Wang
1907bfec6d7fSVivian Wang /* Currently we always use 15.6 ps/step for the delay line */
1908bfec6d7fSVivian Wang
1909bfec6d7fSVivian Wang static u32 delay_ps_to_unit(u32 ps)
1910bfec6d7fSVivian Wang {
1911bfec6d7fSVivian Wang return DIV_ROUND_CLOSEST(ps * 10, 156);
1912bfec6d7fSVivian Wang }
1913bfec6d7fSVivian Wang
1914bfec6d7fSVivian Wang static u32 delay_unit_to_ps(u32 unit)
1915bfec6d7fSVivian Wang {
1916bfec6d7fSVivian Wang return DIV_ROUND_CLOSEST(unit * 156, 10);
1917bfec6d7fSVivian Wang }
1918bfec6d7fSVivian Wang
1919bfec6d7fSVivian Wang #define EMAC_MAX_DELAY_UNIT FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
1920bfec6d7fSVivian Wang
1921bfec6d7fSVivian Wang /* Minus one just to be safe from rounding errors */
1922bfec6d7fSVivian Wang #define EMAC_MAX_DELAY_PS (delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
1923bfec6d7fSVivian Wang
1924bfec6d7fSVivian Wang static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
1925bfec6d7fSVivian Wang {
1926bfec6d7fSVivian Wang struct device_node *np = pdev->dev.of_node;
1927bfec6d7fSVivian Wang struct device *dev = &pdev->dev;
1928bfec6d7fSVivian Wang u8 mac_addr[ETH_ALEN] = { 0 };
1929bfec6d7fSVivian Wang int ret;
1930bfec6d7fSVivian Wang
1931bfec6d7fSVivian Wang priv->iobase = devm_platform_ioremap_resource(pdev, 0);
1932bfec6d7fSVivian Wang if (IS_ERR(priv->iobase))
1933bfec6d7fSVivian Wang return dev_err_probe(dev, PTR_ERR(priv->iobase),
1934bfec6d7fSVivian Wang "ioremap failed\n");
1935bfec6d7fSVivian Wang
1936bfec6d7fSVivian Wang priv->regmap_apmu =
1937bfec6d7fSVivian Wang syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
1938bfec6d7fSVivian Wang &priv->regmap_apmu_offset);
1939bfec6d7fSVivian Wang
1940bfec6d7fSVivian Wang if (IS_ERR(priv->regmap_apmu))
1941bfec6d7fSVivian Wang return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
1942bfec6d7fSVivian Wang "failed to get syscon\n");
1943bfec6d7fSVivian Wang
1944bfec6d7fSVivian Wang priv->irq = platform_get_irq(pdev, 0);
1945bfec6d7fSVivian Wang if (priv->irq < 0)
1946bfec6d7fSVivian Wang return priv->irq;
1947bfec6d7fSVivian Wang
1948bfec6d7fSVivian Wang ret = of_get_mac_address(np, mac_addr);
1949bfec6d7fSVivian Wang if (ret) {
1950bfec6d7fSVivian Wang if (ret == -EPROBE_DEFER)
1951bfec6d7fSVivian Wang return dev_err_probe(dev, ret,
1952bfec6d7fSVivian Wang "Can't get MAC address\n");
1953bfec6d7fSVivian Wang
1954bfec6d7fSVivian Wang dev_info(&pdev->dev, "Using random MAC address\n");
1955bfec6d7fSVivian Wang eth_hw_addr_random(priv->ndev);
1956bfec6d7fSVivian Wang } else {
1957bfec6d7fSVivian Wang eth_hw_addr_set(priv->ndev, mac_addr);
1958bfec6d7fSVivian Wang }
1959bfec6d7fSVivian Wang
1960bfec6d7fSVivian Wang priv->tx_delay = 0;
1961bfec6d7fSVivian Wang priv->rx_delay = 0;
1962bfec6d7fSVivian Wang
1963bfec6d7fSVivian Wang of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
1964bfec6d7fSVivian Wang of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
1965bfec6d7fSVivian Wang
1966bfec6d7fSVivian Wang if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
1967bfec6d7fSVivian Wang dev_err(&pdev->dev,
1968bfec6d7fSVivian Wang "tx-internal-delay-ps too large: max %d, got %d",
1969bfec6d7fSVivian Wang EMAC_MAX_DELAY_PS, priv->tx_delay);
1970bfec6d7fSVivian Wang return -EINVAL;
1971bfec6d7fSVivian Wang }
1972bfec6d7fSVivian Wang
1973bfec6d7fSVivian Wang if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
1974bfec6d7fSVivian Wang dev_err(&pdev->dev,
1975bfec6d7fSVivian Wang "rx-internal-delay-ps too large: max %d, got %d",
1976bfec6d7fSVivian Wang EMAC_MAX_DELAY_PS, priv->rx_delay);
1977bfec6d7fSVivian Wang return -EINVAL;
1978bfec6d7fSVivian Wang }
1979bfec6d7fSVivian Wang
1980bfec6d7fSVivian Wang priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
1981bfec6d7fSVivian Wang priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
1982bfec6d7fSVivian Wang
1983bfec6d7fSVivian Wang return 0;
1984bfec6d7fSVivian Wang }
1985bfec6d7fSVivian Wang
1986bfec6d7fSVivian Wang static void emac_phy_deregister_fixed_link(void *data)
1987bfec6d7fSVivian Wang {
1988bfec6d7fSVivian Wang struct device_node *of_node = data;
1989bfec6d7fSVivian Wang
1990bfec6d7fSVivian Wang of_phy_deregister_fixed_link(of_node);
1991bfec6d7fSVivian Wang }
1992bfec6d7fSVivian Wang
1993bfec6d7fSVivian Wang static int emac_probe(struct platform_device *pdev)
1994bfec6d7fSVivian Wang {
1995bfec6d7fSVivian Wang struct device *dev = &pdev->dev;
1996bfec6d7fSVivian Wang struct reset_control *reset;
1997bfec6d7fSVivian Wang struct net_device *ndev;
1998bfec6d7fSVivian Wang struct emac_priv *priv;
1999bfec6d7fSVivian Wang int ret;
2000bfec6d7fSVivian Wang
2001bfec6d7fSVivian Wang ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
2002bfec6d7fSVivian Wang if (!ndev)
2003bfec6d7fSVivian Wang return -ENOMEM;
2004bfec6d7fSVivian Wang
2005bfec6d7fSVivian Wang ndev->hw_features = NETIF_F_SG;
2006bfec6d7fSVivian Wang ndev->features |= ndev->hw_features;
2007bfec6d7fSVivian Wang
2008bfec6d7fSVivian Wang ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
2009bfec6d7fSVivian Wang ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
2010bfec6d7fSVivian Wang
2011bfec6d7fSVivian Wang priv = netdev_priv(ndev);
2012bfec6d7fSVivian Wang priv->ndev = ndev;
2013bfec6d7fSVivian Wang priv->pdev = pdev;
2014bfec6d7fSVivian Wang platform_set_drvdata(pdev, priv);
2015bfec6d7fSVivian Wang
2016bfec6d7fSVivian Wang ret = emac_config_dt(pdev, priv);
2017bfec6d7fSVivian Wang if (ret < 0)
2018bfec6d7fSVivian Wang return dev_err_probe(dev, ret, "Configuration failed\n");
2019bfec6d7fSVivian Wang
2020bfec6d7fSVivian Wang ndev->watchdog_timeo = 5 * HZ;
2021bfec6d7fSVivian Wang ndev->base_addr = (unsigned long)priv->iobase;
2022bfec6d7fSVivian Wang ndev->irq = priv->irq;
2023bfec6d7fSVivian Wang
2024bfec6d7fSVivian Wang ndev->ethtool_ops = &emac_ethtool_ops;
2025bfec6d7fSVivian Wang ndev->netdev_ops = &emac_netdev_ops;
2026bfec6d7fSVivian Wang
2027bfec6d7fSVivian Wang devm_pm_runtime_enable(&pdev->dev);
2028bfec6d7fSVivian Wang
2029bfec6d7fSVivian Wang priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
2030bfec6d7fSVivian Wang if (IS_ERR(priv->bus_clk))
2031bfec6d7fSVivian Wang return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
2032bfec6d7fSVivian Wang "Failed to get clock\n");
2033bfec6d7fSVivian Wang
2034bfec6d7fSVivian Wang reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
2035bfec6d7fSVivian Wang NULL);
2036bfec6d7fSVivian Wang if (IS_ERR(reset))
2037bfec6d7fSVivian Wang return dev_err_probe(dev, PTR_ERR(reset),
2038bfec6d7fSVivian Wang "Failed to get reset\n");
2039bfec6d7fSVivian Wang
2040bfec6d7fSVivian Wang if (of_phy_is_fixed_link(dev->of_node)) {
2041bfec6d7fSVivian Wang ret = of_phy_register_fixed_link(dev->of_node);
2042bfec6d7fSVivian Wang if (ret)
2043bfec6d7fSVivian Wang return dev_err_probe(dev, ret,
2044bfec6d7fSVivian Wang "Failed to register fixed-link\n");
2045bfec6d7fSVivian Wang
2046bfec6d7fSVivian Wang ret = devm_add_action_or_reset(dev,
2047bfec6d7fSVivian Wang emac_phy_deregister_fixed_link,
2048bfec6d7fSVivian Wang dev->of_node);
2049bfec6d7fSVivian Wang
2050bfec6d7fSVivian Wang if (ret) {
2051bfec6d7fSVivian Wang dev_err(dev, "devm_add_action_or_reset failed\n");
2052bfec6d7fSVivian Wang return ret;
2053bfec6d7fSVivian Wang }
2054bfec6d7fSVivian Wang }
2055bfec6d7fSVivian Wang
2056bfec6d7fSVivian Wang emac_sw_init(priv);
2057bfec6d7fSVivian Wang
2058bfec6d7fSVivian Wang ret = emac_mdio_init(priv);
2059bfec6d7fSVivian Wang if (ret)
2060bfec6d7fSVivian Wang goto err_timer_delete;
2061bfec6d7fSVivian Wang
2062bfec6d7fSVivian Wang SET_NETDEV_DEV(ndev, &pdev->dev);
2063bfec6d7fSVivian Wang
2064bfec6d7fSVivian Wang ret = devm_register_netdev(dev, ndev);
2065bfec6d7fSVivian Wang if (ret) {
2066bfec6d7fSVivian Wang dev_err(dev, "devm_register_netdev failed\n");
2067bfec6d7fSVivian Wang goto err_timer_delete;
2068bfec6d7fSVivian Wang }
2069bfec6d7fSVivian Wang
2070bfec6d7fSVivian Wang netif_napi_add(ndev, &priv->napi, emac_rx_poll);
2071bfec6d7fSVivian Wang netif_carrier_off(ndev);
2072bfec6d7fSVivian Wang
2073bfec6d7fSVivian Wang return 0;
2074bfec6d7fSVivian Wang
2075bfec6d7fSVivian Wang err_timer_delete:
2076bfec6d7fSVivian Wang timer_delete_sync(&priv->txtimer);
2077bfec6d7fSVivian Wang timer_delete_sync(&priv->stats_timer);
2078bfec6d7fSVivian Wang
2079bfec6d7fSVivian Wang return ret;
2080bfec6d7fSVivian Wang }
2081bfec6d7fSVivian Wang
2082bfec6d7fSVivian Wang static void emac_remove(struct platform_device *pdev)
2083bfec6d7fSVivian Wang {
2084bfec6d7fSVivian Wang struct emac_priv *priv = platform_get_drvdata(pdev);
2085bfec6d7fSVivian Wang
2086bfec6d7fSVivian Wang timer_shutdown_sync(&priv->txtimer);
2087bfec6d7fSVivian Wang cancel_work_sync(&priv->tx_timeout_task);
2088bfec6d7fSVivian Wang
2089bfec6d7fSVivian Wang timer_shutdown_sync(&priv->stats_timer);
2090bfec6d7fSVivian Wang
2091bfec6d7fSVivian Wang emac_reset_hw(priv);
2092bfec6d7fSVivian Wang }
2093bfec6d7fSVivian Wang
2094bfec6d7fSVivian Wang static int emac_resume(struct device *dev)
2095bfec6d7fSVivian Wang {
2096bfec6d7fSVivian Wang struct emac_priv *priv = dev_get_drvdata(dev);
2097bfec6d7fSVivian Wang struct net_device *ndev = priv->ndev;
2098bfec6d7fSVivian Wang int ret;
2099bfec6d7fSVivian Wang
2100bfec6d7fSVivian Wang ret = clk_prepare_enable(priv->bus_clk);
2101bfec6d7fSVivian Wang if (ret < 0) {
2102bfec6d7fSVivian Wang dev_err(dev, "Failed to enable bus clock: %d\n", ret);
2103bfec6d7fSVivian Wang return ret;
2104bfec6d7fSVivian Wang }
2105bfec6d7fSVivian Wang
2106bfec6d7fSVivian Wang if (!netif_running(ndev))
2107bfec6d7fSVivian Wang return 0;
2108bfec6d7fSVivian Wang
2109bfec6d7fSVivian Wang ret = emac_open(ndev);
2110bfec6d7fSVivian Wang if (ret) {
2111bfec6d7fSVivian Wang clk_disable_unprepare(priv->bus_clk);
2112bfec6d7fSVivian Wang return ret;
2113bfec6d7fSVivian Wang }
2114bfec6d7fSVivian Wang
2115bfec6d7fSVivian Wang netif_device_attach(ndev);
2116bfec6d7fSVivian Wang
211735626012SVivian Wang mod_timer(&priv->stats_timer, jiffies);
2118bfec6d7fSVivian Wang
2119bfec6d7fSVivian Wang return 0;
2120bfec6d7fSVivian Wang }
2121bfec6d7fSVivian Wang
2122bfec6d7fSVivian Wang static int emac_suspend(struct device *dev)
2123bfec6d7fSVivian Wang {
2124bfec6d7fSVivian Wang struct emac_priv *priv = dev_get_drvdata(dev);
2125bfec6d7fSVivian Wang struct net_device *ndev = priv->ndev;
2126bfec6d7fSVivian Wang
2127bfec6d7fSVivian Wang if (!ndev || !netif_running(ndev)) {
2128bfec6d7fSVivian Wang clk_disable_unprepare(priv->bus_clk);
2129bfec6d7fSVivian Wang return 0;
2130bfec6d7fSVivian Wang }
2131bfec6d7fSVivian Wang
2132bfec6d7fSVivian Wang emac_stop(ndev);
2133bfec6d7fSVivian Wang
2134bfec6d7fSVivian Wang clk_disable_unprepare(priv->bus_clk);
2135bfec6d7fSVivian Wang netif_device_detach(ndev);
2136bfec6d7fSVivian Wang return 0;
2137bfec6d7fSVivian Wang }
2138bfec6d7fSVivian Wang
2139bfec6d7fSVivian Wang static const struct dev_pm_ops emac_pm_ops = {
2140bfec6d7fSVivian Wang SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
2141bfec6d7fSVivian Wang };
2142bfec6d7fSVivian Wang
2143bfec6d7fSVivian Wang static const struct of_device_id emac_of_match[] = {
2144bfec6d7fSVivian Wang { .compatible = "spacemit,k1-emac" },
2145bfec6d7fSVivian Wang { /* sentinel */ },
2146bfec6d7fSVivian Wang };
2147bfec6d7fSVivian Wang MODULE_DEVICE_TABLE(of, emac_of_match);
2148bfec6d7fSVivian Wang
2149bfec6d7fSVivian Wang static struct platform_driver emac_driver = {
2150bfec6d7fSVivian Wang .probe = emac_probe,
2151bfec6d7fSVivian Wang .remove = emac_remove,
2152bfec6d7fSVivian Wang .driver = {
2153bfec6d7fSVivian Wang .name = DRIVER_NAME,
2154bfec6d7fSVivian Wang .of_match_table = of_match_ptr(emac_of_match),
2155bfec6d7fSVivian Wang .pm = &emac_pm_ops,
2156bfec6d7fSVivian Wang },
2157bfec6d7fSVivian Wang };
2158bfec6d7fSVivian Wang module_platform_driver(emac_driver);
2159bfec6d7fSVivian Wang
2160bfec6d7fSVivian Wang MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
2161bfec6d7fSVivian Wang MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
2162bfec6d7fSVivian Wang MODULE_LICENSE("GPL");