// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>

#define STATION_ADDR_LOW		0x0000
#define STATION_ADDR_HIGH		0x0004
#define MAC_DUPLEX_HALF_CTRL		0x0008
#define MAX_FRM_SIZE			0x003c
#define PORT_MODE			0x0040
#define PORT_EN				0x0044
#define BITS_TX_EN			BIT(2)
#define BITS_RX_EN			BIT(1)
#define REC_FILT_CONTROL		0x0064
#define BIT_CRC_ERR_PASS		BIT(5)
#define BIT_PAUSE_FRM_PASS		BIT(4)
#define BIT_VLAN_DROP_EN		BIT(3)
#define BIT_BC_DROP_EN			BIT(2)
#define BIT_MC_MATCH_EN			BIT(1)
#define BIT_UC_MATCH_EN			BIT(0)
#define PORT_MC_ADDR_LOW		0x0068
#define PORT_MC_ADDR_HIGH		0x006c
#define CF_CRC_STRIP			0x01b0
#define MODE_CHANGE_EN			0x01b4
#define BIT_MODE_CHANGE_EN		BIT(0)
#define COL_SLOT_TIME			0x01c0
#define RECV_CONTROL			0x01e0
#define BIT_STRIP_PAD_EN		BIT(3)
#define BIT_RUNT_PKT_EN			BIT(4)
#define CONTROL_WORD			0x0214
#define MDIO_SINGLE_CMD			0x03c0
#define MDIO_SINGLE_DATA		0x03c4
#define MDIO_CTRL			0x03cc
#define MDIO_RDATA_STATUS		0x03d0

#define MDIO_START			BIT(20)
#define MDIO_R_VALID			BIT(0)
#define MDIO_READ			(BIT(17) | MDIO_START)
#define MDIO_WRITE			(BIT(16) | MDIO_START)

#define RX_FQ_START_ADDR		0x0500
#define RX_FQ_DEPTH			0x0504
#define RX_FQ_WR_ADDR			0x0508
#define RX_FQ_RD_ADDR			0x050c
#define RX_FQ_VLDDESC_CNT		0x0510
#define RX_FQ_ALEMPTY_TH		0x0514
#define RX_FQ_REG_EN			0x0518
#define BITS_RX_FQ_START_ADDR_EN	BIT(2)
#define BITS_RX_FQ_DEPTH_EN		BIT(1)
#define BITS_RX_FQ_RD_ADDR_EN		BIT(0)
#define RX_FQ_ALFULL_TH			0x051c
#define RX_BQ_START_ADDR		0x0520
#define RX_BQ_DEPTH			0x0524
#define RX_BQ_WR_ADDR			0x0528
#define RX_BQ_RD_ADDR			0x052c
#define RX_BQ_FREE_DESC_CNT		0x0530
#define RX_BQ_ALEMPTY_TH		0x0534
#define RX_BQ_REG_EN			0x0538
#define BITS_RX_BQ_START_ADDR_EN	BIT(2)
#define BITS_RX_BQ_DEPTH_EN		BIT(1)
#define BITS_RX_BQ_WR_ADDR_EN		BIT(0)
#define RX_BQ_ALFULL_TH			0x053c
#define TX_BQ_START_ADDR		0x0580
#define TX_BQ_DEPTH			0x0584
#define TX_BQ_WR_ADDR			0x0588
#define TX_BQ_RD_ADDR			0x058c
#define TX_BQ_VLDDESC_CNT		0x0590
#define TX_BQ_ALEMPTY_TH		0x0594
#define TX_BQ_REG_EN			0x0598
#define BITS_TX_BQ_START_ADDR_EN	BIT(2)
#define BITS_TX_BQ_DEPTH_EN		BIT(1)
#define BITS_TX_BQ_RD_ADDR_EN		BIT(0)
#define TX_BQ_ALFULL_TH			0x059c
#define TX_RQ_START_ADDR		0x05a0
#define TX_RQ_DEPTH			0x05a4
#define TX_RQ_WR_ADDR			0x05a8
#define TX_RQ_RD_ADDR			0x05ac
#define TX_RQ_FREE_DESC_CNT		0x05b0
#define TX_RQ_ALEMPTY_TH		0x05b4
#define TX_RQ_REG_EN			0x05b8
#define BITS_TX_RQ_START_ADDR_EN	BIT(2)
#define BITS_TX_RQ_DEPTH_EN		BIT(1)
#define BITS_TX_RQ_WR_ADDR_EN		BIT(0)
#define TX_RQ_ALFULL_TH			0x05bc
#define RAW_PMU_INT			0x05c0
#define ENA_PMU_INT			0x05c4
#define STATUS_PMU_INT			0x05c8
#define MAC_FIFO_ERR_IN			BIT(30)
#define TX_RQ_IN_TIMEOUT_INT		BIT(29)
#define RX_BQ_IN_TIMEOUT_INT		BIT(28)
#define TXOUTCFF_FULL_INT		BIT(27)
#define TXOUTCFF_EMPTY_INT		BIT(26)
#define TXCFF_FULL_INT			BIT(25)
#define TXCFF_EMPTY_INT			BIT(24)
#define RXOUTCFF_FULL_INT		BIT(23)
#define RXOUTCFF_EMPTY_INT		BIT(22)
#define RXCFF_FULL_INT			BIT(21)
#define RXCFF_EMPTY_INT			BIT(20)
#define TX_RQ_IN_INT			BIT(19)
#define TX_BQ_OUT_INT			BIT(18)
#define RX_BQ_IN_INT			BIT(17)
#define RX_FQ_OUT_INT			BIT(16)
#define TX_RQ_EMPTY_INT			BIT(15)
#define TX_RQ_FULL_INT			BIT(14)
#define TX_RQ_ALEMPTY_INT		BIT(13)
#define TX_RQ_ALFULL_INT		BIT(12)
#define TX_BQ_EMPTY_INT			BIT(11)
#define TX_BQ_FULL_INT			BIT(10)
#define TX_BQ_ALEMPTY_INT		BIT(9)
#define TX_BQ_ALFULL_INT		BIT(8)
#define RX_BQ_EMPTY_INT			BIT(7)
#define RX_BQ_FULL_INT			BIT(6)
#define RX_BQ_ALEMPTY_INT		BIT(5)
#define RX_BQ_ALFULL_INT		BIT(4)
#define RX_FQ_EMPTY_INT			BIT(3)
#define RX_FQ_FULL_INT			BIT(2)
#define RX_FQ_ALEMPTY_INT		BIT(1)
#define RX_FQ_ALFULL_INT		BIT(0)

#define DEF_INT_MASK			(RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
					TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)

#define DESC_WR_RD_ENA			0x05cc
#define IN_QUEUE_TH			0x05d8
#define OUT_QUEUE_TH			0x05dc
#define QUEUE_TX_BQ_SHIFT		16
#define RX_BQ_IN_TIMEOUT_TH		0x05e0
#define TX_RQ_IN_TIMEOUT_TH		0x05e4
#define STOP_CMD			0x05e8
#define BITS_TX_STOP			BIT(1)
#define BITS_RX_STOP			BIT(0)
#define FLUSH_CMD			0x05ec
#define BITS_TX_FLUSH_CMD		BIT(5)
#define BITS_RX_FLUSH_CMD		BIT(4)
#define BITS_TX_FLUSH_FLAG_DOWN		BIT(3)
#define BITS_TX_FLUSH_FLAG_UP		BIT(2)
#define BITS_RX_FLUSH_FLAG_DOWN		BIT(1)
#define BITS_RX_FLUSH_FLAG_UP		BIT(0)
#define RX_CFF_NUM_REG			0x05f0
#define PMU_FSM_REG			0x05f8
#define RX_FIFO_PKT_IN_NUM		0x05fc
#define RX_FIFO_PKT_OUT_NUM		0x0600

#define RGMII_SPEED_1000		0x2c
#define RGMII_SPEED_100			0x2f
#define RGMII_SPEED_10			0x2d
#define MII_SPEED_100			0x0f
#define MII_SPEED_10			0x0d
#define GMAC_SPEED_1000			0x05
#define GMAC_SPEED_100			0x01
#define GMAC_SPEED_10			0x00
#define GMAC_FULL_DUPLEX		BIT(4)

#define RX_BQ_INT_THRESHOLD		0x01
#define TX_RQ_INT_THRESHOLD		0x01
#define RX_BQ_IN_TIMEOUT		0x10000
#define TX_RQ_IN_TIMEOUT		0x50000

#define MAC_MAX_FRAME_SIZE		1600
#define DESC_SIZE			32
#define RX_DESC_NUM			1024
#define TX_DESC_NUM			1024

#define DESC_VLD_FREE			0
#define DESC_VLD_BUSY			0x80000000
#define DESC_FL_MID			0
#define DESC_FL_LAST			0x20000000
#define DESC_FL_FIRST			0x40000000
#define DESC_FL_FULL			0x60000000
#define DESC_DATA_LEN_OFF		16
#define DESC_BUFF_LEN_OFF		0
#define DESC_DATA_MASK			0x7ff
#define DESC_SG				BIT(30)
#define DESC_FRAGS_NUM_OFF		11

/* DMA descriptor ring helpers */
#define dma_ring_incr(n, s)		(((n) + 1) & ((s) - 1))
#define dma_cnt(n)			((n) >> 5)
#define dma_byte(n)			((n) << 5)
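/* Each descriptor is DESC_SIZE (32) bytes, so dma_cnt() converts a
 * hardware byte pointer into a descriptor index (>> 5) and dma_byte()
 * converts an index back into a byte pointer (<< 5). For example, an
 * RX_FQ_WR_ADDR value of 0x40 corresponds to index 2, and
 * dma_ring_incr(1023, RX_DESC_NUM) wraps back to 0; the wrap assumes
 * the ring size is a power of two.
 */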

#define HW_CAP_TSO			BIT(0)
#define GEMAC_V1			0
#define GEMAC_V2			(GEMAC_V1 | HW_CAP_TSO)
#define HAS_CAP_TSO(hw_cap)		((hw_cap) & HW_CAP_TSO)

#define PHY_RESET_DELAYS_PROPERTY	"hisilicon,phy-reset-delays-us"

enum phy_reset_delays {
	PRE_DELAY,
	PULSE,
	POST_DELAY,
	DELAYS_NUM,
};

struct hix5hd2_desc {
	__le32 buff_addr;
	__le32 cmd;
} __aligned(32);

struct hix5hd2_desc_sw {
	struct hix5hd2_desc *desc;
	dma_addr_t	phys_addr;
	unsigned int	count;
	unsigned int	size;
};

struct hix5hd2_sg_desc_ring {
	struct sg_desc *desc;
	dma_addr_t phys_addr;
};

struct frags_info {
	__le32 addr;
	__le32 size;
};

/* maximum number of skb frags supported by the hardware */
#define SG_MAX_SKB_FRAGS	17
struct sg_desc {
	__le32 total_len;
	__le32 resvd0;
	__le32 linear_addr;
	__le32 linear_len;
	/* reserve one extra frag slot for memory alignment */
	struct frags_info frags[SG_MAX_SKB_FRAGS + 1];
};

#define QUEUE_NUMS	4
struct hix5hd2_priv {
	struct hix5hd2_desc_sw pool[QUEUE_NUMS];
#define rx_fq		pool[0]
#define rx_bq		pool[1]
#define tx_bq		pool[2]
#define tx_rq		pool[3]
	struct hix5hd2_sg_desc_ring tx_ring;

	void __iomem *base;
	void __iomem *ctrl_base;

	struct sk_buff *tx_skb[TX_DESC_NUM];
	struct sk_buff *rx_skb[RX_DESC_NUM];

	struct device *dev;
	struct net_device *netdev;

	struct device_node *phy_node;
	phy_interface_t	phy_mode;

	unsigned long hw_cap;
	unsigned int speed;
	unsigned int duplex;

	struct clk *mac_core_clk;
	struct clk *mac_ifc_clk;
	struct reset_control *mac_core_rst;
	struct reset_control *mac_ifc_rst;
	struct reset_control *phy_rst;
	u32 phy_reset_delays[DELAYS_NUM];
	struct mii_bus *bus;
	struct napi_struct napi;
	struct work_struct tx_timeout_task;
};

static inline void hix5hd2_mac_interface_reset(struct hix5hd2_priv *priv)
{
	if (!priv->mac_ifc_rst)
		return;

	reset_control_assert(priv->mac_ifc_rst);
	reset_control_deassert(priv->mac_ifc_rst);
}

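/* Program link speed and duplex in two places: the external speed
 * control register (ctrl_base), after which the MAC interface logic is
 * reset, and the MAC's own PORT_MODE register, which is only written
 * while BIT_MODE_CHANGE_EN is set.
 */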
static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		if (speed == SPEED_1000)
			val = RGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = RGMII_SPEED_100;
		else
			val = RGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(dev, "unsupported phy mode\n");
		val = MII_SPEED_10;
		break;
	}

	if (duplex)
		val |= GMAC_FULL_DUPLEX;
	writel_relaxed(val, priv->ctrl_base);
	hix5hd2_mac_interface_reset(priv);

	writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
	if (speed == SPEED_1000)
		val = GMAC_SPEED_1000;
	else if (speed == SPEED_100)
		val = GMAC_SPEED_100;
	else
		val = GMAC_SPEED_10;
	writel_relaxed(val, priv->base + PORT_MODE);
	writel_relaxed(0, priv->base + MODE_CHANGE_EN);
	writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
}

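/* The queue depth and base-address registers are write-protected: set
 * the matching bit in the queue's *_REG_EN register, write the value,
 * then clear the enable bit again. The depth value appears to be
 * expressed in units of 8 bytes, hence the "count << 3".
 */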
static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
{
	writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
	writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
	writel_relaxed(0, priv->base + RX_FQ_REG_EN);

	writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
	writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
	writel_relaxed(0, priv->base + RX_BQ_REG_EN);

	writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
	writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
	writel_relaxed(0, priv->base + TX_BQ_REG_EN);

	writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
	writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
	writel_relaxed(0, priv->base + TX_RQ_REG_EN);
}

static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
{
	writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
	writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
	writel_relaxed(0, priv->base + RX_FQ_REG_EN);
}

static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
{
	writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
	writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
	writel_relaxed(0, priv->base + RX_BQ_REG_EN);
}

static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
{
	writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
	writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
	writel_relaxed(0, priv->base + TX_BQ_REG_EN);
}

static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
{
	writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
	writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
	writel_relaxed(0, priv->base + TX_RQ_REG_EN);
}

static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
{
	hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
	hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
	hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
	hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
}

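/* One-time MAC and queue setup on open: mask and clear all interrupts,
 * set the receive filter and control word, program the interrupt
 * coalescing thresholds and timeouts, then size and place the four
 * descriptor queues.
 */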
static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
{
	u32 val;

	/* disable and clear all interrupts */
	writel_relaxed(0, priv->base + ENA_PMU_INT);
	writel_relaxed(~0, priv->base + RAW_PMU_INT);

	writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
	writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
	writel_relaxed(0, priv->base + COL_SLOT_TIME);

	val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
	writel_relaxed(val, priv->base + IN_QUEUE_TH);

	writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
	writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);

	hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
	hix5hd2_set_desc_addr(priv);
}

static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
{
	writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
}

static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
{
	writel_relaxed(0, priv->base + ENA_PMU_INT);
}

static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
{
	writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
	writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
}

static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
{
	writel_relaxed(~(u32)(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
	writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
}

static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	const unsigned char *mac = dev->dev_addr;
	u32 val;

	val = mac[1] | (mac[0] << 8);
	writel_relaxed(val, priv->base + STATION_ADDR_HIGH);

	val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
	writel_relaxed(val, priv->base + STATION_ADDR_LOW);
}

static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (!ret)
		hix5hd2_hw_set_mac_addr(dev);

	return ret;
}

static void hix5hd2_adjust_link(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	struct phy_device *phy = dev->phydev;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hix5hd2_config_port(dev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}

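/* Replenish the RX free queue: for every free slot between the
 * driver's write pointer and the hardware's read pointer, allocate and
 * map a new skb, publish its DMA address with DESC_VLD_FREE, and
 * finally advance RX_FQ_WR_ADDR so the hardware may use the buffers.
 */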
static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
{
	struct hix5hd2_desc *desc;
	struct sk_buff *skb;
	u32 start, end, num, pos, i;
	u32 len = MAC_MAX_FRAME_SIZE;
	dma_addr_t addr;

	/* software write pointer */
	start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
	/* logic read pointer */
	end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
	num = CIRC_SPACE(start, end, RX_DESC_NUM);

	for (i = 0, pos = start; i < num; i++) {
		if (priv->rx_skb[pos])
			break;

		skb = netdev_alloc_skb_ip_align(priv->netdev, len);
		if (unlikely(!skb))
			break;

		addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, addr)) {
			dev_kfree_skb_any(skb);
			break;
		}

		desc = priv->rx_fq.desc + pos;
		desc->buff_addr = cpu_to_le32(addr);
		priv->rx_skb[pos] = skb;
		desc->cmd = cpu_to_le32(DESC_VLD_FREE |
					(len - 1) << DESC_BUFF_LEN_OFF);
		pos = dma_ring_incr(pos, RX_DESC_NUM);
	}

	/* ensure desc updated */
	wmb();

	if (pos != start)
		writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
}

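/* Receive: walk the RX back queue between the driver's read pointer
 * and the hardware's write pointer (bounded by the NAPI budget), unmap
 * each frame and hand it to the stack via GRO, then advance
 * RX_BQ_RD_ADDR and refill the free queue.
 */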
static int hix5hd2_rx(struct net_device *dev, int limit)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	struct hix5hd2_desc *desc;
	dma_addr_t addr;
	u32 start, end, num, pos, i, len;

	/* software read pointer */
	start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
	/* logic write pointer */
	end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
	num = CIRC_CNT(end, start, RX_DESC_NUM);
	if (num > limit)
		num = limit;

	/* ensure we see updated descriptors */
	rmb();
	for (i = 0, pos = start; i < num; i++) {
		skb = priv->rx_skb[pos];
		if (unlikely(!skb)) {
			netdev_err(dev, "inconsistent rx_skb\n");
			break;
		}
		priv->rx_skb[pos] = NULL;

		desc = priv->rx_bq.desc + pos;
		len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
		       DESC_DATA_MASK;
		addr = le32_to_cpu(desc->buff_addr);
		dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, len);
		if (skb->len > MAC_MAX_FRAME_SIZE) {
			netdev_err(dev, "rcv len err, len = %d\n", skb->len);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(&priv->napi, skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next:
		pos = dma_ring_incr(pos, RX_DESC_NUM);
	}

	if (pos != start)
		writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);

	hix5hd2_rx_refill(priv);

	return num;
}

static void hix5hd2_clean_sg_desc(struct hix5hd2_priv *priv,
				  struct sk_buff *skb, u32 pos)
{
	struct sg_desc *desc;
	dma_addr_t addr;
	u32 len;
	int i;

	desc = priv->tx_ring.desc + pos;

	addr = le32_to_cpu(desc->linear_addr);
	len = le32_to_cpu(desc->linear_len);
	dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		addr = le32_to_cpu(desc->frags[i].addr);
		len = le32_to_cpu(desc->frags[i].size);
		dma_unmap_page(priv->dev, addr, len, DMA_TO_DEVICE);
	}
}

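/* TX completion: under the xmit lock, walk the TX reclaim queue
 * between the driver's read pointer and the hardware's write pointer,
 * unmap and free each completed skb, advance TX_RQ_RD_ADDR, update the
 * BQL accounting and wake the queue if it was stopped.
 */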
static void hix5hd2_xmit_reclaim(struct net_device *dev)
{
	struct sk_buff *skb;
	struct hix5hd2_desc *desc;
	struct hix5hd2_priv *priv = netdev_priv(dev);
	unsigned int bytes_compl = 0, pkts_compl = 0;
	u32 start, end, num, pos, i;
	dma_addr_t addr;

	netif_tx_lock(dev);

	/* software read */
	start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
	/* logic write */
	end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
	num = CIRC_CNT(end, start, TX_DESC_NUM);

	for (i = 0, pos = start; i < num; i++) {
		skb = priv->tx_skb[pos];
		if (unlikely(!skb)) {
			netdev_err(dev, "inconsistent tx_skb\n");
			break;
		}

		pkts_compl++;
		bytes_compl += skb->len;
		desc = priv->tx_rq.desc + pos;

		if (skb_shinfo(skb)->nr_frags) {
			hix5hd2_clean_sg_desc(priv, skb, pos);
		} else {
			addr = le32_to_cpu(desc->buff_addr);
			dma_unmap_single(priv->dev, addr, skb->len,
					 DMA_TO_DEVICE);
		}

		priv->tx_skb[pos] = NULL;
		dev_consume_skb_any(skb);
		pos = dma_ring_incr(pos, TX_DESC_NUM);
	}

	if (pos != start)
		writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);

	netif_tx_unlock(dev);

	if (pkts_compl || bytes_compl)
		netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
		netif_wake_queue(priv->netdev);
}

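/* NAPI poll: reclaim TX completions and receive up to the remaining
 * budget, re-reading (and acknowledging) the raw interrupt status so
 * that events which arrived while polling are not lost. Interrupts are
 * re-enabled only when the budget was not exhausted.
 */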
static int hix5hd2_poll(struct napi_struct *napi, int budget)
{
	struct hix5hd2_priv *priv = container_of(napi,
				struct hix5hd2_priv, napi);
	struct net_device *dev = priv->netdev;
	int work_done = 0, task = budget;
	int ints, num;

	do {
		hix5hd2_xmit_reclaim(dev);
		num = hix5hd2_rx(dev, task);
		work_done += num;
		task -= num;
		if ((work_done >= budget) || (num == 0))
			break;

		ints = readl_relaxed(priv->base + RAW_PMU_INT);
		writel_relaxed(ints, priv->base + RAW_PMU_INT);
	} while (ints & DEF_INT_MASK);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		hix5hd2_irq_enable(priv);
	}

	return work_done;
}

static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct hix5hd2_priv *priv = netdev_priv(dev);
	int ints = readl_relaxed(priv->base + RAW_PMU_INT);

	writel_relaxed(ints, priv->base + RAW_PMU_INT);
	if (likely(ints & DEF_INT_MASK)) {
		hix5hd2_irq_disable(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

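/* Build the descriptor command word. On TSO/SG capable hardware the
 * fragment count is encoded at DESC_FRAGS_NUM_OFF and DESC_SG marks a
 * scatter-gather frame; otherwise the frame is a single full buffer
 * (DESC_FL_FULL) with its length in the buffer-length field. In both
 * cases the total data length and DESC_VLD_BUSY are set.
 */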
static u32 hix5hd2_get_desc_cmd(struct sk_buff *skb, unsigned long hw_cap)
{
	u32 cmd = 0;

	if (HAS_CAP_TSO(hw_cap)) {
		if (skb_shinfo(skb)->nr_frags)
			cmd |= DESC_SG;
		cmd |= skb_shinfo(skb)->nr_frags << DESC_FRAGS_NUM_OFF;
	} else {
		cmd |= DESC_FL_FULL |
			((skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
	}

	cmd |= (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF;
	cmd |= DESC_VLD_BUSY;

	return cmd;
}

static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
				struct sk_buff *skb, u32 pos)
{
	struct sg_desc *desc;
	dma_addr_t addr;
	int ret;
	int i;

	desc = priv->tx_ring.desc + pos;

	desc->total_len = cpu_to_le32(skb->len);
	addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, addr)))
		return -EINVAL;
	desc->linear_addr = cpu_to_le32(addr);
	desc->linear_len = cpu_to_le32(skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
		ret = dma_mapping_error(priv->dev, addr);
		if (unlikely(ret))
			return -EINVAL;
		desc->frags[i].addr = cpu_to_le32(addr);
		desc->frags[i].size = cpu_to_le32(len);
	}

	return 0;
}

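/* Transmit: claim the slot at the hardware TX queue write pointer
 * (backing off with NETDEV_TX_BUSY if it is still owned by an earlier
 * skb), fill either a simple or a scatter-gather descriptor, then
 * publish it by advancing TX_BQ_WR_ADDR after the write barrier.
 */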
static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	struct hix5hd2_desc *desc;
	dma_addr_t addr;
	u32 pos;
	u32 cmd;
	int ret;

	/* software write pointer */
	pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
	if (unlikely(priv->tx_skb[pos])) {
		dev->stats.tx_dropped++;
		dev->stats.tx_fifo_errors++;
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	desc = priv->tx_bq.desc + pos;

	cmd = hix5hd2_get_desc_cmd(skb, priv->hw_cap);
	desc->cmd = cpu_to_le32(cmd);

	if (skb_shinfo(skb)->nr_frags) {
		ret = hix5hd2_fill_sg_desc(priv, skb, pos);
		if (unlikely(ret)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		addr = priv->tx_ring.phys_addr + pos * sizeof(struct sg_desc);
	} else {
		addr = dma_map_single(priv->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->dev, addr))) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}
	desc->buff_addr = cpu_to_le32(addr);

	priv->tx_skb[pos] = skb;

	/* ensure desc updated */
	wmb();

	pos = dma_ring_incr(pos, TX_DESC_NUM);
	writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);

	netif_trans_update(dev);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	netdev_sent_queue(dev, skb->len);

	return NETDEV_TX_OK;
}

static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
{
	struct hix5hd2_desc *desc;
	dma_addr_t addr;
	int i;

	for (i = 0; i < RX_DESC_NUM; i++) {
		struct sk_buff *skb = priv->rx_skb[i];

		if (!skb)
			continue;

		desc = priv->rx_fq.desc + i;
		addr = le32_to_cpu(desc->buff_addr);
		dma_unmap_single(priv->dev, addr,
				 MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		priv->rx_skb[i] = NULL;
	}

	for (i = 0; i < TX_DESC_NUM; i++) {
		struct sk_buff *skb = priv->tx_skb[i];

		if (!skb)
			continue;

		desc = priv->tx_rq.desc + i;
		addr = le32_to_cpu(desc->buff_addr);
		dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		priv->tx_skb[i] = NULL;
	}
}

static int hix5hd2_net_open(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);
	struct phy_device *phy;
	int ret;

	ret = clk_prepare_enable(priv->mac_core_clk);
	if (ret < 0) {
		netdev_err(dev, "failed to enable mac core clk %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(priv->mac_ifc_clk);
	if (ret < 0) {
		clk_disable_unprepare(priv->mac_core_clk);
		netdev_err(dev, "failed to enable mac ifc clk %d\n", ret);
		return ret;
	}

	phy = of_phy_connect(dev, priv->phy_node,
			     &hix5hd2_adjust_link, 0, priv->phy_mode);
	if (!phy) {
		clk_disable_unprepare(priv->mac_ifc_clk);
		clk_disable_unprepare(priv->mac_core_clk);
		return -ENODEV;
	}

	phy_start(phy);
	hix5hd2_hw_init(priv);
	hix5hd2_rx_refill(priv);

	netdev_reset_queue(dev);
	netif_start_queue(dev);
	napi_enable(&priv->napi);

	hix5hd2_port_enable(priv);
	hix5hd2_irq_enable(priv);

	return 0;
}

static int hix5hd2_net_close(struct net_device *dev)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);

	hix5hd2_port_disable(priv);
	hix5hd2_irq_disable(priv);
	napi_disable(&priv->napi);
	netif_stop_queue(dev);
	hix5hd2_free_dma_desc_rings(priv);

	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	clk_disable_unprepare(priv->mac_ifc_clk);
	clk_disable_unprepare(priv->mac_core_clk);

	return 0;
}

static void hix5hd2_tx_timeout_task(struct work_struct *work)
{
	struct hix5hd2_priv *priv;

	priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
	hix5hd2_net_close(priv->netdev);
	hix5hd2_net_open(priv->netdev);
}

static void hix5hd2_net_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct hix5hd2_priv *priv = netdev_priv(dev);

	schedule_work(&priv->tx_timeout_task);
}

static const struct net_device_ops hix5hd2_netdev_ops = {
	.ndo_open		= hix5hd2_net_open,
	.ndo_stop		= hix5hd2_net_close,
	.ndo_start_xmit		= hix5hd2_net_xmit,
	.ndo_tx_timeout		= hix5hd2_net_timeout,
	.ndo_set_mac_address	= hix5hd2_net_set_mac_address,
};

static const struct ethtool_ops hix5hd2_ethtools_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
};

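/* The MDIO engine is busy as long as MDIO_START is set in
 * MDIO_SINGLE_CMD; poll for it to clear, sleeping briefly between
 * reads, and give up with -ETIMEDOUT after 10000 iterations.
 */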
static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
{
	struct hix5hd2_priv *priv = bus->priv;
	void __iomem *base = priv->base;
	int i, timeout = 10000;

	for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
		if (i == timeout)
			return -ETIMEDOUT;
		usleep_range(10, 20);
	}

	return 0;
}

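/* A single MDIO read: wait for the engine to go idle, issue MDIO_READ
 * with the PHY address at bit 8 and the register number in the low
 * bits, wait again, check MDIO_RDATA_STATUS (MDIO_R_VALID set means
 * the read failed), and take the result from the upper halfword of
 * MDIO_SINGLE_DATA.
 */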
static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
{
	struct hix5hd2_priv *priv = bus->priv;
	void __iomem *base = priv->base;
	int val, ret;

	ret = hix5hd2_mdio_wait_ready(bus);
	if (ret < 0)
		goto out;

	writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
	ret = hix5hd2_mdio_wait_ready(bus);
	if (ret < 0)
		goto out;

	val = readl_relaxed(base + MDIO_RDATA_STATUS);
	if (val & MDIO_R_VALID) {
		dev_err(bus->parent, "SMI bus read not valid\n");
		ret = -ENODEV;
		goto out;
	}

	val = readl_relaxed(base + MDIO_SINGLE_DATA);
	ret = (val >> 16) & 0xFFFF;
out:
	return ret;
}

static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
	struct hix5hd2_priv *priv = bus->priv;
	void __iomem *base = priv->base;
	int ret;

	ret = hix5hd2_mdio_wait_ready(bus);
	if (ret < 0)
		goto out;

	writel_relaxed(val, base + MDIO_SINGLE_DATA);
	writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
	ret = hix5hd2_mdio_wait_ready(bus);
out:
	return ret;
}

static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
{
	int i;

	for (i = 0; i < QUEUE_NUMS; i++) {
		if (priv->pool[i].desc) {
			dma_free_coherent(priv->dev, priv->pool[i].size,
					  priv->pool[i].desc,
					  priv->pool[i].phys_addr);
			priv->pool[i].desc = NULL;
		}
	}
}

static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
{
	struct device *dev = priv->dev;
	struct hix5hd2_desc *virt_addr;
	dma_addr_t phys_addr;
	int size, i;

	priv->rx_fq.count = RX_DESC_NUM;
	priv->rx_bq.count = RX_DESC_NUM;
	priv->tx_bq.count = TX_DESC_NUM;
	priv->tx_rq.count = TX_DESC_NUM;

	for (i = 0; i < QUEUE_NUMS; i++) {
		size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
		virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
					       GFP_KERNEL);
		if (!virt_addr)
			goto error_free_pool;

		priv->pool[i].size = size;
		priv->pool[i].desc = virt_addr;
		priv->pool[i].phys_addr = phys_addr;
	}
	return 0;

error_free_pool:
	hix5hd2_destroy_hw_desc_queue(priv);

	return -ENOMEM;
}

static int hix5hd2_init_sg_desc_queue(struct hix5hd2_priv *priv)
{
	struct sg_desc *desc;
	dma_addr_t phys_addr;

	desc = dma_alloc_coherent(priv->dev,
				  TX_DESC_NUM * sizeof(struct sg_desc),
				  &phys_addr, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	priv->tx_ring.desc = desc;
	priv->tx_ring.phys_addr = phys_addr;

	return 0;
}

static void hix5hd2_destroy_sg_desc_queue(struct hix5hd2_priv *priv)
{
	if (priv->tx_ring.desc) {
		dma_free_coherent(priv->dev,
				  TX_DESC_NUM * sizeof(struct sg_desc),
				  priv->tx_ring.desc, priv->tx_ring.phys_addr);
		priv->tx_ring.desc = NULL;
	}
}

static inline void hix5hd2_mac_core_reset(struct hix5hd2_priv *priv)
{
	if (!priv->mac_core_rst)
		return;

	reset_control_assert(priv->mac_core_rst);
	reset_control_deassert(priv->mac_core_rst);
}

static void hix5hd2_sleep_us(u32 time_us)
{
	u32 time_ms;

	if (!time_us)
		return;

	time_ms = DIV_ROUND_UP(time_us, 1000);
	if (time_ms < 20)
		usleep_range(time_us, time_us + 500);
	else
		msleep(time_ms);
}

static void hix5hd2_phy_reset(struct hix5hd2_priv *priv)
{
	/* To make sure the PHY hardware reset succeeds, keep the PHY
	 * in the deasserted state first and then perform the complete
	 * assert/deassert reset sequence.
	 */
	reset_control_deassert(priv->phy_rst);
	hix5hd2_sleep_us(priv->phy_reset_delays[PRE_DELAY]);

	reset_control_assert(priv->phy_rst);
	/* The assert hold time needed for a reliable reset depends on
	 * the PHY hardware.
	 */
	hix5hd2_sleep_us(priv->phy_reset_delays[PULSE]);
	reset_control_deassert(priv->phy_rst);
	/* wait until the PHY is ready before the first MDIO access */
	hix5hd2_sleep_us(priv->phy_reset_delays[POST_DELAY]);
}

static const struct of_device_id hix5hd2_of_match[];

static int hix5hd2_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct net_device *ndev;
	struct hix5hd2_priv *priv;
	struct mii_bus *bus;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	priv->hw_cap = (unsigned long)device_get_match_data(dev);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out_free_netdev;
	}

	priv->ctrl_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->ctrl_base)) {
		ret = PTR_ERR(priv->ctrl_base);
		goto out_free_netdev;
	}

	priv->mac_core_clk = devm_clk_get(&pdev->dev, "mac_core");
	if (IS_ERR(priv->mac_core_clk)) {
		netdev_err(ndev, "failed to get mac core clk\n");
		ret = -ENODEV;
		goto out_free_netdev;
	}

	ret = clk_prepare_enable(priv->mac_core_clk);
	if (ret < 0) {
		netdev_err(ndev, "failed to enable mac core clk %d\n", ret);
		goto out_free_netdev;
	}

	priv->mac_ifc_clk = devm_clk_get(&pdev->dev, "mac_ifc");
	if (IS_ERR(priv->mac_ifc_clk))
		priv->mac_ifc_clk = NULL;

	ret = clk_prepare_enable(priv->mac_ifc_clk);
	if (ret < 0) {
		netdev_err(ndev, "failed to enable mac ifc clk %d\n", ret);
		goto out_disable_mac_core_clk;
	}

	priv->mac_core_rst = devm_reset_control_get(dev, "mac_core");
	if (IS_ERR(priv->mac_core_rst))
		priv->mac_core_rst = NULL;
	hix5hd2_mac_core_reset(priv);

	priv->mac_ifc_rst = devm_reset_control_get(dev, "mac_ifc");
	if (IS_ERR(priv->mac_ifc_rst))
		priv->mac_ifc_rst = NULL;

	priv->phy_rst = devm_reset_control_get(dev, "phy");
	if (IS_ERR(priv->phy_rst)) {
		priv->phy_rst = NULL;
	} else {
		ret = of_property_read_u32_array(node,
						 PHY_RESET_DELAYS_PROPERTY,
						 priv->phy_reset_delays,
						 DELAYS_NUM);
		if (ret)
			goto out_disable_clk;
		hix5hd2_phy_reset(priv);
	}

	bus = mdiobus_alloc();
	if (!bus) {
		ret = -ENOMEM;
		goto out_disable_clk;
	}

	bus->priv = priv;
	bus->name = "hix5hd2_mii_bus";
	bus->read = hix5hd2_mdio_read;
	bus->write = hix5hd2_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
	priv->bus = bus;

	ret = of_mdiobus_register(bus, node);
	if (ret)
		goto err_free_mdio;

	ret = of_get_phy_mode(node, &priv->phy_mode);
	if (ret) {
		netdev_err(ndev, "failed to get phy-mode\n");
		goto err_mdiobus;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "failed to get phy-handle\n");
		ret = -EINVAL;
		goto err_mdiobus;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		ret = ndev->irq;
		goto out_phy_node;
	}

	ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto out_phy_node;
	}

	ret = of_get_ethdev_address(node, ndev);
	if (ret) {
		eth_hw_addr_random(ndev);
		netdev_warn(ndev, "using random MAC address %pM\n",
			    ndev->dev_addr);
	}

	INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
	ndev->watchdog_timeo = 6 * HZ;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hix5hd2_netdev_ops;
	ndev->ethtool_ops = &hix5hd2_ethtools_ops;
	SET_NETDEV_DEV(ndev, dev);

	if (HAS_CAP_TSO(priv->hw_cap))
		ndev->hw_features |= NETIF_F_SG;

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->vlan_features |= ndev->features;

	ret = hix5hd2_init_hw_desc_queue(priv);
	if (ret)
		goto out_phy_node;

	netif_napi_add(ndev, &priv->napi, hix5hd2_poll);

	if (HAS_CAP_TSO(priv->hw_cap)) {
		ret = hix5hd2_init_sg_desc_queue(priv);
		if (ret)
			goto out_destroy_queue;
	}

	ret = register_netdev(priv->netdev);
	if (ret) {
		netdev_err(ndev, "register_netdev failed!\n");
		goto out_destroy_queue;
	}

	clk_disable_unprepare(priv->mac_ifc_clk);
	clk_disable_unprepare(priv->mac_core_clk);

	return ret;

out_destroy_queue:
	if (HAS_CAP_TSO(priv->hw_cap))
		hix5hd2_destroy_sg_desc_queue(priv);
	netif_napi_del(&priv->napi);
	hix5hd2_destroy_hw_desc_queue(priv);
out_phy_node:
	of_node_put(priv->phy_node);
err_mdiobus:
	mdiobus_unregister(bus);
err_free_mdio:
	mdiobus_free(bus);
out_disable_clk:
	clk_disable_unprepare(priv->mac_ifc_clk);
out_disable_mac_core_clk:
	clk_disable_unprepare(priv->mac_core_clk);
out_free_netdev:
	free_netdev(ndev);

	return ret;
}

static void hix5hd2_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hix5hd2_priv *priv = netdev_priv(ndev);

	netif_napi_del(&priv->napi);
	unregister_netdev(ndev);
	mdiobus_unregister(priv->bus);
	mdiobus_free(priv->bus);

	if (HAS_CAP_TSO(priv->hw_cap))
		hix5hd2_destroy_sg_desc_queue(priv);
	hix5hd2_destroy_hw_desc_queue(priv);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);
}

static const struct of_device_id hix5hd2_of_match[] = {
	{ .compatible = "hisilicon,hisi-gmac-v1", .data = (void *)GEMAC_V1 },
	{ .compatible = "hisilicon,hisi-gmac-v2", .data = (void *)GEMAC_V2 },
	{ .compatible = "hisilicon,hix5hd2-gmac", .data = (void *)GEMAC_V1 },
	{ .compatible = "hisilicon,hi3798cv200-gmac", .data = (void *)GEMAC_V2 },
	{ .compatible = "hisilicon,hi3516a-gmac", .data = (void *)GEMAC_V2 },
	{},
};

MODULE_DEVICE_TABLE(of, hix5hd2_of_match);

static struct platform_driver hix5hd2_dev_driver = {
	.driver = {
		.name = "hisi-gmac",
		.of_match_table = hix5hd2_of_match,
	},
	.probe = hix5hd2_dev_probe,
	.remove_new = hix5hd2_dev_remove,
};

module_platform_driver(hix5hd2_dev_driver);

MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hisi-gmac");