xref: /linux/drivers/net/ethernet/hisilicon/hisi_femac.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Hisilicon Fast Ethernet MAC Driver
4  *
5  * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
6  */
7 
8 #include <linux/circ_buf.h>
9 #include <linux/clk.h>
10 #include <linux/etherdevice.h>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/of_mdio.h>
14 #include <linux/of_net.h>
15 #include <linux/platform_device.h>
16 #include <linux/reset.h>
17 
18 /* MAC control register list */
19 #define MAC_PORTSEL			0x0200
20 #define MAC_PORTSEL_STAT_CPU		BIT(0)
21 #define MAC_PORTSEL_RMII		BIT(1)
22 #define MAC_PORTSET			0x0208
23 #define MAC_PORTSET_DUPLEX_FULL		BIT(0)
24 #define MAC_PORTSET_LINKED		BIT(1)
25 #define MAC_PORTSET_SPEED_100M		BIT(2)
26 #define MAC_SET				0x0210
27 #define MAX_FRAME_SIZE			1600
28 #define MAX_FRAME_SIZE_MASK		GENMASK(10, 0)
29 #define BIT_PAUSE_EN			BIT(18)
30 #define RX_COALESCE_SET			0x0340
31 #define RX_COALESCED_FRAME_OFFSET	24
32 #define RX_COALESCED_FRAMES		8
33 #define RX_COALESCED_TIMER		0x74
34 #define QLEN_SET			0x0344
35 #define RX_DEPTH_OFFSET			8
36 #define MAX_HW_FIFO_DEPTH		64
37 #define HW_TX_FIFO_DEPTH		12
38 #define HW_RX_FIFO_DEPTH		(MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
39 #define IQFRM_DES			0x0354
40 #define RX_FRAME_LEN_MASK		GENMASK(11, 0)
41 #define IQ_ADDR				0x0358
42 #define EQ_ADDR				0x0360
43 #define EQFRM_LEN			0x0364
44 #define ADDRQ_STAT			0x036C
45 #define TX_CNT_INUSE_MASK		GENMASK(5, 0)
46 #define BIT_TX_READY			BIT(24)
47 #define BIT_RX_READY			BIT(25)
48 /* global control register list */
49 #define GLB_HOSTMAC_L32			0x0000
50 #define GLB_HOSTMAC_H16			0x0004
51 #define GLB_SOFT_RESET			0x0008
52 #define SOFT_RESET_ALL			BIT(0)
53 #define GLB_FWCTRL			0x0010
54 #define FWCTRL_VLAN_ENABLE		BIT(0)
55 #define FWCTRL_FW2CPU_ENA		BIT(5)
56 #define FWCTRL_FWALL2CPU		BIT(7)
57 #define GLB_MACTCTRL			0x0014
58 #define MACTCTRL_UNI2CPU		BIT(1)
59 #define MACTCTRL_MULTI2CPU		BIT(3)
60 #define MACTCTRL_BROAD2CPU		BIT(5)
61 #define MACTCTRL_MACT_ENA		BIT(7)
62 #define GLB_IRQ_STAT			0x0030
63 #define GLB_IRQ_ENA			0x0034
64 #define IRQ_ENA_PORT0_MASK		GENMASK(7, 0)
65 #define IRQ_ENA_PORT0			BIT(18)
66 #define IRQ_ENA_ALL			BIT(19)
67 #define GLB_IRQ_RAW			0x0038
68 #define IRQ_INT_RX_RDY			BIT(0)
69 #define IRQ_INT_TX_PER_PACKET		BIT(1)
70 #define IRQ_INT_TX_FIFO_EMPTY		BIT(6)
71 #define IRQ_INT_MULTI_RXRDY		BIT(7)
72 #define DEF_INT_MASK			(IRQ_INT_MULTI_RXRDY | \
73 					IRQ_INT_TX_PER_PACKET | \
74 					IRQ_INT_TX_FIFO_EMPTY)
75 #define GLB_MAC_L32_BASE		0x0100
76 #define GLB_MAC_H16_BASE		0x0104
77 #define MACFLT_HI16_MASK		GENMASK(15, 0)
78 #define BIT_MACFLT_ENA			BIT(17)
79 #define BIT_MACFLT_FW2CPU		BIT(21)
80 #define GLB_MAC_H16(reg)		(GLB_MAC_H16_BASE + ((reg) * 0x8))
81 #define GLB_MAC_L32(reg)		(GLB_MAC_L32_BASE + ((reg) * 0x8))
82 #define MAX_MAC_FILTER_NUM		8
83 #define MAX_UNICAST_ADDRESSES		2
84 #define MAX_MULTICAST_ADDRESSES		(MAX_MAC_FILTER_NUM - \
85 					MAX_UNICAST_ADDRESSES)
86 /* software TX and RX queue depths; each must be a power of 2 */
87 #define TXQ_NUM				64
88 #define RXQ_NUM				128
89 #define FEMAC_POLL_WEIGHT		16
90 
91 #define PHY_RESET_DELAYS_PROPERTY	"hisilicon,phy-reset-delays-us"
92 
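/* Order of the three cells in the "hisilicon,phy-reset-delays-us"
 * property; all values are in microseconds.
 */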
93 enum phy_reset_delays {
94 	PRE_DELAY,
95 	PULSE,
96 	POST_DELAY,
97 	DELAYS_NUM,
98 };
99 
100 struct hisi_femac_queue {
101 	struct sk_buff **skb;
102 	dma_addr_t *dma_phys;
103 	int num;
104 	unsigned int head;
105 	unsigned int tail;
106 };
107 
108 struct hisi_femac_priv {
109 	void __iomem *port_base;
110 	void __iomem *glb_base;
111 	struct clk *clk;
112 	struct reset_control *mac_rst;
113 	struct reset_control *phy_rst;
114 	u32 phy_reset_delays[DELAYS_NUM];
115 	u32 link_status;
116 
117 	struct device *dev;
118 	struct net_device *ndev;
119 
120 	struct hisi_femac_queue txq;
121 	struct hisi_femac_queue rxq;
122 	u32 tx_fifo_used_cnt;
123 	struct napi_struct napi;
124 };
125 
126 static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
127 {
128 	u32 val;
129 
130 	val = readl(priv->glb_base + GLB_IRQ_ENA);
131 	writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
132 }
133 
134 static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
135 {
136 	u32 val;
137 
138 	val = readl(priv->glb_base + GLB_IRQ_ENA);
139 	writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
140 }
141 
142 static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
143 				    struct sk_buff *skb, unsigned int pos)
144 {
145 	dma_addr_t dma_addr;
146 
147 	dma_addr = priv->txq.dma_phys[pos];
148 	dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
149 }
150 
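/* Reclaim completed TX frames: ADDRQ_STAT reports how many frames the
 * hardware still holds in its TX FIFO; while that count is below the
 * software tx_fifo_used_cnt, the oldest queued skb is unmapped and freed.
 */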
151 static void hisi_femac_xmit_reclaim(struct net_device *dev)
152 {
153 	struct sk_buff *skb;
154 	struct hisi_femac_priv *priv = netdev_priv(dev);
155 	struct hisi_femac_queue *txq = &priv->txq;
156 	unsigned int bytes_compl = 0, pkts_compl = 0;
157 	u32 val;
158 
159 	netif_tx_lock(dev);
160 
161 	val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
162 	while (val < priv->tx_fifo_used_cnt) {
163 		skb = txq->skb[txq->tail];
164 		if (unlikely(!skb)) {
165 			netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
166 				   val, priv->tx_fifo_used_cnt);
167 			break;
168 		}
169 		hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
170 		pkts_compl++;
171 		bytes_compl += skb->len;
172 		dev_kfree_skb_any(skb);
173 
174 		priv->tx_fifo_used_cnt--;
175 
176 		val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
177 		txq->skb[txq->tail] = NULL;
178 		txq->tail = (txq->tail + 1) % txq->num;
179 	}
180 
181 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
182 
183 	if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
184 		netif_wake_queue(dev);
185 
186 	netif_tx_unlock(dev);
187 }
188 
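/* phylib link-change callback: mirror the PHY link, duplex and speed
 * state into MAC_PORTSET when the link status changes.
 */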
189 static void hisi_femac_adjust_link(struct net_device *dev)
190 {
191 	struct hisi_femac_priv *priv = netdev_priv(dev);
192 	struct phy_device *phy = dev->phydev;
193 	u32 status = 0;
194 
195 	if (phy->link)
196 		status |= MAC_PORTSET_LINKED;
197 	if (phy->duplex == DUPLEX_FULL)
198 		status |= MAC_PORTSET_DUPLEX_FULL;
199 	if (phy->speed == SPEED_100)
200 		status |= MAC_PORTSET_SPEED_100M;
201 
202 	if ((status != priv->link_status) &&
203 	    ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
204 		writel(status, priv->port_base + MAC_PORTSET);
205 		priv->link_status = status;
206 		phy_print_status(phy);
207 	}
208 }
209 
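/* Refill the RX FIFO: allocate and DMA-map fresh skbs and pass their
 * bus addresses to the hardware via IQ_ADDR for as long as the
 * hardware reports BIT_RX_READY and the software ring has free slots.
 */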
210 static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
211 {
212 	struct hisi_femac_queue *rxq = &priv->rxq;
213 	struct sk_buff *skb;
214 	u32 pos;
215 	u32 len = MAX_FRAME_SIZE;
216 	dma_addr_t addr;
217 
218 	pos = rxq->head;
219 	while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
220 		if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
221 			break;
222 		if (unlikely(rxq->skb[pos])) {
223 			netdev_err(priv->ndev, "err skb[%d]=%p\n",
224 				   pos, rxq->skb[pos]);
225 			break;
226 		}
227 		skb = netdev_alloc_skb_ip_align(priv->ndev, len);
228 		if (unlikely(!skb))
229 			break;
230 
231 		addr = dma_map_single(priv->dev, skb->data, len,
232 				      DMA_FROM_DEVICE);
233 		if (dma_mapping_error(priv->dev, addr)) {
234 			dev_kfree_skb_any(skb);
235 			break;
236 		}
237 		rxq->dma_phys[pos] = addr;
238 		rxq->skb[pos] = skb;
239 		writel(addr, priv->port_base + IQ_ADDR);
240 		pos = (pos + 1) % rxq->num;
241 	}
242 	rxq->head = pos;
243 }
244 
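/* Receive up to @limit frames: read each frame length from IQFRM_DES,
 * unmap the matching skb and pass it up through NAPI GRO, then refill
 * the RX FIFO.
 */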
245 static int hisi_femac_rx(struct net_device *dev, int limit)
246 {
247 	struct hisi_femac_priv *priv = netdev_priv(dev);
248 	struct hisi_femac_queue *rxq = &priv->rxq;
249 	struct sk_buff *skb;
250 	dma_addr_t addr;
251 	u32 rx_pkt_info, pos, len, rx_pkts_num = 0;
252 
253 	pos = rxq->tail;
254 	while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
255 		rx_pkt_info = readl(priv->port_base + IQFRM_DES);
256 		len = rx_pkt_info & RX_FRAME_LEN_MASK;
257 		len -= ETH_FCS_LEN;
258 
259 		/* tell hardware we will deal with this packet */
260 		writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);
261 
262 		rx_pkts_num++;
263 
264 		skb = rxq->skb[pos];
265 		if (unlikely(!skb)) {
266 			netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
267 			break;
268 		}
269 		rxq->skb[pos] = NULL;
270 
271 		addr = rxq->dma_phys[pos];
272 		dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
273 				 DMA_FROM_DEVICE);
274 		skb_put(skb, len);
275 		if (unlikely(skb->len > MAX_FRAME_SIZE)) {
276 			netdev_err(dev, "rcv len err, len = %d\n", skb->len);
277 			dev->stats.rx_errors++;
278 			dev->stats.rx_length_errors++;
279 			dev_kfree_skb_any(skb);
280 			goto next;
281 		}
282 
283 		skb->protocol = eth_type_trans(skb, dev);
284 		napi_gro_receive(&priv->napi, skb);
285 		dev->stats.rx_packets++;
286 		dev->stats.rx_bytes += skb->len;
287 next:
288 		pos = (pos + 1) % rxq->num;
289 		if (rx_pkts_num >= limit)
290 			break;
291 	}
292 	rxq->tail = pos;
293 
294 	hisi_femac_rx_refill(priv);
295 
296 	return rx_pkts_num;
297 }
298 
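/* NAPI poll handler: reclaim finished TX frames and receive packets
 * until the budget is spent or no raw interrupt bits remain set, then
 * complete NAPI and re-enable the RX and TX-FIFO-empty interrupts.
 */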
299 static int hisi_femac_poll(struct napi_struct *napi, int budget)
300 {
301 	struct hisi_femac_priv *priv = container_of(napi,
302 					struct hisi_femac_priv, napi);
303 	struct net_device *dev = priv->ndev;
304 	int work_done = 0, task = budget;
305 	int ints, num;
306 
307 	do {
308 		hisi_femac_xmit_reclaim(dev);
309 		num = hisi_femac_rx(dev, task);
310 		work_done += num;
311 		task -= num;
312 		if (work_done >= budget)
313 			break;
314 
315 		ints = readl(priv->glb_base + GLB_IRQ_RAW);
316 		writel(ints & DEF_INT_MASK,
317 		       priv->glb_base + GLB_IRQ_RAW);
318 	} while (ints & DEF_INT_MASK);
319 
320 	if (work_done < budget) {
321 		napi_complete_done(napi, work_done);
322 		hisi_femac_irq_enable(priv, DEF_INT_MASK &
323 					(~IRQ_INT_TX_PER_PACKET));
324 	}
325 
326 	return work_done;
327 }
328 
329 static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
330 {
331 	int ints;
332 	struct net_device *dev = (struct net_device *)dev_id;
333 	struct hisi_femac_priv *priv = netdev_priv(dev);
334 
335 	ints = readl(priv->glb_base + GLB_IRQ_RAW);
336 
337 	if (likely(ints & DEF_INT_MASK)) {
338 		writel(ints & DEF_INT_MASK,
339 		       priv->glb_base + GLB_IRQ_RAW);
340 		hisi_femac_irq_disable(priv, DEF_INT_MASK);
341 		napi_schedule(&priv->napi);
342 	}
343 
344 	return IRQ_HANDLED;
345 }
346 
347 static int hisi_femac_init_queue(struct device *dev,
348 				 struct hisi_femac_queue *queue,
349 				 unsigned int num)
350 {
351 	queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
352 				  GFP_KERNEL);
353 	if (!queue->skb)
354 		return -ENOMEM;
355 
356 	queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
357 				       GFP_KERNEL);
358 	if (!queue->dma_phys)
359 		return -ENOMEM;
360 
361 	queue->num = num;
362 	queue->head = 0;
363 	queue->tail = 0;
364 
365 	return 0;
366 }
367 
368 static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
369 {
370 	int ret;
371 
372 	ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
373 	if (ret)
374 		return ret;
375 
376 	ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
377 	if (ret)
378 		return ret;
379 
380 	priv->tx_fifo_used_cnt = 0;
381 
382 	return 0;
383 }
384 
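/* Free every skb still sitting in the software RX and TX rings,
 * unmapping their DMA buffers first; called when the device is closed.
 */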
385 static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
386 {
387 	struct hisi_femac_queue *txq = &priv->txq;
388 	struct hisi_femac_queue *rxq = &priv->rxq;
389 	struct sk_buff *skb;
390 	dma_addr_t dma_addr;
391 	u32 pos;
392 
393 	pos = rxq->tail;
394 	while (pos != rxq->head) {
395 		skb = rxq->skb[pos];
396 		if (unlikely(!skb)) {
397 			netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
398 				   pos, rxq->head);
399 			continue;
400 		}
401 
402 		dma_addr = rxq->dma_phys[pos];
403 		dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
404 				 DMA_FROM_DEVICE);
405 
406 		dev_kfree_skb_any(skb);
407 		rxq->skb[pos] = NULL;
408 		pos = (pos + 1) % rxq->num;
409 	}
410 	rxq->tail = pos;
411 
412 	pos = txq->tail;
413 	while (pos != txq->head) {
414 		skb = txq->skb[pos];
415 		if (unlikely(!skb)) {
416 			netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
417 				   pos, txq->head);
418 			continue;
419 		}
420 		hisi_femac_tx_dma_unmap(priv, skb, pos);
421 		dev_kfree_skb_any(skb);
422 		txq->skb[pos] = NULL;
423 		pos = (pos + 1) % txq->num;
424 	}
425 	txq->tail = pos;
426 	priv->tx_fifo_used_cnt = 0;
427 }
428 
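/* Write the station MAC address into the global HOSTMAC registers
 * (high 16 bits and low 32 bits).
 */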
429 static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
430 				      unsigned char *mac)
431 {
432 	u32 reg;
433 
434 	reg = mac[1] | (mac[0] << 8);
435 	writel(reg, priv->glb_base + GLB_HOSTMAC_H16);
436 
437 	reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
438 	writel(reg, priv->glb_base + GLB_HOSTMAC_L32);
439 
440 	return 0;
441 }
442 
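/* Soft-reset the MAC by pulsing SOFT_RESET_ALL in the global soft
 * reset register.
 */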
443 static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
444 {
445 	u32 val;
446 
447 	val = readl(priv->glb_base + GLB_SOFT_RESET);
448 	val |= SOFT_RESET_ALL;
449 	writel(val, priv->glb_base + GLB_SOFT_RESET);
450 
451 	usleep_range(500, 800);
452 
453 	val &= ~SOFT_RESET_ALL;
454 	writel(val, priv->glb_base + GLB_SOFT_RESET);
455 
456 	return 0;
457 }
458 
459 static int hisi_femac_net_open(struct net_device *dev)
460 {
461 	struct hisi_femac_priv *priv = netdev_priv(dev);
462 
463 	hisi_femac_port_reset(priv);
464 	hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
465 	hisi_femac_rx_refill(priv);
466 
467 	netif_carrier_off(dev);
468 	netdev_reset_queue(dev);
469 	netif_start_queue(dev);
470 	napi_enable(&priv->napi);
471 
472 	priv->link_status = 0;
473 	if (dev->phydev)
474 		phy_start(dev->phydev);
475 
476 	writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
477 	hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);
478 
479 	return 0;
480 }
481 
482 static int hisi_femac_net_close(struct net_device *dev)
483 {
484 	struct hisi_femac_priv *priv = netdev_priv(dev);
485 
486 	hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);
487 
488 	if (dev->phydev)
489 		phy_stop(dev->phydev);
490 
491 	netif_stop_queue(dev);
492 	napi_disable(&priv->napi);
493 
494 	hisi_femac_free_skb_rings(priv);
495 
496 	return 0;
497 }
498 
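/* Transmit one skb: verify the hardware TX FIFO and the software ring
 * both have room, DMA-map the data, then write the buffer address and
 * the frame length (including FCS) to the EQ registers.
 */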
499 static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
500 				       struct net_device *dev)
501 {
502 	struct hisi_femac_priv *priv = netdev_priv(dev);
503 	struct hisi_femac_queue *txq = &priv->txq;
504 	dma_addr_t addr;
505 	u32 val;
506 
507 	val = readl(priv->port_base + ADDRQ_STAT);
508 	val &= BIT_TX_READY;
509 	if (!val) {
510 		hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
511 		dev->stats.tx_dropped++;
512 		dev->stats.tx_fifo_errors++;
513 		netif_stop_queue(dev);
514 		return NETDEV_TX_BUSY;
515 	}
516 
517 	if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
518 				 txq->num))) {
519 		hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
520 		dev->stats.tx_dropped++;
521 		dev->stats.tx_fifo_errors++;
522 		netif_stop_queue(dev);
523 		return NETDEV_TX_BUSY;
524 	}
525 
526 	addr = dma_map_single(priv->dev, skb->data,
527 			      skb->len, DMA_TO_DEVICE);
528 	if (unlikely(dma_mapping_error(priv->dev, addr))) {
529 		dev_kfree_skb_any(skb);
530 		dev->stats.tx_dropped++;
531 		return NETDEV_TX_OK;
532 	}
533 	txq->dma_phys[txq->head] = addr;
534 
535 	txq->skb[txq->head] = skb;
536 	txq->head = (txq->head + 1) % txq->num;
537 
538 	writel(addr, priv->port_base + EQ_ADDR);
539 	writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);
540 
541 	priv->tx_fifo_used_cnt++;
542 
543 	dev->stats.tx_packets++;
544 	dev->stats.tx_bytes += skb->len;
545 	netdev_sent_queue(dev, skb->len);
546 
547 	return NETDEV_TX_OK;
548 }
549 
550 static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
551 {
552 	struct hisi_femac_priv *priv = netdev_priv(dev);
553 	struct sockaddr *skaddr = p;
554 
555 	if (!is_valid_ether_addr(skaddr->sa_data))
556 		return -EADDRNOTAVAIL;
557 
558 	memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
559 	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
560 
561 	hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
562 
563 	return 0;
564 }
565 
566 static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
567 					     unsigned int reg_n, bool enable)
568 {
569 	u32 val;
570 
571 	val = readl(priv->glb_base + GLB_MAC_H16(reg_n));
572 	if (enable)
573 		val |= BIT_MACFLT_ENA;
574 	else
575 		val &= ~BIT_MACFLT_ENA;
576 	writel(val, priv->glb_base + GLB_MAC_H16(reg_n));
577 }
578 
579 static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
580 					  unsigned char *addr,
581 					  unsigned int reg_n)
582 {
583 	unsigned int high, low;
584 	u32 val;
585 
586 	high = GLB_MAC_H16(reg_n);
587 	low = GLB_MAC_L32(reg_n);
588 
589 	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
590 	writel(val, priv->glb_base + low);
591 
592 	val = readl(priv->glb_base + high);
593 	val &= ~MACFLT_HI16_MASK;
594 	val |= ((addr[0] << 8) | addr[1]);
595 	val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
596 	writel(val, priv->glb_base + high);
597 }
598 
599 static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
600 					bool promisc_mode)
601 {
602 	u32 val;
603 
604 	val = readl(priv->glb_base + GLB_FWCTRL);
605 	if (promisc_mode)
606 		val |= FWCTRL_FWALL2CPU;
607 	else
608 		val &= ~FWCTRL_FWALL2CPU;
609 	writel(val, priv->glb_base + GLB_FWCTRL);
610 }
611 
612 /* Handle multiple multicast addresses (perfect filtering) */
613 static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
614 {
615 	struct net_device *dev = priv->ndev;
616 	u32 val;
617 
618 	val = readl(priv->glb_base + GLB_MACTCTRL);
619 	if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
620 	    (dev->flags & IFF_ALLMULTI)) {
621 		val |= MACTCTRL_MULTI2CPU;
622 	} else {
623 		int reg = MAX_UNICAST_ADDRESSES;
624 		int i;
625 		struct netdev_hw_addr *ha;
626 
627 		for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
628 			hisi_femac_enable_hw_addr_filter(priv, i, false);
629 
630 		netdev_for_each_mc_addr(ha, dev) {
631 			hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
632 			reg++;
633 		}
634 		val &= ~MACTCTRL_MULTI2CPU;
635 	}
636 	writel(val, priv->glb_base + GLB_MACTCTRL);
637 }
638 
639 /* Handle multiple unicast addresses (perfect filtering) */
640 static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
641 {
642 	struct net_device *dev = priv->ndev;
643 	u32 val;
644 
645 	val = readl(priv->glb_base + GLB_MACTCTRL);
646 	if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
647 		val |= MACTCTRL_UNI2CPU;
648 	} else {
649 		int reg = 0;
650 		int i;
651 		struct netdev_hw_addr *ha;
652 
653 		for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
654 			hisi_femac_enable_hw_addr_filter(priv, i, false);
655 
656 		netdev_for_each_uc_addr(ha, dev) {
657 			hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
658 			reg++;
659 		}
660 		val &= ~MACTCTRL_UNI2CPU;
661 	}
662 	writel(val, priv->glb_base + GLB_MACTCTRL);
663 }
664 
665 static void hisi_femac_net_set_rx_mode(struct net_device *dev)
666 {
667 	struct hisi_femac_priv *priv = netdev_priv(dev);
668 
669 	if (dev->flags & IFF_PROMISC) {
670 		hisi_femac_set_promisc_mode(priv, true);
671 	} else {
672 		hisi_femac_set_promisc_mode(priv, false);
673 		hisi_femac_set_mc_addr_filter(priv);
674 		hisi_femac_set_uc_addr_filter(priv);
675 	}
676 }
677 
678 static int hisi_femac_net_ioctl(struct net_device *dev,
679 				struct ifreq *ifreq, int cmd)
680 {
681 	if (!netif_running(dev))
682 		return -EINVAL;
683 
684 	if (!dev->phydev)
685 		return -EINVAL;
686 
687 	return phy_mii_ioctl(dev->phydev, ifreq, cmd);
688 }
689 
690 static const struct ethtool_ops hisi_femac_ethtools_ops = {
691 	.get_link		= ethtool_op_get_link,
692 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
693 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
694 };
695 
696 static const struct net_device_ops hisi_femac_netdev_ops = {
697 	.ndo_open		= hisi_femac_net_open,
698 	.ndo_stop		= hisi_femac_net_close,
699 	.ndo_start_xmit		= hisi_femac_net_xmit,
700 	.ndo_do_ioctl		= hisi_femac_net_ioctl,
701 	.ndo_set_mac_address	= hisi_femac_set_mac_address,
702 	.ndo_set_rx_mode	= hisi_femac_net_set_rx_mode,
703 };
704 
705 static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
706 {
707 	reset_control_assert(priv->mac_rst);
708 	reset_control_deassert(priv->mac_rst);
709 }
710 
711 static void hisi_femac_sleep_us(u32 time_us)
712 {
713 	u32 time_ms;
714 
715 	if (!time_us)
716 		return;
717 
718 	time_ms = DIV_ROUND_UP(time_us, 1000);
719 	if (time_ms < 20)
720 		usleep_range(time_us, time_us + 500);
721 	else
722 		msleep(time_ms);
723 }
724 
725 static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
726 {
727 	/* To make sure the PHY hardware reset succeeds, the PHY must
728 	 * first be held in the deasserted state before the full
729 	 * assert/deassert reset sequence is carried out.
730 	 */
731 	reset_control_deassert(priv->phy_rst);
732 	hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
733 
734 	reset_control_assert(priv->phy_rst);
735 	/* hold the reset long enough for it to take effect;
736 	 * the required pulse width depends on the PHY hardware
737 	 */
738 	hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
739 	reset_control_deassert(priv->phy_rst);
740 	/* wait for the PHY to come out of reset before any later MDIO access */
741 	hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
742 }
743 
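/* One-time port setup: select CPU-controlled status (and RMII mode if
 * used), clear and mask the port interrupts, enable forwarding to the
 * CPU, and program the maximum frame size, RX coalescing parameters
 * and hardware FIFO queue depths.
 */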
744 static void hisi_femac_port_init(struct hisi_femac_priv *priv)
745 {
746 	u32 val;
747 
748 	/* the MAC takes link status and PHY interface mode from software configuration */
749 	val = MAC_PORTSEL_STAT_CPU;
750 	if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
751 		val |= MAC_PORTSEL_RMII;
752 	writel(val, priv->port_base + MAC_PORTSEL);
753 
754 	/* clear all interrupt status */
755 	writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
756 	hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);
757 
758 	val = readl(priv->glb_base + GLB_FWCTRL);
759 	val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
760 	val |= FWCTRL_FW2CPU_ENA;
761 	writel(val, priv->glb_base + GLB_FWCTRL);
762 
763 	val = readl(priv->glb_base + GLB_MACTCTRL);
764 	val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
765 	writel(val, priv->glb_base + GLB_MACTCTRL);
766 
767 	val = readl(priv->port_base + MAC_SET);
768 	val &= ~MAX_FRAME_SIZE_MASK;
769 	val |= MAX_FRAME_SIZE;
770 	writel(val, priv->port_base + MAC_SET);
771 
772 	val = RX_COALESCED_TIMER |
773 		(RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
774 	writel(val, priv->port_base + RX_COALESCE_SET);
775 
776 	val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
777 	writel(val, priv->port_base + QLEN_SET);
778 }
779 
780 static int hisi_femac_drv_probe(struct platform_device *pdev)
781 {
782 	struct device *dev = &pdev->dev;
783 	struct device_node *node = dev->of_node;
784 	struct net_device *ndev;
785 	struct hisi_femac_priv *priv;
786 	struct phy_device *phy;
787 	const char *mac_addr;
788 	int ret;
789 
790 	ndev = alloc_etherdev(sizeof(*priv));
791 	if (!ndev)
792 		return -ENOMEM;
793 
794 	platform_set_drvdata(pdev, ndev);
795 	SET_NETDEV_DEV(ndev, &pdev->dev);
796 
797 	priv = netdev_priv(ndev);
798 	priv->dev = dev;
799 	priv->ndev = ndev;
800 
801 	priv->port_base = devm_platform_ioremap_resource(pdev, 0);
802 	if (IS_ERR(priv->port_base)) {
803 		ret = PTR_ERR(priv->port_base);
804 		goto out_free_netdev;
805 	}
806 
807 	priv->glb_base = devm_platform_ioremap_resource(pdev, 1);
808 	if (IS_ERR(priv->glb_base)) {
809 		ret = PTR_ERR(priv->glb_base);
810 		goto out_free_netdev;
811 	}
812 
813 	priv->clk = devm_clk_get(&pdev->dev, NULL);
814 	if (IS_ERR(priv->clk)) {
815 		dev_err(dev, "failed to get clk\n");
816 		ret = -ENODEV;
817 		goto out_free_netdev;
818 	}
819 
820 	ret = clk_prepare_enable(priv->clk);
821 	if (ret) {
822 		dev_err(dev, "failed to enable clk %d\n", ret);
823 		goto out_free_netdev;
824 	}
825 
826 	priv->mac_rst = devm_reset_control_get(dev, "mac");
827 	if (IS_ERR(priv->mac_rst)) {
828 		ret = PTR_ERR(priv->mac_rst);
829 		goto out_disable_clk;
830 	}
831 	hisi_femac_core_reset(priv);
832 
833 	priv->phy_rst = devm_reset_control_get(dev, "phy");
834 	if (IS_ERR(priv->phy_rst)) {
835 		priv->phy_rst = NULL;
836 	} else {
837 		ret = of_property_read_u32_array(node,
838 						 PHY_RESET_DELAYS_PROPERTY,
839 						 priv->phy_reset_delays,
840 						 DELAYS_NUM);
841 		if (ret)
842 			goto out_disable_clk;
843 		hisi_femac_phy_reset(priv);
844 	}
845 
846 	phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
847 	if (!phy) {
848 		dev_err(dev, "connect to PHY failed!\n");
849 		ret = -ENODEV;
850 		goto out_disable_clk;
851 	}
852 
853 	phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
854 			   (unsigned long)phy->phy_id,
855 			   phy_modes(phy->interface));
856 
857 	mac_addr = of_get_mac_address(node);
858 	if (!IS_ERR(mac_addr))
859 		ether_addr_copy(ndev->dev_addr, mac_addr);
860 	if (!is_valid_ether_addr(ndev->dev_addr)) {
861 		eth_hw_addr_random(ndev);
862 		dev_warn(dev, "using random MAC address %pM\n",
863 			 ndev->dev_addr);
864 	}
865 
866 	ndev->watchdog_timeo = 6 * HZ;
867 	ndev->priv_flags |= IFF_UNICAST_FLT;
868 	ndev->netdev_ops = &hisi_femac_netdev_ops;
869 	ndev->ethtool_ops = &hisi_femac_ethtools_ops;
870 	netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
871 
872 	hisi_femac_port_init(priv);
873 
874 	ret = hisi_femac_init_tx_and_rx_queues(priv);
875 	if (ret)
876 		goto out_disconnect_phy;
877 
878 	ndev->irq = platform_get_irq(pdev, 0);
879 	if (ndev->irq <= 0) {
880 		ret = -ENODEV;
881 		goto out_disconnect_phy;
882 	}
883 
884 	ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
885 			       IRQF_SHARED, pdev->name, ndev);
886 	if (ret) {
887 		dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
888 		goto out_disconnect_phy;
889 	}
890 
891 	ret = register_netdev(ndev);
892 	if (ret) {
893 		dev_err(dev, "register_netdev failed!\n");
894 		goto out_disconnect_phy;
895 	}
896 
897 	return ret;
898 
899 out_disconnect_phy:
900 	netif_napi_del(&priv->napi);
901 	phy_disconnect(phy);
902 out_disable_clk:
903 	clk_disable_unprepare(priv->clk);
904 out_free_netdev:
905 	free_netdev(ndev);
906 
907 	return ret;
908 }
909 
910 static int hisi_femac_drv_remove(struct platform_device *pdev)
911 {
912 	struct net_device *ndev = platform_get_drvdata(pdev);
913 	struct hisi_femac_priv *priv = netdev_priv(ndev);
914 
915 	netif_napi_del(&priv->napi);
916 	unregister_netdev(ndev);
917 
918 	phy_disconnect(ndev->phydev);
919 	clk_disable_unprepare(priv->clk);
920 	free_netdev(ndev);
921 
922 	return 0;
923 }
924 
925 #ifdef CONFIG_PM
926 static int hisi_femac_drv_suspend(struct platform_device *pdev,
927 				  pm_message_t state)
928 {
929 	struct net_device *ndev = platform_get_drvdata(pdev);
930 	struct hisi_femac_priv *priv = netdev_priv(ndev);
931 
932 	disable_irq(ndev->irq);
933 	if (netif_running(ndev)) {
934 		hisi_femac_net_close(ndev);
935 		netif_device_detach(ndev);
936 	}
937 
938 	clk_disable_unprepare(priv->clk);
939 
940 	return 0;
941 }
942 
943 static int hisi_femac_drv_resume(struct platform_device *pdev)
944 {
945 	struct net_device *ndev = platform_get_drvdata(pdev);
946 	struct hisi_femac_priv *priv = netdev_priv(ndev);
947 
948 	clk_prepare_enable(priv->clk);
949 	if (priv->phy_rst)
950 		hisi_femac_phy_reset(priv);
951 
952 	if (netif_running(ndev)) {
953 		hisi_femac_port_init(priv);
954 		hisi_femac_net_open(ndev);
955 		netif_device_attach(ndev);
956 	}
957 	enable_irq(ndev->irq);
958 
959 	return 0;
960 }
961 #endif
962 
963 static const struct of_device_id hisi_femac_match[] = {
964 	{.compatible = "hisilicon,hisi-femac-v1",},
965 	{.compatible = "hisilicon,hisi-femac-v2",},
966 	{.compatible = "hisilicon,hi3516cv300-femac",},
967 	{},
968 };
969 
970 MODULE_DEVICE_TABLE(of, hisi_femac_match);
971 
972 static struct platform_driver hisi_femac_driver = {
973 	.driver = {
974 		.name = "hisi-femac",
975 		.of_match_table = hisi_femac_match,
976 	},
977 	.probe = hisi_femac_drv_probe,
978 	.remove = hisi_femac_drv_remove,
979 #ifdef CONFIG_PM
980 	.suspend = hisi_femac_drv_suspend,
981 	.resume = hisi_femac_drv_resume,
982 #endif
983 };
984 
985 module_platform_driver(hisi_femac_driver);
986 
987 MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
988 MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
989 MODULE_LICENSE("GPL v2");
990 MODULE_ALIAS("platform:hisi-femac");
991