1 /*
2  * linux/drivers/net/ethoc.c
3  *
4  * Copyright (C) 2007-2008 Avionic Design Development GmbH
5  * Copyright (C) 2008-2009 Avionic Design GmbH
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * Written by Thierry Reding <thierry.reding@avionic-design.de>
12  */
13 
14 #include <linux/dma-mapping.h>
15 #include <linux/etherdevice.h>
16 #include <linux/crc32.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/mii.h>
20 #include <linux/phy.h>
21 #include <linux/platform_device.h>
22 #include <linux/sched.h>
23 #include <linux/slab.h>
24 #include <linux/of.h>
25 #include <net/ethoc.h>
26 
27 static int buffer_size = 0x8000; /* 32 KBytes */
28 module_param(buffer_size, int, 0);
29 MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
30 
31 /* register offsets */
32 #define	MODER		0x00
33 #define	INT_SOURCE	0x04
34 #define	INT_MASK	0x08
35 #define	IPGT		0x0c
36 #define	IPGR1		0x10
37 #define	IPGR2		0x14
38 #define	PACKETLEN	0x18
39 #define	COLLCONF	0x1c
40 #define	TX_BD_NUM	0x20
41 #define	CTRLMODER	0x24
42 #define	MIIMODER	0x28
43 #define	MIICOMMAND	0x2c
44 #define	MIIADDRESS	0x30
45 #define	MIITX_DATA	0x34
46 #define	MIIRX_DATA	0x38
47 #define	MIISTATUS	0x3c
48 #define	MAC_ADDR0	0x40
49 #define	MAC_ADDR1	0x44
50 #define	ETH_HASH0	0x48
51 #define	ETH_HASH1	0x4c
52 #define	ETH_TXCTRL	0x50
53 
54 /* mode register */
55 #define	MODER_RXEN	(1 <<  0) /* receive enable */
56 #define	MODER_TXEN	(1 <<  1) /* transmit enable */
57 #define	MODER_NOPRE	(1 <<  2) /* no preamble */
58 #define	MODER_BRO	(1 <<  3) /* broadcast address */
59 #define	MODER_IAM	(1 <<  4) /* individual address mode */
60 #define	MODER_PRO	(1 <<  5) /* promiscuous mode */
61 #define	MODER_IFG	(1 <<  6) /* interframe gap for incoming frames */
62 #define	MODER_LOOP	(1 <<  7) /* loopback */
63 #define	MODER_NBO	(1 <<  8) /* no back-off */
64 #define	MODER_EDE	(1 <<  9) /* excess defer enable */
65 #define	MODER_FULLD	(1 << 10) /* full duplex */
66 #define	MODER_RESET	(1 << 11) /* FIXME: reset (undocumented) */
67 #define	MODER_DCRC	(1 << 12) /* delayed CRC enable */
68 #define	MODER_CRC	(1 << 13) /* CRC enable */
69 #define	MODER_HUGE	(1 << 14) /* huge packets enable */
70 #define	MODER_PAD	(1 << 15) /* padding enabled */
71 #define	MODER_RSM	(1 << 16) /* receive small packets */
72 
73 /* interrupt source and mask registers */
74 #define	INT_MASK_TXF	(1 << 0) /* transmit frame */
75 #define	INT_MASK_TXE	(1 << 1) /* transmit error */
76 #define	INT_MASK_RXF	(1 << 2) /* receive frame */
77 #define	INT_MASK_RXE	(1 << 3) /* receive error */
78 #define	INT_MASK_BUSY	(1 << 4)
79 #define	INT_MASK_TXC	(1 << 5) /* transmit control frame */
80 #define	INT_MASK_RXC	(1 << 6) /* receive control frame */
81 
82 #define	INT_MASK_TX	(INT_MASK_TXF | INT_MASK_TXE)
83 #define	INT_MASK_RX	(INT_MASK_RXF | INT_MASK_RXE)
84 
85 #define	INT_MASK_ALL ( \
86 		INT_MASK_TXF | INT_MASK_TXE | \
87 		INT_MASK_RXF | INT_MASK_RXE | \
88 		INT_MASK_TXC | INT_MASK_RXC | \
89 		INT_MASK_BUSY \
90 	)
91 
92 /* packet length register */
93 #define	PACKETLEN_MIN(min)		(((min) & 0xffff) << 16)
94 #define	PACKETLEN_MAX(max)		(((max) & 0xffff) <<  0)
95 #define	PACKETLEN_MIN_MAX(min, max)	(PACKETLEN_MIN(min) | \
96 					PACKETLEN_MAX(max))
97 
98 /* transmit buffer number register */
99 #define	TX_BD_NUM_VAL(x)	(((x) <= 0x80) ? (x) : 0x80)
100 
101 /* control module mode register */
102 #define	CTRLMODER_PASSALL	(1 << 0) /* pass all receive frames */
103 #define	CTRLMODER_RXFLOW	(1 << 1) /* receive control flow */
104 #define	CTRLMODER_TXFLOW	(1 << 2) /* transmit control flow */
105 
106 /* MII mode register */
107 #define	MIIMODER_CLKDIV(x)	((x) & 0xfe) /* needs to be an even number */
108 #define	MIIMODER_NOPRE		(1 << 8) /* no preamble */
109 
110 /* MII command register */
111 #define	MIICOMMAND_SCAN		(1 << 0) /* scan status */
112 #define	MIICOMMAND_READ		(1 << 1) /* read status */
113 #define	MIICOMMAND_WRITE	(1 << 2) /* write control data */
114 
115 /* MII address register */
116 #define	MIIADDRESS_FIAD(x)		(((x) & 0x1f) << 0)
117 #define	MIIADDRESS_RGAD(x)		(((x) & 0x1f) << 8)
118 #define	MIIADDRESS_ADDR(phy, reg)	(MIIADDRESS_FIAD(phy) | \
119 					MIIADDRESS_RGAD(reg))
120 
121 /* MII transmit data register */
122 #define	MIITX_DATA_VAL(x)	((x) & 0xffff)
123 
124 /* MII receive data register */
125 #define	MIIRX_DATA_VAL(x)	((x) & 0xffff)
126 
127 /* MII status register */
128 #define	MIISTATUS_LINKFAIL	(1 << 0)
129 #define	MIISTATUS_BUSY		(1 << 1)
130 #define	MIISTATUS_INVALID	(1 << 2)
131 
132 /* TX buffer descriptor */
133 #define	TX_BD_CS		(1 <<  0) /* carrier sense lost */
134 #define	TX_BD_DF		(1 <<  1) /* defer indication */
135 #define	TX_BD_LC		(1 <<  2) /* late collision */
136 #define	TX_BD_RL		(1 <<  3) /* retransmission limit */
137 #define	TX_BD_RETRY_MASK	(0x00f0)
138 #define	TX_BD_RETRY(x)		(((x) & 0x00f0) >>  4)
139 #define	TX_BD_UR		(1 <<  8) /* transmitter underrun */
140 #define	TX_BD_CRC		(1 << 11) /* TX CRC enable */
141 #define	TX_BD_PAD		(1 << 12) /* pad enable for short packets */
142 #define	TX_BD_WRAP		(1 << 13)
143 #define	TX_BD_IRQ		(1 << 14) /* interrupt request enable */
144 #define	TX_BD_READY		(1 << 15) /* TX buffer ready */
145 #define	TX_BD_LEN(x)		(((x) & 0xffff) << 16)
146 #define	TX_BD_LEN_MASK		(0xffff << 16)
147 
148 #define	TX_BD_STATS		(TX_BD_CS | TX_BD_DF | TX_BD_LC | \
149 				TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)
150 
151 /* RX buffer descriptor */
152 #define	RX_BD_LC	(1 <<  0) /* late collision */
153 #define	RX_BD_CRC	(1 <<  1) /* RX CRC error */
154 #define	RX_BD_SF	(1 <<  2) /* short frame */
155 #define	RX_BD_TL	(1 <<  3) /* too long */
156 #define	RX_BD_DN	(1 <<  4) /* dribble nibble */
157 #define	RX_BD_IS	(1 <<  5) /* invalid symbol */
158 #define	RX_BD_OR	(1 <<  6) /* receiver overrun */
159 #define	RX_BD_MISS	(1 <<  7)
160 #define	RX_BD_CF	(1 <<  8) /* control frame */
161 #define	RX_BD_WRAP	(1 << 13)
162 #define	RX_BD_IRQ	(1 << 14) /* interrupt request enable */
163 #define	RX_BD_EMPTY	(1 << 15)
164 #define	RX_BD_LEN(x)	(((x) & 0xffff) << 16)
165 
166 #define	RX_BD_STATS	(RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
167 			RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)
168 
169 #define	ETHOC_BUFSIZ		1536
170 #define	ETHOC_ZLEN		64
171 #define	ETHOC_BD_BASE		0x400
172 #define	ETHOC_TIMEOUT		(HZ / 2)
173 #define	ETHOC_MII_TIMEOUT	(1 + (HZ / 5))
174 
175 /**
176  * struct ethoc - driver-private device structure
177  * @iobase:	pointer to I/O memory region
178  * @membase:	pointer to buffer memory region
179  * @dma_alloc:	dma allocated buffer size
180  * @io_region_size:	I/O memory region size
181  * @num_tx:	number of send buffers
182  * @cur_tx:	last send buffer written
183  * @dty_tx:	last buffer actually sent
184  * @num_rx:	number of receive buffers
185  * @cur_rx:	current receive buffer
186  * @vma:        pointer to array of virtual memory addresses for buffers
187  * @netdev:	pointer to network device structure
188  * @napi:	NAPI structure
189  * @msg_enable:	device state flags
190  * @lock:	device lock
191  * @phy:	attached PHY
192  * @mdio:	MDIO bus for PHY access
193  * @phy_id:	address of attached PHY
194  */
195 struct ethoc {
196 	void __iomem *iobase;
197 	void __iomem *membase;
198 	int dma_alloc;
199 	resource_size_t io_region_size;
200 
201 	unsigned int num_tx;
202 	unsigned int cur_tx;
203 	unsigned int dty_tx;
204 
205 	unsigned int num_rx;
206 	unsigned int cur_rx;
207 
208 	void **vma;
209 
210 	struct net_device *netdev;
211 	struct napi_struct napi;
212 	u32 msg_enable;
213 
214 	spinlock_t lock;
215 
216 	struct phy_device *phy;
217 	struct mii_bus *mdio;
218 	s8 phy_id;
219 };
220 
221 /**
222  * struct ethoc_bd - buffer descriptor
223  * @stat:	buffer statistics
224  * @addr:	physical memory address
225  */
226 struct ethoc_bd {
227 	u32 stat;
228 	u32 addr;
229 };
230 
231 static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
232 {
233 	return ioread32(dev->iobase + offset);
234 }
235 
236 static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
237 {
238 	iowrite32(data, dev->iobase + offset);
239 }
240 
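/*
 * Buffer descriptors do not live in system memory; they sit in the MAC's own
 * address space starting at ETHOC_BD_BASE and are accessed through the two
 * helpers below. Descriptors 0..num_tx-1 are used for transmission and
 * descriptors num_tx..num_tx+num_rx-1 for reception, which is why the RX
 * path addresses its descriptors as "num_tx + cur_rx".
 */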
241 static inline void ethoc_read_bd(struct ethoc *dev, int index,
242 		struct ethoc_bd *bd)
243 {
244 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
245 	bd->stat = ethoc_read(dev, offset + 0);
246 	bd->addr = ethoc_read(dev, offset + 4);
247 }
248 
249 static inline void ethoc_write_bd(struct ethoc *dev, int index,
250 		const struct ethoc_bd *bd)
251 {
252 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
253 	ethoc_write(dev, offset + 0, bd->stat);
254 	ethoc_write(dev, offset + 4, bd->addr);
255 }
256 
257 static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
258 {
259 	u32 imask = ethoc_read(dev, INT_MASK);
260 	imask |= mask;
261 	ethoc_write(dev, INT_MASK, imask);
262 }
263 
264 static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
265 {
266 	u32 imask = ethoc_read(dev, INT_MASK);
267 	imask &= ~mask;
268 	ethoc_write(dev, INT_MASK, imask);
269 }
270 
271 static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
272 {
273 	ethoc_write(dev, INT_SOURCE, mask);
274 }
275 
276 static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
277 {
278 	u32 mode = ethoc_read(dev, MODER);
279 	mode |= MODER_RXEN | MODER_TXEN;
280 	ethoc_write(dev, MODER, mode);
281 }
282 
283 static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
284 {
285 	u32 mode = ethoc_read(dev, MODER);
286 	mode &= ~(MODER_RXEN | MODER_TXEN);
287 	ethoc_write(dev, MODER, mode);
288 }
289 
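/*
 * ethoc_init_ring() carves both rings out of a single contiguous buffer
 * region in ETHOC_BUFSIZ chunks: each descriptor's addr field receives the
 * bus address (mem_start + i * ETHOC_BUFSIZ) used by the MAC, while
 * dev->vma[] records the matching CPU-side address (membase + i * ETHOC_BUFSIZ)
 * used with memcpy_toio()/memcpy_fromio() in the TX and RX paths.
 */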
290 static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
291 {
292 	struct ethoc_bd bd;
293 	int i;
294 	void *vma;
295 
296 	dev->cur_tx = 0;
297 	dev->dty_tx = 0;
298 	dev->cur_rx = 0;
299 
300 	ethoc_write(dev, TX_BD_NUM, dev->num_tx);
301 
302 	/* setup transmission buffers */
303 	bd.addr = mem_start;
304 	bd.stat = TX_BD_IRQ | TX_BD_CRC;
305 	vma = dev->membase;
306 
307 	for (i = 0; i < dev->num_tx; i++) {
308 		if (i == dev->num_tx - 1)
309 			bd.stat |= TX_BD_WRAP;
310 
311 		ethoc_write_bd(dev, i, &bd);
312 		bd.addr += ETHOC_BUFSIZ;
313 
314 		dev->vma[i] = vma;
315 		vma += ETHOC_BUFSIZ;
316 	}
317 
318 	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
319 
320 	for (i = 0; i < dev->num_rx; i++) {
321 		if (i == dev->num_rx - 1)
322 			bd.stat |= RX_BD_WRAP;
323 
324 		ethoc_write_bd(dev, dev->num_tx + i, &bd);
325 		bd.addr += ETHOC_BUFSIZ;
326 
327 		dev->vma[dev->num_tx + i] = vma;
328 		vma += ETHOC_BUFSIZ;
329 	}
330 
331 	return 0;
332 }
333 
334 static int ethoc_reset(struct ethoc *dev)
335 {
336 	u32 mode;
337 
338 	/* TODO: reset controller? */
339 
340 	ethoc_disable_rx_and_tx(dev);
341 
342 	/* TODO: setup registers */
343 
344 	/* enable FCS generation and automatic padding */
345 	mode = ethoc_read(dev, MODER);
346 	mode |= MODER_CRC | MODER_PAD;
347 	ethoc_write(dev, MODER, mode);
348 
349 	/* set full-duplex mode */
350 	mode = ethoc_read(dev, MODER);
351 	mode |= MODER_FULLD;
352 	ethoc_write(dev, MODER, mode);
353 	ethoc_write(dev, IPGT, 0x15);
354 
355 	ethoc_ack_irq(dev, INT_MASK_ALL);
356 	ethoc_enable_irq(dev, INT_MASK_ALL);
357 	ethoc_enable_rx_and_tx(dev);
358 	return 0;
359 }
360 
361 static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
362 		struct ethoc_bd *bd)
363 {
364 	struct net_device *netdev = dev->netdev;
365 	unsigned int ret = 0;
366 
367 	if (bd->stat & RX_BD_TL) {
368 		dev_err(&netdev->dev, "RX: frame too long\n");
369 		netdev->stats.rx_length_errors++;
370 		ret++;
371 	}
372 
373 	if (bd->stat & RX_BD_SF) {
374 		dev_err(&netdev->dev, "RX: frame too short\n");
375 		netdev->stats.rx_length_errors++;
376 		ret++;
377 	}
378 
379 	if (bd->stat & RX_BD_DN) {
380 		dev_err(&netdev->dev, "RX: dribble nibble\n");
381 		netdev->stats.rx_frame_errors++;
382 	}
383 
384 	if (bd->stat & RX_BD_CRC) {
385 		dev_err(&netdev->dev, "RX: wrong CRC\n");
386 		netdev->stats.rx_crc_errors++;
387 		ret++;
388 	}
389 
390 	if (bd->stat & RX_BD_OR) {
391 		dev_err(&netdev->dev, "RX: overrun\n");
392 		netdev->stats.rx_over_errors++;
393 		ret++;
394 	}
395 
396 	if (bd->stat & RX_BD_MISS)
397 		netdev->stats.rx_missed_errors++;
398 
399 	if (bd->stat & RX_BD_LC) {
400 		dev_err(&netdev->dev, "RX: late collision\n");
401 		netdev->stats.collisions++;
402 		ret++;
403 	}
404 
405 	return ret;
406 }
407 
408 static int ethoc_rx(struct net_device *dev, int limit)
409 {
410 	struct ethoc *priv = netdev_priv(dev);
411 	int count;
412 
413 	for (count = 0; count < limit; ++count) {
414 		unsigned int entry;
415 		struct ethoc_bd bd;
416 
417 		entry = priv->num_tx + priv->cur_rx;
418 		ethoc_read_bd(priv, entry, &bd);
419 		if (bd.stat & RX_BD_EMPTY) {
420 			ethoc_ack_irq(priv, INT_MASK_RX);
421 			/* If packet (interrupt) came in between checking
422 			 * BD_EMPTY and clearing the interrupt source, then we
423 			 * risk missing the packet as the RX interrupt won't
424 			 * trigger right away when we reenable it; hence, check
425 			 * BD_EMPTY here again to make sure there isn't such a
426 			 * packet waiting for us...
427 			 */
428 			ethoc_read_bd(priv, entry, &bd);
429 			if (bd.stat & RX_BD_EMPTY)
430 				break;
431 		}
432 
433 		if (ethoc_update_rx_stats(priv, &bd) == 0) {
434 			int size = bd.stat >> 16;
435 			struct sk_buff *skb;
436 
437 			size -= 4; /* strip the CRC */
438 			skb = netdev_alloc_skb_ip_align(dev, size);
439 
440 			if (likely(skb)) {
441 				void *src = priv->vma[entry];
442 				memcpy_fromio(skb_put(skb, size), src, size);
443 				skb->protocol = eth_type_trans(skb, dev);
444 				dev->stats.rx_packets++;
445 				dev->stats.rx_bytes += size;
446 				netif_receive_skb(skb);
447 			} else {
448 				if (net_ratelimit())
449 					dev_warn(&dev->dev, "low on memory - "
450 							"packet dropped\n");
451 
452 				dev->stats.rx_dropped++;
453 				break;
454 			}
455 		}
456 
457 		/* clear the buffer descriptor so it can be reused */
458 		bd.stat &= ~RX_BD_STATS;
459 		bd.stat |=  RX_BD_EMPTY;
460 		ethoc_write_bd(priv, entry, &bd);
461 		if (++priv->cur_rx == priv->num_rx)
462 			priv->cur_rx = 0;
463 	}
464 
465 	return count;
466 }
467 
468 static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
469 {
470 	struct net_device *netdev = dev->netdev;
471 
472 	if (bd->stat & TX_BD_LC) {
473 		dev_err(&netdev->dev, "TX: late collision\n");
474 		netdev->stats.tx_window_errors++;
475 	}
476 
477 	if (bd->stat & TX_BD_RL) {
478 		dev_err(&netdev->dev, "TX: retransmit limit\n");
479 		netdev->stats.tx_aborted_errors++;
480 	}
481 
482 	if (bd->stat & TX_BD_UR) {
483 		dev_err(&netdev->dev, "TX: underrun\n");
484 		netdev->stats.tx_fifo_errors++;
485 	}
486 
487 	if (bd->stat & TX_BD_CS) {
488 		dev_err(&netdev->dev, "TX: carrier sense lost\n");
489 		netdev->stats.tx_carrier_errors++;
490 	}
491 
492 	if (bd->stat & TX_BD_STATS)
493 		netdev->stats.tx_errors++;
494 
495 	netdev->stats.collisions += (bd->stat >> 4) & 0xf;
496 	netdev->stats.tx_bytes += bd->stat >> 16;
497 	netdev->stats.tx_packets++;
498 }
499 
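/*
 * cur_tx and dty_tx are free-running counters; ethoc_probe() rounds num_tx
 * down to a power of two, so masking with (num_tx - 1) always yields a valid
 * descriptor index. The queue is woken again once no more than half of the
 * TX ring is still outstanding.
 */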
500 static int ethoc_tx(struct net_device *dev, int limit)
501 {
502 	struct ethoc *priv = netdev_priv(dev);
503 	int count;
504 	struct ethoc_bd bd;
505 
506 	for (count = 0; count < limit; ++count) {
507 		unsigned int entry;
508 
509 		entry = priv->dty_tx & (priv->num_tx-1);
510 
511 		ethoc_read_bd(priv, entry, &bd);
512 
513 		if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
514 			ethoc_ack_irq(priv, INT_MASK_TX);
515 			/* If interrupt came in between reading in the BD
516 			 * and clearing the interrupt source, then we risk
517 			 * missing the event as the TX interrupt won't trigger
518 			 * right away when we reenable it; hence, check
519 			 * TX_BD_READY here again to make sure there isn't such an
520 			 * event pending...
521 			 */
522 			ethoc_read_bd(priv, entry, &bd);
523 			if (bd.stat & TX_BD_READY ||
524 			    (priv->dty_tx == priv->cur_tx))
525 				break;
526 		}
527 
528 		ethoc_update_tx_stats(priv, &bd);
529 		priv->dty_tx++;
530 	}
531 
532 	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
533 		netif_wake_queue(dev);
534 
535 	return count;
536 }
537 
538 static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
539 {
540 	struct net_device *dev = dev_id;
541 	struct ethoc *priv = netdev_priv(dev);
542 	u32 pending;
543 	u32 mask;
544 
545 	/* Figure out what triggered the interrupt...
546 	 * The tricky bit here is that the interrupt source bits get
547 	 * set in INT_SOURCE for an event regardless of whether that
548 	 * event is masked or not.  Thus, in order to figure out what
549 	 * triggered the interrupt, we need to remove the sources
550 	 * for all events that are currently masked.  This behaviour
551 	 * is not particularly well documented but reasonable...
552 	 */
553 	mask = ethoc_read(priv, INT_MASK);
554 	pending = ethoc_read(priv, INT_SOURCE);
555 	pending &= mask;
556 
557 	if (unlikely(pending == 0)) {
558 		return IRQ_NONE;
559 	}
560 
561 	ethoc_ack_irq(priv, pending);
562 
563 	/* We always handle the dropped packet interrupt */
564 	if (pending & INT_MASK_BUSY) {
565 		dev_err(&dev->dev, "packet dropped\n");
566 		dev->stats.rx_dropped++;
567 	}
568 
569 	/* Handle receive/transmit event by switching to polling */
570 	if (pending & (INT_MASK_TX | INT_MASK_RX)) {
571 		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
572 		napi_schedule(&priv->napi);
573 	}
574 
575 	return IRQ_HANDLED;
576 }
577 
578 static int ethoc_get_mac_address(struct net_device *dev, void *addr)
579 {
580 	struct ethoc *priv = netdev_priv(dev);
581 	u8 *mac = (u8 *)addr;
582 	u32 reg;
583 
584 	reg = ethoc_read(priv, MAC_ADDR0);
585 	mac[2] = (reg >> 24) & 0xff;
586 	mac[3] = (reg >> 16) & 0xff;
587 	mac[4] = (reg >>  8) & 0xff;
588 	mac[5] = (reg >>  0) & 0xff;
589 
590 	reg = ethoc_read(priv, MAC_ADDR1);
591 	mac[0] = (reg >>  8) & 0xff;
592 	mac[1] = (reg >>  0) & 0xff;
593 
594 	return 0;
595 }
596 
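/*
 * ethoc_interrupt() masks the RX/TX interrupts and schedules NAPI; once both
 * ethoc_rx() and ethoc_tx() report less work than the budget, polling is
 * completed and the interrupts are unmasked again here.
 */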
597 static int ethoc_poll(struct napi_struct *napi, int budget)
598 {
599 	struct ethoc *priv = container_of(napi, struct ethoc, napi);
600 	int rx_work_done = 0;
601 	int tx_work_done = 0;
602 
603 	rx_work_done = ethoc_rx(priv->netdev, budget);
604 	tx_work_done = ethoc_tx(priv->netdev, budget);
605 
606 	if (rx_work_done < budget && tx_work_done < budget) {
607 		napi_complete(napi);
608 		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
609 	}
610 
611 	return rx_work_done;
612 }
613 
614 static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
615 {
616 	struct ethoc *priv = bus->priv;
617 	int i;
618 
619 	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
620 	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
621 
622 	for (i = 0; i < 5; i++) {
623 		u32 status = ethoc_read(priv, MIISTATUS);
624 		if (!(status & MIISTATUS_BUSY)) {
625 			u32 data = ethoc_read(priv, MIIRX_DATA);
626 			/* reset MII command register */
627 			ethoc_write(priv, MIICOMMAND, 0);
628 			return data;
629 		}
630 		usleep_range(100, 200);
631 	}
632 
633 	return -EBUSY;
634 }
635 
636 static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
637 {
638 	struct ethoc *priv = bus->priv;
639 	int i;
640 
641 	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
642 	ethoc_write(priv, MIITX_DATA, val);
643 	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
644 
645 	for (i = 0; i < 5; i++) {
646 		u32 stat = ethoc_read(priv, MIISTATUS);
647 		if (!(stat & MIISTATUS_BUSY)) {
648 			/* reset MII command register */
649 			ethoc_write(priv, MIICOMMAND, 0);
650 			return 0;
651 		}
652 		usleep_range(100, 200);
653 	}
654 
655 	return -EBUSY;
656 }
657 
658 static int ethoc_mdio_reset(struct mii_bus *bus)
659 {
660 	return 0;
661 }
662 
663 static void ethoc_mdio_poll(struct net_device *dev)
664 {
665 }
666 
667 static int __devinit ethoc_mdio_probe(struct net_device *dev)
668 {
669 	struct ethoc *priv = netdev_priv(dev);
670 	struct phy_device *phy;
671 	int err;
672 
673 	if (priv->phy_id != -1) {
674 		phy = priv->mdio->phy_map[priv->phy_id];
675 	} else {
676 		phy = phy_find_first(priv->mdio);
677 	}
678 
679 	if (!phy) {
680 		dev_err(&dev->dev, "no PHY found\n");
681 		return -ENXIO;
682 	}
683 
684 	err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
685 			PHY_INTERFACE_MODE_GMII);
686 	if (err) {
687 		dev_err(&dev->dev, "could not attach to PHY\n");
688 		return err;
689 	}
690 
691 	priv->phy = phy;
692 	return 0;
693 }
694 
695 static int ethoc_open(struct net_device *dev)
696 {
697 	struct ethoc *priv = netdev_priv(dev);
698 	int ret;
699 
700 	ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
701 			dev->name, dev);
702 	if (ret)
703 		return ret;
704 
705 	ethoc_init_ring(priv, dev->mem_start);
706 	ethoc_reset(priv);
707 
708 	if (netif_queue_stopped(dev)) {
709 		dev_dbg(&dev->dev, " resuming queue\n");
710 		netif_wake_queue(dev);
711 	} else {
712 		dev_dbg(&dev->dev, " starting queue\n");
713 		netif_start_queue(dev);
714 	}
715 
716 	phy_start(priv->phy);
717 	napi_enable(&priv->napi);
718 
719 	if (netif_msg_ifup(priv)) {
720 		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
721 				dev->base_addr, dev->mem_start, dev->mem_end);
722 	}
723 
724 	return 0;
725 }
726 
727 static int ethoc_stop(struct net_device *dev)
728 {
729 	struct ethoc *priv = netdev_priv(dev);
730 
731 	napi_disable(&priv->napi);
732 
733 	if (priv->phy)
734 		phy_stop(priv->phy);
735 
736 	ethoc_disable_rx_and_tx(priv);
737 	free_irq(dev->irq, dev);
738 
739 	if (!netif_queue_stopped(dev))
740 		netif_stop_queue(dev);
741 
742 	return 0;
743 }
744 
745 static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
746 {
747 	struct ethoc *priv = netdev_priv(dev);
748 	struct mii_ioctl_data *mdio = if_mii(ifr);
749 	struct phy_device *phy = NULL;
750 
751 	if (!netif_running(dev))
752 		return -EINVAL;
753 
754 	if (cmd != SIOCGMIIPHY) {
755 		if (mdio->phy_id >= PHY_MAX_ADDR)
756 			return -ERANGE;
757 
758 		phy = priv->mdio->phy_map[mdio->phy_id];
759 		if (!phy)
760 			return -ENODEV;
761 	} else {
762 		phy = priv->phy;
763 	}
764 
765 	return phy_mii_ioctl(phy, ifr, cmd);
766 }
767 
768 static int ethoc_config(struct net_device *dev, struct ifmap *map)
769 {
770 	return -ENOSYS;
771 }
772 
773 static int ethoc_set_mac_address(struct net_device *dev, void *addr)
774 {
775 	struct ethoc *priv = netdev_priv(dev);
776 	u8 *mac = (u8 *)addr;
777 
778 	ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
779 				     (mac[4] <<  8) | (mac[5] <<  0));
780 	ethoc_write(priv, MAC_ADDR1, (mac[0] <<  8) | (mac[1] <<  0));
781 
782 	return 0;
783 }
784 
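/*
 * Multicast filtering uses a 64-bit hash split across ETH_HASH0/ETH_HASH1:
 * the upper six bits of the CRC-32 of each multicast address select one bit
 * in the filter.
 */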
785 static void ethoc_set_multicast_list(struct net_device *dev)
786 {
787 	struct ethoc *priv = netdev_priv(dev);
788 	u32 mode = ethoc_read(priv, MODER);
789 	struct netdev_hw_addr *ha;
790 	u32 hash[2] = { 0, 0 };
791 
792 	/* set loopback mode if requested */
793 	if (dev->flags & IFF_LOOPBACK)
794 		mode |=  MODER_LOOP;
795 	else
796 		mode &= ~MODER_LOOP;
797 
798 	/* receive broadcast frames if requested */
799 	if (dev->flags & IFF_BROADCAST)
800 		mode &= ~MODER_BRO;
801 	else
802 		mode |=  MODER_BRO;
803 
804 	/* enable promiscuous mode if requested */
805 	if (dev->flags & IFF_PROMISC)
806 		mode |=  MODER_PRO;
807 	else
808 		mode &= ~MODER_PRO;
809 
810 	ethoc_write(priv, MODER, mode);
811 
812 	/* receive multicast frames */
813 	if (dev->flags & IFF_ALLMULTI) {
814 		hash[0] = 0xffffffff;
815 		hash[1] = 0xffffffff;
816 	} else {
817 		netdev_for_each_mc_addr(ha, dev) {
818 			u32 crc = ether_crc(ETH_ALEN, ha->addr);
819 			int bit = (crc >> 26) & 0x3f;
820 			hash[bit >> 5] |= 1 << (bit & 0x1f);
821 		}
822 	}
823 
824 	ethoc_write(priv, ETH_HASH0, hash[0]);
825 	ethoc_write(priv, ETH_HASH1, hash[1]);
826 }
827 
828 static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
829 {
830 	return -ENOSYS;
831 }
832 
833 static void ethoc_tx_timeout(struct net_device *dev)
834 {
835 	struct ethoc *priv = netdev_priv(dev);
836 	u32 pending = ethoc_read(priv, INT_SOURCE);
837 	if (likely(pending))
838 		ethoc_interrupt(dev->irq, dev);
839 }
840 
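/*
 * The TX descriptor is written in two steps: first with the frame length and
 * status flags, and only then with TX_BD_READY set, so that the MAC never
 * observes TX_BD_READY together with stale length bits.
 */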
841 static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
842 {
843 	struct ethoc *priv = netdev_priv(dev);
844 	struct ethoc_bd bd;
845 	unsigned int entry;
846 	void *dest;
847 
848 	if (unlikely(skb->len > ETHOC_BUFSIZ)) {
849 		dev->stats.tx_errors++;
850 		goto out;
851 	}
852 
853 	entry = priv->cur_tx % priv->num_tx;
854 	spin_lock_irq(&priv->lock);
855 	priv->cur_tx++;
856 
857 	ethoc_read_bd(priv, entry, &bd);
858 	if (unlikely(skb->len < ETHOC_ZLEN))
859 		bd.stat |=  TX_BD_PAD;
860 	else
861 		bd.stat &= ~TX_BD_PAD;
862 
863 	dest = priv->vma[entry];
864 	memcpy_toio(dest, skb->data, skb->len);
865 
866 	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
867 	bd.stat |= TX_BD_LEN(skb->len);
868 	ethoc_write_bd(priv, entry, &bd);
869 
870 	bd.stat |= TX_BD_READY;
871 	ethoc_write_bd(priv, entry, &bd);
872 
873 	if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
874 		dev_dbg(&dev->dev, "stopping queue\n");
875 		netif_stop_queue(dev);
876 	}
877 
878 	spin_unlock_irq(&priv->lock);
879 	skb_tx_timestamp(skb);
880 out:
881 	dev_kfree_skb(skb);
882 	return NETDEV_TX_OK;
883 }
884 
885 static const struct net_device_ops ethoc_netdev_ops = {
886 	.ndo_open = ethoc_open,
887 	.ndo_stop = ethoc_stop,
888 	.ndo_do_ioctl = ethoc_ioctl,
889 	.ndo_set_config = ethoc_config,
890 	.ndo_set_mac_address = ethoc_set_mac_address,
891 	.ndo_set_rx_mode = ethoc_set_multicast_list,
892 	.ndo_change_mtu = ethoc_change_mtu,
893 	.ndo_tx_timeout = ethoc_tx_timeout,
894 	.ndo_start_xmit = ethoc_start_xmit,
895 };
896 
897 /**
898  * ethoc_probe() - initialize OpenCores ethernet MAC
899  * @pdev:	platform device
900  */
901 static int __devinit ethoc_probe(struct platform_device *pdev)
902 {
903 	struct net_device *netdev = NULL;
904 	struct resource *res = NULL;
905 	struct resource *mmio = NULL;
906 	struct resource *mem = NULL;
907 	struct ethoc *priv = NULL;
908 	unsigned int phy;
909 	int num_bd;
910 	int ret = 0;
911 
912 	/* allocate networking device */
913 	netdev = alloc_etherdev(sizeof(struct ethoc));
914 	if (!netdev) {
915 		dev_err(&pdev->dev, "cannot allocate network device\n");
916 		ret = -ENOMEM;
917 		goto out;
918 	}
919 
920 	SET_NETDEV_DEV(netdev, &pdev->dev);
921 	platform_set_drvdata(pdev, netdev);
922 
923 	/* obtain I/O memory space */
924 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
925 	if (!res) {
926 		dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
927 		ret = -ENXIO;
928 		goto free;
929 	}
930 
931 	mmio = devm_request_mem_region(&pdev->dev, res->start,
932 			resource_size(res), res->name);
933 	if (!mmio) {
934 		dev_err(&pdev->dev, "cannot request I/O memory space\n");
935 		ret = -ENXIO;
936 		goto free;
937 	}
938 
939 	netdev->base_addr = mmio->start;
940 
941 	/* obtain buffer memory space */
942 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
943 	if (res) {
944 		mem = devm_request_mem_region(&pdev->dev, res->start,
945 			resource_size(res), res->name);
946 		if (!mem) {
947 			dev_err(&pdev->dev, "cannot request memory space\n");
948 			ret = -ENXIO;
949 			goto free;
950 		}
951 
952 		netdev->mem_start = mem->start;
953 		netdev->mem_end   = mem->end;
954 	}
955 
956 
957 	/* obtain device IRQ number */
958 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
959 	if (!res) {
960 		dev_err(&pdev->dev, "cannot obtain IRQ\n");
961 		ret = -ENXIO;
962 		goto free;
963 	}
964 
965 	netdev->irq = res->start;
966 
967 	/* setup driver-private data */
968 	priv = netdev_priv(netdev);
969 	priv->netdev = netdev;
970 	priv->dma_alloc = 0;
971 	priv->io_region_size = resource_size(mmio);
972 
973 	priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
974 			resource_size(mmio));
975 	if (!priv->iobase) {
976 		dev_err(&pdev->dev, "cannot remap I/O memory space\n");
977 		ret = -ENXIO;
978 		goto free;
979 	}
980 
981 	if (netdev->mem_end) {
982 		priv->membase = devm_ioremap_nocache(&pdev->dev,
983 			netdev->mem_start, resource_size(mem));
984 		if (!priv->membase) {
985 			dev_err(&pdev->dev, "cannot remap memory space\n");
986 			ret = -ENXIO;
987 			goto free;
988 		}
989 	} else {
990 		/* Allocate buffer memory */
991 		priv->membase = dmam_alloc_coherent(&pdev->dev,
992 			buffer_size, (void *)&netdev->mem_start,
993 			GFP_KERNEL);
994 		if (!priv->membase) {
995 			dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
996 				buffer_size);
997 			ret = -ENOMEM;
998 			goto free;
999 		}
1000 		netdev->mem_end = netdev->mem_start + buffer_size;
1001 		priv->dma_alloc = buffer_size;
1002 	}
1003 
1004 	/* calculate the number of TX/RX buffers, maximum 128 supported */
1005 	num_bd = min_t(unsigned int,
1006 		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
1007 	if (num_bd < 4) {
1008 		ret = -ENODEV;
1009 		goto free;
1010 	}
1011 	/* num_tx must be a power of two */
1012 	priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
1013 	priv->num_rx = num_bd - priv->num_tx;
1014 
1015 	dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
1016 		priv->num_tx, priv->num_rx);
1017 
1018 	priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
1019 	if (!priv->vma) {
1020 		ret = -ENOMEM;
1021 		goto free;
1022 	}
1023 
1024 	/* Allow the platform setup code to pass in a MAC address. */
1025 	if (pdev->dev.platform_data) {
1026 		struct ethoc_platform_data *pdata = pdev->dev.platform_data;
1027 		memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
1028 		priv->phy_id = pdata->phy_id;
1029 	} else {
1030 		priv->phy_id = -1;
1031 
1032 #ifdef CONFIG_OF
1033 		{
1034 		const uint8_t *mac;
1035 
1036 		mac = of_get_property(pdev->dev.of_node,
1037 				      "local-mac-address",
1038 				      NULL);
1039 		if (mac)
1040 			memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
1041 		}
1042 #endif
1043 	}
1044 
1045 	/* Check that the given MAC address is valid. If it isn't, read the
1046 	 * current MAC from the controller. */
1047 	if (!is_valid_ether_addr(netdev->dev_addr))
1048 		ethoc_get_mac_address(netdev, netdev->dev_addr);
1049 
1050 	/* Check the MAC again for validity, if it still isn't choose and
1051 	 * program a random one. */
1052 	if (!is_valid_ether_addr(netdev->dev_addr))
1053 		random_ether_addr(netdev->dev_addr);
1054 
1055 	ethoc_set_mac_address(netdev, netdev->dev_addr);
1056 
1057 	/* register MII bus */
1058 	priv->mdio = mdiobus_alloc();
1059 	if (!priv->mdio) {
1060 		ret = -ENOMEM;
1061 		goto free;
1062 	}
1063 
1064 	priv->mdio->name = "ethoc-mdio";
1065 	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
1066 			priv->mdio->name, pdev->id);
1067 	priv->mdio->read = ethoc_mdio_read;
1068 	priv->mdio->write = ethoc_mdio_write;
1069 	priv->mdio->reset = ethoc_mdio_reset;
1070 	priv->mdio->priv = priv;
1071 
1072 	priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1073 	if (!priv->mdio->irq) {
1074 		ret = -ENOMEM;
1075 		goto free_mdio;
1076 	}
1077 
1078 	for (phy = 0; phy < PHY_MAX_ADDR; phy++)
1079 		priv->mdio->irq[phy] = PHY_POLL;
1080 
1081 	ret = mdiobus_register(priv->mdio);
1082 	if (ret) {
1083 		dev_err(&netdev->dev, "failed to register MDIO bus\n");
1084 		goto free_mdio;
1085 	}
1086 
1087 	ret = ethoc_mdio_probe(netdev);
1088 	if (ret) {
1089 		dev_err(&netdev->dev, "failed to probe MDIO bus\n");
1090 		goto error;
1091 	}
1092 
1093 	ether_setup(netdev);
1094 
1095 	/* setup the net_device structure */
1096 	netdev->netdev_ops = &ethoc_netdev_ops;
1097 	netdev->watchdog_timeo = ETHOC_TIMEOUT;
1098 	netdev->features |= 0;
1099 
1100 	/* setup NAPI */
1101 	netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1102 
1103 	spin_lock_init(&priv->lock);
1104 
1105 	ret = register_netdev(netdev);
1106 	if (ret < 0) {
1107 		dev_err(&netdev->dev, "failed to register interface\n");
1108 		goto error2;
1109 	}
1110 
1111 	goto out;
1112 
1113 error2:
1114 	netif_napi_del(&priv->napi);
1115 error:
1116 	mdiobus_unregister(priv->mdio);
1117 free_mdio:
1118 	kfree(priv->mdio->irq);
1119 	mdiobus_free(priv->mdio);
1120 free:
1121 	free_netdev(netdev);
1122 out:
1123 	return ret;
1124 }
1125 
1126 /**
1127  * ethoc_remove() - shutdown OpenCores ethernet MAC
1128  * @pdev:	platform device
1129  */
1130 static int __devexit ethoc_remove(struct platform_device *pdev)
1131 {
1132 	struct net_device *netdev = platform_get_drvdata(pdev);
1133 	struct ethoc *priv = netdev_priv(netdev);
1134 
1135 	platform_set_drvdata(pdev, NULL);
1136 
1137 	if (netdev) {
1138 		netif_napi_del(&priv->napi);
1139 		phy_disconnect(priv->phy);
1140 		priv->phy = NULL;
1141 
1142 		if (priv->mdio) {
1143 			mdiobus_unregister(priv->mdio);
1144 			kfree(priv->mdio->irq);
1145 			mdiobus_free(priv->mdio);
1146 		}
1147 		unregister_netdev(netdev);
1148 		free_netdev(netdev);
1149 	}
1150 
1151 	return 0;
1152 }
1153 
1154 #ifdef CONFIG_PM
1155 static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
1156 {
1157 	return -ENOSYS;
1158 }
1159 
1160 static int ethoc_resume(struct platform_device *pdev)
1161 {
1162 	return -ENOSYS;
1163 }
1164 #else
1165 # define ethoc_suspend NULL
1166 # define ethoc_resume  NULL
1167 #endif
1168 
1169 static const struct of_device_id ethoc_match[] = {
1170 	{ .compatible = "opencores,ethoc", },
1171 	{},
1172 };
1173 MODULE_DEVICE_TABLE(of, ethoc_match);
1174 
1175 static struct platform_driver ethoc_driver = {
1176 	.probe   = ethoc_probe,
1177 	.remove  = __devexit_p(ethoc_remove),
1178 	.suspend = ethoc_suspend,
1179 	.resume  = ethoc_resume,
1180 	.driver  = {
1181 		.name = "ethoc",
1182 		.owner = THIS_MODULE,
1183 		.of_match_table = ethoc_match,
1184 	},
1185 };
1186 
1187 static int __init ethoc_init(void)
1188 {
1189 	return platform_driver_register(&ethoc_driver);
1190 }
1191 
1192 static void __exit ethoc_exit(void)
1193 {
1194 	platform_driver_unregister(&ethoc_driver);
1195 }
1196 
1197 module_init(ethoc_init);
1198 module_exit(ethoc_exit);
1199 
1200 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
1201 MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
1202 MODULE_LICENSE("GPL v2");
1203 
1204