1 /*
2  * ks8842.c timberdale KS8842 ethernet driver
3  * Copyright (c) 2009 Intel Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17  */
18 
19 /* Supports:
20  * The Micrel KS8842 behind the timberdale FPGA
21  * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
22  */
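
/*
 * Illustrative sketch only (not taken from any in-tree board code): one way a
 * platform might register this device, here for a genuine KS8842 on a 16-bit
 * bus running in PIO mode. The address, size and IRQ number are hypothetical;
 * the extra 0x01 | 0x02 bits in the memory resource flags correspond to
 * MICREL_KS884X | KS884X_16BIT (defined further down).
 *
 *	static struct resource ks8842_resources[] = {
 *		{
 *			.start	= 0x10000000,
 *			.end	= 0x100003ff,
 *			.flags	= IORESOURCE_MEM | 0x01 | 0x02,
 *		},
 *		{
 *			.start	= 42,
 *			.end	= 42,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct ks8842_platform_data ks8842_pdata = {
 *		.rx_dma_channel	= -1,
 *		.tx_dma_channel	= -1,
 *	};
 *
 *	platform_device_register_resndata(NULL, "ks8842", -1,
 *		ks8842_resources, ARRAY_SIZE(ks8842_resources),
 *		&ks8842_pdata, sizeof(ks8842_pdata));
 */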
23 
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25 
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/platform_device.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/ethtool.h>
33 #include <linux/ks8842.h>
34 #include <linux/dmaengine.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/scatterlist.h>
37 
38 #define DRV_NAME "ks8842"
39 
40 /* Timberdale specific Registers */
41 #define REG_TIMB_RST		0x1c
42 #define REG_TIMB_FIFO		0x20
43 #define REG_TIMB_ISR		0x24
44 #define REG_TIMB_IER		0x28
45 #define REG_TIMB_IAR		0x2C
46 #define REQ_TIMB_DMA_RESUME	0x30
47 
48 /* KS8842 registers */
49 
50 #define REG_SELECT_BANK 0x0e
51 
52 /* bank 0 registers */
53 #define REG_QRFCR	0x04
54 
55 /* bank 2 registers */
56 #define REG_MARL	0x00
57 #define REG_MARM	0x02
58 #define REG_MARH	0x04
59 
60 /* bank 3 registers */
61 #define REG_GRR		0x06
62 
63 /* bank 16 registers */
64 #define REG_TXCR	0x00
65 #define REG_TXSR	0x02
66 #define REG_RXCR	0x04
67 #define REG_TXMIR	0x08
68 #define REG_RXMIR	0x0A
69 
70 /* bank 17 registers */
71 #define REG_TXQCR	0x00
72 #define REG_RXQCR	0x02
73 #define REG_TXFDPR	0x04
74 #define REG_RXFDPR	0x06
75 #define REG_QMU_DATA_LO 0x08
76 #define REG_QMU_DATA_HI 0x0A
77 
78 /* bank 18 registers */
79 #define REG_IER		0x00
80 #define IRQ_LINK_CHANGE	0x8000
81 #define IRQ_TX		0x4000
82 #define IRQ_RX		0x2000
83 #define IRQ_RX_OVERRUN	0x0800
84 #define IRQ_TX_STOPPED	0x0200
85 #define IRQ_RX_STOPPED	0x0100
86 #define IRQ_RX_ERROR	0x0080
87 #define ENABLED_IRQS	(IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
88 		IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
89 /* When running via timberdale in DMA mode, the RX interrupt should be
90    enabled in the KS8842, but not in the FPGA IP, since the IP handles
91    RX DMA internally.
   TX interrupts are not needed; TX completion is handled by the FPGA and the
   driver is notified via DMA callbacks.
94 */
95 #define ENABLED_IRQS_DMA_IP	(IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
96 	IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
97 #define ENABLED_IRQS_DMA	(ENABLED_IRQS_DMA_IP | IRQ_RX)
98 #define REG_ISR		0x02
99 #define REG_RXSR	0x04
100 #define RXSR_VALID	0x8000
101 #define RXSR_BROADCAST	0x80
102 #define RXSR_MULTICAST	0x40
103 #define RXSR_UNICAST	0x20
104 #define RXSR_FRAMETYPE	0x08
105 #define RXSR_TOO_LONG	0x04
106 #define RXSR_RUNT	0x02
107 #define RXSR_CRC_ERROR	0x01
108 #define RXSR_ERROR	(RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)
109 
110 /* bank 32 registers */
111 #define REG_SW_ID_AND_ENABLE	0x00
112 #define REG_SGCR1		0x02
113 #define REG_SGCR2		0x04
114 #define REG_SGCR3		0x06
115 
116 /* bank 39 registers */
117 #define REG_MACAR1		0x00
118 #define REG_MACAR2		0x02
119 #define REG_MACAR3		0x04
120 
121 /* bank 45 registers */
122 #define REG_P1MBCR		0x00
123 #define REG_P1MBSR		0x02
124 
125 /* bank 46 registers */
126 #define REG_P2MBCR		0x00
127 #define REG_P2MBSR		0x02
128 
129 /* bank 48 registers */
130 #define REG_P1CR2		0x02
131 
132 /* bank 49 registers */
133 #define REG_P1CR4		0x02
134 #define REG_P1SR		0x04
135 
136 /* flags passed by platform_device for configuration */
#define	MICREL_KS884X		0x01	/* 0=Timberdale(FPGA), 1=Micrel */
138 #define	KS884X_16BIT		0x02	/*  1=16bit, 0=32bit */
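/* These bits arrive in the flags field of the platform device's memory
 * resource and are read into adapter->conf_flags in ks8842_probe(). */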
139 
140 #define DMA_BUFFER_SIZE		2048
141 
142 struct ks8842_tx_dma_ctl {
143 	struct dma_chan *chan;
144 	struct dma_async_tx_descriptor *adesc;
145 	void *buf;
146 	struct scatterlist sg;
147 	int channel;
148 };
149 
150 struct ks8842_rx_dma_ctl {
151 	struct dma_chan *chan;
152 	struct dma_async_tx_descriptor *adesc;
153 	struct sk_buff  *skb;
154 	struct scatterlist sg;
155 	struct tasklet_struct tasklet;
156 	int channel;
157 };
158 
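/* DMA is used only when both a TX and an RX DMA channel have been assigned
 * (only possible behind the timberdale FPGA); otherwise the driver falls back
 * to PIO mode.
 */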
159 #define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
160 	 ((adapter)->dma_rx.channel != -1))
161 
162 struct ks8842_adapter {
163 	void __iomem	*hw_addr;
164 	int		irq;
165 	unsigned long	conf_flags;	/* copy of platform_device config */
166 	struct tasklet_struct	tasklet;
167 	spinlock_t	lock; /* spinlock to be interrupt safe */
168 	struct work_struct timeout_work;
169 	struct net_device *netdev;
170 	struct device *dev;
171 	struct ks8842_tx_dma_ctl	dma_tx;
172 	struct ks8842_rx_dma_ctl	dma_rx;
173 };
174 
175 static void ks8842_dma_rx_cb(void *data);
176 static void ks8842_dma_tx_cb(void *data);
177 
178 static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
179 {
180 	iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
181 }
182 
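/* The KS8842 register map is banked: REG_SELECT_BANK picks the active bank,
 * and the accessors below always select the requested bank before touching
 * the given offset. The interrupt paths save and restore the selected bank.
 */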
183 static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
184 {
185 	iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
186 }
187 
188 static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
189 	u8 value, int offset)
190 {
191 	ks8842_select_bank(adapter, bank);
192 	iowrite8(value, adapter->hw_addr + offset);
193 }
194 
195 static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
196 	u16 value, int offset)
197 {
198 	ks8842_select_bank(adapter, bank);
199 	iowrite16(value, adapter->hw_addr + offset);
200 }
201 
202 static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
203 	u16 bits, int offset)
204 {
205 	u16 reg;
206 	ks8842_select_bank(adapter, bank);
207 	reg = ioread16(adapter->hw_addr + offset);
208 	reg |= bits;
209 	iowrite16(reg, adapter->hw_addr + offset);
210 }
211 
212 static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
213 	u16 bits, int offset)
214 {
215 	u16 reg;
216 	ks8842_select_bank(adapter, bank);
217 	reg = ioread16(adapter->hw_addr + offset);
218 	reg &= ~bits;
219 	iowrite16(reg, adapter->hw_addr + offset);
220 }
221 
222 static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
223 	u32 value, int offset)
224 {
225 	ks8842_select_bank(adapter, bank);
226 	iowrite32(value, adapter->hw_addr + offset);
227 }
228 
229 static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
230 	int offset)
231 {
232 	ks8842_select_bank(adapter, bank);
233 	return ioread8(adapter->hw_addr + offset);
234 }
235 
236 static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
237 	int offset)
238 {
239 	ks8842_select_bank(adapter, bank);
240 	return ioread16(adapter->hw_addr + offset);
241 }
242 
243 static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
244 	int offset)
245 {
246 	ks8842_select_bank(adapter, bank);
247 	return ioread32(adapter->hw_addr + offset);
248 }
249 
250 static void ks8842_reset(struct ks8842_adapter *adapter)
251 {
252 	if (adapter->conf_flags & MICREL_KS884X) {
253 		ks8842_write16(adapter, 3, 1, REG_GRR);
254 		msleep(10);
255 		iowrite16(0, adapter->hw_addr + REG_GRR);
256 	} else {
		/* The KS8842 goes haywire when doing software reset,
		* a workaround in the timberdale IP is implemented to
259 		* do a hardware reset instead
260 		ks8842_write16(adapter, 3, 1, REG_GRR);
261 		msleep(10);
262 		iowrite16(0, adapter->hw_addr + REG_GRR);
263 		*/
264 		iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
265 		msleep(20);
266 	}
267 }
268 
269 static void ks8842_update_link_status(struct net_device *netdev,
270 	struct ks8842_adapter *adapter)
271 {
272 	/* check the status of the link */
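	/* bit 2 of P1MBSR is the MII "link status" bit */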
273 	if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
274 		netif_carrier_on(netdev);
275 		netif_wake_queue(netdev);
276 	} else {
277 		netif_stop_queue(netdev);
278 		netif_carrier_off(netdev);
279 	}
280 }
281 
282 static void ks8842_enable_tx(struct ks8842_adapter *adapter)
283 {
284 	ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
285 }
286 
287 static void ks8842_disable_tx(struct ks8842_adapter *adapter)
288 {
289 	ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
290 }
291 
292 static void ks8842_enable_rx(struct ks8842_adapter *adapter)
293 {
294 	ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
295 }
296 
297 static void ks8842_disable_rx(struct ks8842_adapter *adapter)
298 {
299 	ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
300 }
301 
302 static void ks8842_reset_hw(struct ks8842_adapter *adapter)
303 {
304 	/* reset the HW */
305 	ks8842_reset(adapter);
306 
307 	/* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
308 	ks8842_write16(adapter, 16, 0x000E, REG_TXCR);
309 
310 	/* enable the receiver, uni + multi + broadcast + flow ctrl
311 		+ crc strip */
312 	ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
313 		REG_RXCR);
314 
315 	/* TX frame pointer autoincrement */
316 	ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);
317 
318 	/* RX frame pointer autoincrement */
319 	ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);
320 
321 	/* RX 2 kb high watermark */
322 	ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);
323 
324 	/* aggressive back off in half duplex */
325 	ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);
326 
	/* enable no excessive collision drop */
328 	ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
329 
330 	/* Enable port 1 force flow control / back pressure / transmit / recv */
331 	ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);
332 
333 	/* restart port auto-negotiation */
334 	ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
335 
336 	/* Enable the transmitter */
337 	ks8842_enable_tx(adapter);
338 
339 	/* Enable the receiver */
340 	ks8842_enable_rx(adapter);
341 
342 	/* clear all interrupts */
343 	ks8842_write16(adapter, 18, 0xffff, REG_ISR);
344 
345 	/* enable interrupts */
346 	if (KS8842_USE_DMA(adapter)) {
		/* When running in DMA mode the RX interrupt is not enabled in
		   timberdale because RX data is received via DMA callbacks;
		   it must still be enabled in the KS8842 because it indicates
		   to timberdale when there is RX data for its DMA FIFOs */
351 		iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
352 		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
353 	} else {
354 		if (!(adapter->conf_flags & MICREL_KS884X))
355 			iowrite16(ENABLED_IRQS,
356 				adapter->hw_addr + REG_TIMB_IER);
357 		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
358 	}
359 	/* enable the switch */
360 	ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
361 }
362 
363 static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
364 {
365 	int i;
366 	u16 mac;
367 
368 	for (i = 0; i < ETH_ALEN; i++)
369 		dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
370 
371 	if (adapter->conf_flags & MICREL_KS884X) {
372 		/*
		the word order of the MAC address differs between the MAC (QMU)
		registers and the switch registers, so copy it in reverse.
375 		*/
376 
377 		mac = ks8842_read16(adapter, 2, REG_MARL);
378 		ks8842_write16(adapter, 39, mac, REG_MACAR3);
379 		mac = ks8842_read16(adapter, 2, REG_MARM);
380 		ks8842_write16(adapter, 39, mac, REG_MACAR2);
381 		mac = ks8842_read16(adapter, 2, REG_MARH);
382 		ks8842_write16(adapter, 39, mac, REG_MACAR1);
383 	} else {
384 
385 		/* make sure the switch port uses the same MAC as the QMU */
386 		mac = ks8842_read16(adapter, 2, REG_MARL);
387 		ks8842_write16(adapter, 39, mac, REG_MACAR1);
388 		mac = ks8842_read16(adapter, 2, REG_MARM);
389 		ks8842_write16(adapter, 39, mac, REG_MACAR2);
390 		mac = ks8842_read16(adapter, 2, REG_MARH);
391 		ks8842_write16(adapter, 39, mac, REG_MACAR3);
392 	}
393 }
394 
395 static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
396 {
397 	unsigned long flags;
398 	unsigned i;
399 
400 	spin_lock_irqsave(&adapter->lock, flags);
401 	for (i = 0; i < ETH_ALEN; i++) {
402 		ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
403 		if (!(adapter->conf_flags & MICREL_KS884X))
404 			ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
405 				REG_MACAR1 + i);
406 	}
407 
408 	if (adapter->conf_flags & MICREL_KS884X) {
409 		/*
		the word order of the MAC address differs between the MAC (QMU)
		registers and the switch registers, so copy it in reverse.
412 		*/
413 
414 		u16 mac;
415 
416 		mac = ks8842_read16(adapter, 2, REG_MARL);
417 		ks8842_write16(adapter, 39, mac, REG_MACAR3);
418 		mac = ks8842_read16(adapter, 2, REG_MARM);
419 		ks8842_write16(adapter, 39, mac, REG_MACAR2);
420 		mac = ks8842_read16(adapter, 2, REG_MARH);
421 		ks8842_write16(adapter, 39, mac, REG_MACAR1);
422 	}
423 	spin_unlock_irqrestore(&adapter->lock, flags);
424 }
425 
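/* amount of free space in the TX FIFO (bits 12:0 of REG_TXMIR) */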
426 static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
427 {
428 	return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
429 }
430 
431 static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
432 {
433 	struct ks8842_adapter *adapter = netdev_priv(netdev);
434 	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
435 	u8 *buf = ctl->buf;
436 
437 	if (ctl->adesc) {
438 		netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
439 		/* transfer ongoing */
440 		return NETDEV_TX_BUSY;
441 	}
442 
443 	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
444 
445 	/* copy data to the TX buffer */
	/* the control word: destination port 1 and the length (no TX completion
	   IRQ is requested here, completion is signalled via the DMA callback) */
447 	*buf++ = 0x00;
448 	*buf++ = 0x01; /* Port 1 */
449 	*buf++ = skb->len & 0xff;
450 	*buf++ = (skb->len >> 8) & 0xff;
451 	skb_copy_from_linear_data(skb, buf, skb->len);
452 
453 	dma_sync_single_range_for_device(adapter->dev,
454 		sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
455 		DMA_TO_DEVICE);
456 
457 	/* make sure the length is a multiple of 4 */
458 	if (sg_dma_len(&ctl->sg) % 4)
459 		sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
460 
461 	ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
462 		&ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
463 	if (!ctl->adesc)
464 		return NETDEV_TX_BUSY;
465 
466 	ctl->adesc->callback_param = netdev;
467 	ctl->adesc->callback = ks8842_dma_tx_cb;
468 	ctl->adesc->tx_submit(ctl->adesc);
469 
470 	netdev->stats.tx_bytes += skb->len;
471 
472 	dev_kfree_skb(skb);
473 
474 	return NETDEV_TX_OK;
475 }
476 
477 static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
478 {
479 	struct ks8842_adapter *adapter = netdev_priv(netdev);
480 	int len = skb->len;
481 
482 	netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
483 		__func__, skb->len, skb->head, skb->data,
484 		skb_tail_pointer(skb), skb_end_pointer(skb));
485 
486 	/* check FIFO buffer space, we need space for CRC and command bits */
487 	if (ks8842_tx_fifo_space(adapter) < len + 8)
488 		return NETDEV_TX_BUSY;
489 
490 	if (adapter->conf_flags & KS884X_16BIT) {
491 		u16 *ptr16 = (u16 *)skb->data;
492 		ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
493 		ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
494 		netdev->stats.tx_bytes += len;
495 
496 		/* copy buffer */
497 		while (len > 0) {
498 			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
499 			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
500 			len -= sizeof(u32);
501 		}
502 	} else {
503 
504 		u32 *ptr = (u32 *)skb->data;
505 		u32 ctrl;
506 		/* the control word, enable IRQ, port 1 and the length */
507 		ctrl = 0x8000 | 0x100 | (len << 16);
508 		ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);
509 
510 		netdev->stats.tx_bytes += len;
511 
512 		/* copy buffer */
513 		while (len > 0) {
514 			iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
515 			len -= sizeof(u32);
516 			ptr++;
517 		}
518 	}
519 
520 	/* enqueue packet */
521 	ks8842_write16(adapter, 17, 1, REG_TXQCR);
522 
523 	dev_kfree_skb(skb);
524 
525 	return NETDEV_TX_OK;
526 }
527 
528 static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
529 {
530 	netdev_dbg(netdev, "RX error, status: %x\n", status);
531 
532 	netdev->stats.rx_errors++;
533 	if (status & RXSR_TOO_LONG)
534 		netdev->stats.rx_length_errors++;
535 	if (status & RXSR_CRC_ERROR)
536 		netdev->stats.rx_crc_errors++;
537 	if (status & RXSR_RUNT)
538 		netdev->stats.rx_frame_errors++;
539 }
540 
541 static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
542 	int len)
543 {
544 	netdev_dbg(netdev, "RX packet, len: %d\n", len);
545 
546 	netdev->stats.rx_packets++;
547 	netdev->stats.rx_bytes += len;
548 	if (status & RXSR_MULTICAST)
549 		netdev->stats.multicast++;
550 }
551 
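/* Allocate a receive skb, map it for DMA and submit a new slave RX descriptor;
 * on completion ks8842_dma_rx_cb() schedules the RX tasklet which hands the
 * frame to the stack.
 */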
552 static int __ks8842_start_new_rx_dma(struct net_device *netdev)
553 {
554 	struct ks8842_adapter *adapter = netdev_priv(netdev);
555 	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
556 	struct scatterlist *sg = &ctl->sg;
557 	int err;
558 
559 	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
560 	if (ctl->skb) {
561 		sg_init_table(sg, 1);
562 		sg_dma_address(sg) = dma_map_single(adapter->dev,
563 			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
564 		err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
565 		if (unlikely(err)) {
566 			sg_dma_address(sg) = 0;
567 			goto out;
568 		}
569 
570 		sg_dma_len(sg) = DMA_BUFFER_SIZE;
571 
572 		ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
573 			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
574 
575 		if (!ctl->adesc)
576 			goto out;
577 
578 		ctl->adesc->callback_param = netdev;
579 		ctl->adesc->callback = ks8842_dma_rx_cb;
580 		ctl->adesc->tx_submit(ctl->adesc);
581 	} else {
582 		err = -ENOMEM;
583 		sg_dma_address(sg) = 0;
584 		goto out;
585 	}
586 
587 	return err;
588 out:
589 	if (sg_dma_address(sg))
590 		dma_unmap_single(adapter->dev, sg_dma_address(sg),
591 			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
592 	sg_dma_address(sg) = 0;
593 	if (ctl->skb)
594 		dev_kfree_skb(ctl->skb);
595 
596 	ctl->skb = NULL;
597 
598 	printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
599 	return err;
600 }
601 
602 static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
603 {
604 	struct net_device *netdev = (struct net_device *)arg;
605 	struct ks8842_adapter *adapter = netdev_priv(netdev);
606 	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
607 	struct sk_buff *skb = ctl->skb;
608 	dma_addr_t addr = sg_dma_address(&ctl->sg);
609 	u32 status;
610 
611 	ctl->adesc = NULL;
612 
613 	/* kick next transfer going */
614 	__ks8842_start_new_rx_dma(netdev);
615 
616 	/* now handle the data we got */
617 	dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
618 
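	/* the first 4 bytes DMA'd in front of the frame hold the RX status word
	   (low 16 bits) and the frame length (bits 26:16), the same layout as
	   read via REG_QMU_DATA_LO/HI in PIO mode */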
619 	status = *((u32 *)skb->data);
620 
621 	netdev_dbg(netdev, "%s - rx_data: status: %x\n",
622 		__func__, status & 0xffff);
623 
624 	/* check the status */
625 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
626 		int len = (status >> 16) & 0x7ff;
627 
628 		ks8842_update_rx_counters(netdev, status, len);
629 
630 		/* reserve 4 bytes which is the status word */
631 		skb_reserve(skb, 4);
632 		skb_put(skb, len);
633 
634 		skb->protocol = eth_type_trans(skb, netdev);
635 		netif_rx(skb);
636 	} else {
637 		ks8842_update_rx_err_counters(netdev, status);
638 		dev_kfree_skb(skb);
639 	}
640 }
641 
642 static void ks8842_rx_frame(struct net_device *netdev,
643 	struct ks8842_adapter *adapter)
644 {
645 	u32 status;
646 	int len;
647 
648 	if (adapter->conf_flags & KS884X_16BIT) {
649 		status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
650 		len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
651 		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
652 			   __func__, status);
653 	} else {
654 		status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
655 		len = (status >> 16) & 0x7ff;
656 		status &= 0xffff;
657 		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
658 			   __func__, status);
659 	}
660 
661 	/* check the status */
662 	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
663 		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);
664 
665 		if (skb) {
666 
667 			ks8842_update_rx_counters(netdev, status, len);
668 
669 			if (adapter->conf_flags & KS884X_16BIT) {
670 				u16 *data16 = (u16 *)skb_put(skb, len);
671 				ks8842_select_bank(adapter, 17);
672 				while (len > 0) {
673 					*data16++ = ioread16(adapter->hw_addr +
674 						REG_QMU_DATA_LO);
675 					*data16++ = ioread16(adapter->hw_addr +
676 						REG_QMU_DATA_HI);
677 					len -= sizeof(u32);
678 				}
679 			} else {
680 				u32 *data = (u32 *)skb_put(skb, len);
681 
682 				ks8842_select_bank(adapter, 17);
683 				while (len > 0) {
684 					*data++ = ioread32(adapter->hw_addr +
685 						REG_QMU_DATA_LO);
686 					len -= sizeof(u32);
687 				}
688 			}
689 			skb->protocol = eth_type_trans(skb, netdev);
690 			netif_rx(skb);
691 		} else
692 			netdev->stats.rx_dropped++;
693 	} else
694 		ks8842_update_rx_err_counters(netdev, status);
695 
696 	/* set high watermark to 3K */
697 	ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
698 
699 	/* release the frame */
700 	ks8842_write16(adapter, 17, 0x01, REG_RXQCR);
701 
702 	/* set high watermark to 2K */
703 	ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
704 }
705 
706 static void ks8842_handle_rx(struct net_device *netdev,
707 	struct ks8842_adapter *adapter)
708 {
709 	u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
710 	netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
711 	while (rx_data) {
712 		ks8842_rx_frame(netdev, adapter);
713 		rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
714 	}
715 }
716 
717 static void ks8842_handle_tx(struct net_device *netdev,
718 	struct ks8842_adapter *adapter)
719 {
720 	u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
721 	netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
722 	netdev->stats.tx_packets++;
723 	if (netif_queue_stopped(netdev))
724 		netif_wake_queue(netdev);
725 }
726 
727 static void ks8842_handle_rx_overrun(struct net_device *netdev,
728 	struct ks8842_adapter *adapter)
729 {
730 	netdev_dbg(netdev, "%s: entry\n", __func__);
731 	netdev->stats.rx_errors++;
732 	netdev->stats.rx_fifo_errors++;
733 }
734 
735 static void ks8842_tasklet(unsigned long arg)
736 {
737 	struct net_device *netdev = (struct net_device *)arg;
738 	struct ks8842_adapter *adapter = netdev_priv(netdev);
739 	u16 isr;
740 	unsigned long flags;
741 	u16 entry_bank;
742 
743 	/* read current bank to be able to set it back */
744 	spin_lock_irqsave(&adapter->lock, flags);
745 	entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
746 	spin_unlock_irqrestore(&adapter->lock, flags);
747 
748 	isr = ks8842_read16(adapter, 18, REG_ISR);
749 	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
750 
	/* when running in DMA mode, do not ack RX interrupts; they are handled
	   internally by timberdale, otherwise its DMA FIFOs would stop
	*/
754 	if (KS8842_USE_DMA(adapter))
755 		isr &= ~IRQ_RX;
756 
757 	/* Ack */
758 	ks8842_write16(adapter, 18, isr, REG_ISR);
759 
760 	if (!(adapter->conf_flags & MICREL_KS884X))
761 		/* Ack in the timberdale IP as well */
762 		iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
763 
764 	if (!netif_running(netdev))
765 		return;
766 
767 	if (isr & IRQ_LINK_CHANGE)
768 		ks8842_update_link_status(netdev, adapter);
769 
770 	/* should not get IRQ_RX when running DMA mode */
771 	if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
772 		ks8842_handle_rx(netdev, adapter);
773 
774 	/* should only happen when in PIO mode */
775 	if (isr & IRQ_TX)
776 		ks8842_handle_tx(netdev, adapter);
777 
778 	if (isr & IRQ_RX_OVERRUN)
779 		ks8842_handle_rx_overrun(netdev, adapter);
780 
781 	if (isr & IRQ_TX_STOPPED) {
782 		ks8842_disable_tx(adapter);
783 		ks8842_enable_tx(adapter);
784 	}
785 
786 	if (isr & IRQ_RX_STOPPED) {
787 		ks8842_disable_rx(adapter);
788 		ks8842_enable_rx(adapter);
789 	}
790 
791 	/* re-enable interrupts, put back the bank selection register */
792 	spin_lock_irqsave(&adapter->lock, flags);
793 	if (KS8842_USE_DMA(adapter))
794 		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
795 	else
796 		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
797 	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
798 
799 	/* Make sure timberdale continues DMA operations, they are stopped while
800 	   we are handling the ks8842 because we might change bank */
801 	if (KS8842_USE_DMA(adapter))
802 		ks8842_resume_dma(adapter);
803 
804 	spin_unlock_irqrestore(&adapter->lock, flags);
805 }
806 
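/* Hard interrupt handler: mask further interrupts (except RX in DMA mode,
 * which the FPGA relies on) and defer the real work to the tasklet.
 */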
807 static irqreturn_t ks8842_irq(int irq, void *devid)
808 {
809 	struct net_device *netdev = devid;
810 	struct ks8842_adapter *adapter = netdev_priv(netdev);
811 	u16 isr;
812 	u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
813 	irqreturn_t ret = IRQ_NONE;
814 
815 	isr = ks8842_read16(adapter, 18, REG_ISR);
816 	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
817 
818 	if (isr) {
819 		if (KS8842_USE_DMA(adapter))
			/* disable all but RX IRQ, since the FPGA relies on it */
821 			ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
822 		else
823 			/* disable IRQ */
824 			ks8842_write16(adapter, 18, 0x00, REG_IER);
825 
826 		/* schedule tasklet */
827 		tasklet_schedule(&adapter->tasklet);
828 
829 		ret = IRQ_HANDLED;
830 	}
831 
832 	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
833 
834 	/* After an interrupt, tell timberdale to continue DMA operations.
835 	   DMA is disabled while we are handling the ks8842 because we might
836 	   change bank */
837 	ks8842_resume_dma(adapter);
838 
839 	return ret;
840 }
841 
842 static void ks8842_dma_rx_cb(void *data)
843 {
844 	struct net_device	*netdev = data;
845 	struct ks8842_adapter	*adapter = netdev_priv(netdev);
846 
847 	netdev_dbg(netdev, "RX DMA finished\n");
848 	/* schedule tasklet */
849 	if (adapter->dma_rx.adesc)
850 		tasklet_schedule(&adapter->dma_rx.tasklet);
851 }
852 
853 static void ks8842_dma_tx_cb(void *data)
854 {
855 	struct net_device		*netdev = data;
856 	struct ks8842_adapter		*adapter = netdev_priv(netdev);
857 	struct ks8842_tx_dma_ctl	*ctl = &adapter->dma_tx;
858 
859 	netdev_dbg(netdev, "TX DMA finished\n");
860 
861 	if (!ctl->adesc)
862 		return;
863 
864 	netdev->stats.tx_packets++;
865 	ctl->adesc = NULL;
866 
867 	if (netif_queue_stopped(netdev))
868 		netif_wake_queue(netdev);
869 }
870 
871 static void ks8842_stop_dma(struct ks8842_adapter *adapter)
872 {
873 	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
874 	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
875 
876 	tx_ctl->adesc = NULL;
877 	if (tx_ctl->chan)
878 		tx_ctl->chan->device->device_control(tx_ctl->chan,
879 			DMA_TERMINATE_ALL, 0);
880 
881 	rx_ctl->adesc = NULL;
882 	if (rx_ctl->chan)
883 		rx_ctl->chan->device->device_control(rx_ctl->chan,
884 			DMA_TERMINATE_ALL, 0);
885 
886 	if (sg_dma_address(&rx_ctl->sg))
887 		dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
888 			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
889 	sg_dma_address(&rx_ctl->sg) = 0;
890 
891 	dev_kfree_skb(rx_ctl->skb);
892 	rx_ctl->skb = NULL;
893 }
894 
895 static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
896 {
897 	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
898 	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
899 
900 	ks8842_stop_dma(adapter);
901 
902 	if (tx_ctl->chan)
903 		dma_release_channel(tx_ctl->chan);
904 	tx_ctl->chan = NULL;
905 
906 	if (rx_ctl->chan)
907 		dma_release_channel(rx_ctl->chan);
908 	rx_ctl->chan = NULL;
909 
910 	tasklet_kill(&rx_ctl->tasklet);
911 
912 	if (sg_dma_address(&tx_ctl->sg))
913 		dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
914 			DMA_BUFFER_SIZE, DMA_TO_DEVICE);
915 	sg_dma_address(&tx_ctl->sg) = 0;
916 
917 	kfree(tx_ctl->buf);
918 	tx_ctl->buf = NULL;
919 }
920 
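/* dmaengine filter: match the channel whose id equals the channel number
 * handed over via the platform data.
 */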
921 static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
922 {
923 	return chan->chan_id == (long)filter_param;
924 }
925 
926 static int ks8842_alloc_dma_bufs(struct net_device *netdev)
927 {
928 	struct ks8842_adapter *adapter = netdev_priv(netdev);
929 	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
930 	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
931 	int err;
932 
933 	dma_cap_mask_t mask;
934 
935 	dma_cap_zero(mask);
936 	dma_cap_set(DMA_SLAVE, mask);
937 	dma_cap_set(DMA_PRIVATE, mask);
938 
939 	sg_init_table(&tx_ctl->sg, 1);
940 
941 	tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
942 					   (void *)(long)tx_ctl->channel);
943 	if (!tx_ctl->chan) {
944 		err = -ENODEV;
945 		goto err;
946 	}
947 
948 	/* allocate DMA buffer */
949 	tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
950 	if (!tx_ctl->buf) {
951 		err = -ENOMEM;
952 		goto err;
953 	}
954 
955 	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
956 		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
957 	err = dma_mapping_error(adapter->dev,
958 		sg_dma_address(&tx_ctl->sg));
959 	if (err) {
960 		sg_dma_address(&tx_ctl->sg) = 0;
961 		goto err;
962 	}
963 
964 	rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
965 					   (void *)(long)rx_ctl->channel);
966 	if (!rx_ctl->chan) {
967 		err = -ENODEV;
968 		goto err;
969 	}
970 
971 	tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
972 		(unsigned long)netdev);
973 
974 	return 0;
975 err:
976 	ks8842_dealloc_dma_bufs(adapter);
977 	return err;
978 }
979 
980 /* Netdevice operations */
981 
982 static int ks8842_open(struct net_device *netdev)
983 {
984 	struct ks8842_adapter *adapter = netdev_priv(netdev);
985 	int err;
986 
987 	netdev_dbg(netdev, "%s - entry\n", __func__);
988 
989 	if (KS8842_USE_DMA(adapter)) {
990 		err = ks8842_alloc_dma_bufs(netdev);
991 
992 		if (!err) {
993 			/* start RX dma */
994 			err = __ks8842_start_new_rx_dma(netdev);
995 			if (err)
996 				ks8842_dealloc_dma_bufs(adapter);
997 		}
998 
999 		if (err) {
1000 			printk(KERN_WARNING DRV_NAME
1001 				": Failed to initiate DMA, running PIO\n");
1002 			ks8842_dealloc_dma_bufs(adapter);
1003 			adapter->dma_rx.channel = -1;
1004 			adapter->dma_tx.channel = -1;
1005 		}
1006 	}
1007 
1008 	/* reset the HW */
1009 	ks8842_reset_hw(adapter);
1010 
1011 	ks8842_write_mac_addr(adapter, netdev->dev_addr);
1012 
1013 	ks8842_update_link_status(netdev, adapter);
1014 
1015 	err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
1016 		netdev);
1017 	if (err) {
1018 		pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
1019 		return err;
1020 	}
1021 
1022 	return 0;
1023 }
1024 
1025 static int ks8842_close(struct net_device *netdev)
1026 {
1027 	struct ks8842_adapter *adapter = netdev_priv(netdev);
1028 
1029 	netdev_dbg(netdev, "%s - entry\n", __func__);
1030 
1031 	cancel_work_sync(&adapter->timeout_work);
1032 
1033 	if (KS8842_USE_DMA(adapter))
1034 		ks8842_dealloc_dma_bufs(adapter);
1035 
1036 	/* free the irq */
1037 	free_irq(adapter->irq, netdev);
1038 
1039 	/* disable the switch */
1040 	ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);
1041 
1042 	return 0;
1043 }
1044 
1045 static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
1046 				     struct net_device *netdev)
1047 {
1048 	int ret;
1049 	struct ks8842_adapter *adapter = netdev_priv(netdev);
1050 
1051 	netdev_dbg(netdev, "%s: entry\n", __func__);
1052 
1053 	if (KS8842_USE_DMA(adapter)) {
1054 		unsigned long flags;
1055 		ret = ks8842_tx_frame_dma(skb, netdev);
		/* for now only allow one transfer at a time */
1057 		spin_lock_irqsave(&adapter->lock, flags);
1058 		if (adapter->dma_tx.adesc)
1059 			netif_stop_queue(netdev);
1060 		spin_unlock_irqrestore(&adapter->lock, flags);
1061 		return ret;
1062 	}
1063 
1064 	ret = ks8842_tx_frame(skb, netdev);
1065 
1066 	if (ks8842_tx_fifo_space(adapter) <  netdev->mtu + 8)
1067 		netif_stop_queue(netdev);
1068 
1069 	return ret;
1070 }
1071 
1072 static int ks8842_set_mac(struct net_device *netdev, void *p)
1073 {
1074 	struct ks8842_adapter *adapter = netdev_priv(netdev);
1075 	struct sockaddr *addr = p;
1076 	char *mac = (u8 *)addr->sa_data;
1077 
1078 	netdev_dbg(netdev, "%s: entry\n", __func__);
1079 
1080 	if (!is_valid_ether_addr(addr->sa_data))
1081 		return -EADDRNOTAVAIL;
1082 
1083 	memcpy(netdev->dev_addr, mac, netdev->addr_len);
1084 
1085 	ks8842_write_mac_addr(adapter, mac);
1086 	return 0;
1087 }
1088 
1089 static void ks8842_tx_timeout_work(struct work_struct *work)
1090 {
1091 	struct ks8842_adapter *adapter =
1092 		container_of(work, struct ks8842_adapter, timeout_work);
1093 	struct net_device *netdev = adapter->netdev;
1094 	unsigned long flags;
1095 
1096 	netdev_dbg(netdev, "%s: entry\n", __func__);
1097 
1098 	spin_lock_irqsave(&adapter->lock, flags);
1099 
1100 	if (KS8842_USE_DMA(adapter))
1101 		ks8842_stop_dma(adapter);
1102 
1103 	/* disable interrupts */
1104 	ks8842_write16(adapter, 18, 0, REG_IER);
1105 	ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
1106 
1107 	netif_stop_queue(netdev);
1108 
1109 	spin_unlock_irqrestore(&adapter->lock, flags);
1110 
1111 	ks8842_reset_hw(adapter);
1112 
1113 	ks8842_write_mac_addr(adapter, netdev->dev_addr);
1114 
1115 	ks8842_update_link_status(netdev, adapter);
1116 
1117 	if (KS8842_USE_DMA(adapter))
1118 		__ks8842_start_new_rx_dma(netdev);
1119 }
1120 
1121 static void ks8842_tx_timeout(struct net_device *netdev)
1122 {
1123 	struct ks8842_adapter *adapter = netdev_priv(netdev);
1124 
1125 	netdev_dbg(netdev, "%s: entry\n", __func__);
1126 
1127 	schedule_work(&adapter->timeout_work);
1128 }
1129 
1130 static const struct net_device_ops ks8842_netdev_ops = {
1131 	.ndo_open		= ks8842_open,
1132 	.ndo_stop		= ks8842_close,
1133 	.ndo_start_xmit		= ks8842_xmit_frame,
1134 	.ndo_set_mac_address	= ks8842_set_mac,
1135 	.ndo_tx_timeout 	= ks8842_tx_timeout,
1136 	.ndo_validate_addr	= eth_validate_addr
1137 };
1138 
1139 static const struct ethtool_ops ks8842_ethtool_ops = {
1140 	.get_link		= ethtool_op_get_link,
1141 };
1142 
1143 static int ks8842_probe(struct platform_device *pdev)
1144 {
1145 	int err = -ENOMEM;
1146 	struct resource *iomem;
1147 	struct net_device *netdev;
1148 	struct ks8842_adapter *adapter;
1149 	struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
1150 	u16 id;
1151 	unsigned i;
1152 
1153 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1154 	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
1155 		goto err_mem_region;
1156 
1157 	netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
1158 	if (!netdev)
1159 		goto err_alloc_etherdev;
1160 
1161 	SET_NETDEV_DEV(netdev, &pdev->dev);
1162 
1163 	adapter = netdev_priv(netdev);
1164 	adapter->netdev = netdev;
1165 	INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
1166 	adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
1167 	adapter->conf_flags = iomem->flags;
1168 
1169 	if (!adapter->hw_addr)
1170 		goto err_ioremap;
1171 
1172 	adapter->irq = platform_get_irq(pdev, 0);
1173 	if (adapter->irq < 0) {
1174 		err = adapter->irq;
1175 		goto err_get_irq;
1176 	}
1177 
1178 	adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;
1179 
1180 	/* DMA is only supported when accessed via timberdale */
1181 	if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
1182 		(pdata->tx_dma_channel != -1) &&
1183 		(pdata->rx_dma_channel != -1)) {
1184 		adapter->dma_rx.channel = pdata->rx_dma_channel;
1185 		adapter->dma_tx.channel = pdata->tx_dma_channel;
1186 	} else {
1187 		adapter->dma_rx.channel = -1;
1188 		adapter->dma_tx.channel = -1;
1189 	}
1190 
1191 	tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
1192 	spin_lock_init(&adapter->lock);
1193 
1194 	netdev->netdev_ops = &ks8842_netdev_ops;
1195 	netdev->ethtool_ops = &ks8842_ethtool_ops;
1196 
1197 	/* Check if a mac address was given */
1198 	i = netdev->addr_len;
1199 	if (pdata) {
1200 		for (i = 0; i < netdev->addr_len; i++)
1201 			if (pdata->macaddr[i] != 0)
1202 				break;
1203 
1204 		if (i < netdev->addr_len)
1205 			/* an address was passed, use it */
1206 			memcpy(netdev->dev_addr, pdata->macaddr,
1207 				netdev->addr_len);
1208 	}
1209 
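	/* no (non-zero) MAC address in the platform data: try the one stored in
	   the chip, and fall back to a random address if that is invalid */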
1210 	if (i == netdev->addr_len) {
1211 		ks8842_read_mac_addr(adapter, netdev->dev_addr);
1212 
1213 		if (!is_valid_ether_addr(netdev->dev_addr))
1214 			eth_hw_addr_random(netdev);
1215 	}
1216 
1217 	id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
1218 
1219 	strcpy(netdev->name, "eth%d");
1220 	err = register_netdev(netdev);
1221 	if (err)
1222 		goto err_register;
1223 
1224 	platform_set_drvdata(pdev, netdev);
1225 
1226 	pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1227 		(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1228 
1229 	return 0;
1230 
1231 err_register:
1232 err_get_irq:
1233 	iounmap(adapter->hw_addr);
1234 err_ioremap:
1235 	free_netdev(netdev);
1236 err_alloc_etherdev:
1237 	release_mem_region(iomem->start, resource_size(iomem));
1238 err_mem_region:
1239 	return err;
1240 }
1241 
1242 static int ks8842_remove(struct platform_device *pdev)
1243 {
1244 	struct net_device *netdev = platform_get_drvdata(pdev);
1245 	struct ks8842_adapter *adapter = netdev_priv(netdev);
1246 	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1247 
1248 	unregister_netdev(netdev);
1249 	tasklet_kill(&adapter->tasklet);
1250 	iounmap(adapter->hw_addr);
1251 	free_netdev(netdev);
1252 	release_mem_region(iomem->start, resource_size(iomem));
1253 	return 0;
1254 }
1255 
1256 
1257 static struct platform_driver ks8842_platform_driver = {
1258 	.driver = {
1259 		.name	= DRV_NAME,
1260 		.owner	= THIS_MODULE,
1261 	},
1262 	.probe		= ks8842_probe,
1263 	.remove		= ks8842_remove,
1264 };
1265 
1266 module_platform_driver(ks8842_platform_driver);
1267 
1268 MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
1269 MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
1270 MODULE_LICENSE("GPL v2");
1271 MODULE_ALIAS("platform:ks8842");
1272 
1273