/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_MODULE_VERSION	"2.0"
#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

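/* The TX ring holds B44_TX_RING_SIZE descriptors, but only tx_pending of
 * them are usable; TX_RING_GAP() is the reserved slack.  TX_BUFFS_AVAIL()
 * handles both the straight (cons <= prod) and the wrapped case, e.g.
 * with tx_pending = 511, cons = 5 and prod = 3 (wrapped), 5 - 3 - 1 = 1
 * descriptor is still free.  NEXT_TX() relies on the ring size being a
 * power of two.
 */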
#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

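/* Each RX buffer holds the chip-written rx_header plus two bytes of
 * padding (which leaves the IP header of the received frame 4-byte
 * aligned), followed by up to 1536 bytes of frame data.
 */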
#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
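/* A typical invocation, enabling the first six message classes above
 * (bit values come from the NETIF_MSG_* enum in linux/netdevice.h):
 *
 *	modprobe b44 b44_debug=0x3f
 */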


#ifdef CONFIG_B44_PCI
static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

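/* Reset depth selectors: the B44_FULL_RESET* and B44_PARTIAL_RESET
 * values are passed to b44_init_hw(), the B44_CHIP_RESET_* values to
 * b44_chip_reset().
 */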
#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

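/* ethtool statistics names, generated from B44_STAT_REG_DECLARE in b44.h:
 * the _B44() stringification turns each counter name into its display
 * string, in MIB register order.
 */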
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}

static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
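	/*
	 * With our advertisement fixed at Pause+AsymPause (see
	 * b44_setup_phy), the only combination accepted here is a link
	 * partner that advertises asymmetric pause without symmetric
	 * pause.
	 */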
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

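	/* The u64 counters in struct b44_hw_stats are declared in MIB
	 * register order, so a single walking pointer can accumulate
	 * first the TX block and then the RX block.
	 */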
	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}

static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_kfree_skb_irq(skb);
	}

	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
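/*
 * Resulting buffer layout (RX_PKT_OFFSET == RX_HEADER_LEN + 2 == 30):
 *
 *	skb->data + 0	struct rx_header, written by the chip
 *	skb->data + 30	start of the received frame data
 */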
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

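	/* Kick the DMA engine.  Some chip revisions need the TX pointer
	 * written twice (B44_FLAG_BUGGY_TXPTR), others a readback to
	 * flush the posted write (B44_FLAG_REORDER_BUG).
	 */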
	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	netdev_sent_queue(dev, skb->len);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

1068 
1069 /* Free up pending packets in all rx/tx rings.
1070  *
1071  * The chip has been shut down and the driver detached from
1072  * the networking, so no interrupts or new tx packets will
1073  * end up in the driver.  bp->lock is not held and we are not
1074  * in an interrupt context and thus may sleep.
1075  */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to ssb_dma_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable the PHY if we are doing a partial reset;
	 * we are probably going to power down.
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it. This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

	netdev_reset_queue(bp->dev);
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

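/* Build one Wake-on-LAN magic-packet match at @offset: a sync stream of
 * six 0xff bytes followed by the station MAC address repeated up to
 * sixteen times (as far as the 128-byte pattern allows), setting the
 * corresponding bits in the byte mask.  Returns the pattern length
 * minus one, which is what the WKUP_LEN register expects.
 */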
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			set_bit(len++, (unsigned long *) pmask);
		}
	}
	return len - 1;
}

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern)
		return;

	/* Ipv4 magic packet pattern - pattern 0. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *nstat)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	unsigned int start;

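	/* Snapshot the 64-bit counters; retry if b44_stats_update()
	 * changed them while we were copying (u64_stats seqcount).
	 */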
1656 	do {
1657 		start = u64_stats_fetch_begin_bh(&hwstat->syncp);
1658 
1659 		/* Convert HW stats into rtnl_link_stats64 stats. */
1660 		nstat->rx_packets = hwstat->rx_pkts;
1661 		nstat->tx_packets = hwstat->tx_pkts;
1662 		nstat->rx_bytes   = hwstat->rx_octets;
1663 		nstat->tx_bytes   = hwstat->tx_octets;
1664 		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1665 				     hwstat->tx_oversize_pkts +
1666 				     hwstat->tx_underruns +
1667 				     hwstat->tx_excessive_cols +
1668 				     hwstat->tx_late_cols);
1669 		nstat->multicast  = hwstat->tx_multicast_pkts;
1670 		nstat->collisions = hwstat->tx_total_cols;
1671 
1672 		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1673 					   hwstat->rx_undersize);
1674 		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1675 		nstat->rx_frame_errors  = hwstat->rx_align_errs;
1676 		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1677 		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1678 					   hwstat->rx_oversize_pkts +
1679 					   hwstat->rx_missed_pkts +
1680 					   hwstat->rx_crc_align_errs +
1681 					   hwstat->rx_undersize +
1682 					   hwstat->rx_crc_errs +
1683 					   hwstat->rx_align_errs +
1684 					   hwstat->rx_symbol_errs);
1685 
1686 		nstat->tx_aborted_errors = hwstat->tx_underruns;
1687 #if 0
1688 		/* Carrier lost counter seems to be broken for some devices */
1689 		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1690 #endif
1691 	} while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
1692 
1693 	return nstat;
1694 }
1695 
1696 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1697 {
1698 	struct netdev_hw_addr *ha;
1699 	int i, num_ents;
1700 
1701 	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1702 	i = 0;
1703 	netdev_for_each_mc_addr(ha, dev) {
1704 		if (i == num_ents)
1705 			break;
1706 		__b44_cam_write(bp, ha->addr, i++ + 1);
1707 	}
1708 	return i+1;
1709 }
1710 
1711 static void __b44_set_rx_mode(struct net_device *dev)
1712 {
1713 	struct b44 *bp = netdev_priv(dev);
1714 	u32 val;
1715 
1716 	val = br32(bp, B44_RXCONFIG);
1717 	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1718 	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1719 		val |= RXCONFIG_PROMISC;
1720 		bw32(bp, B44_RXCONFIG, val);
1721 	} else {
1722 		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1723 		int i = 1;
1724 
1725 		__b44_set_mac_addr(bp);
1726 
1727 		if ((dev->flags & IFF_ALLMULTI) ||
1728 		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1729 			val |= RXCONFIG_ALLMULTI;
1730 		else
1731 			i = __b44_load_mcast(bp, dev);
1732 
1733 		for (; i < 64; i++)
1734 			__b44_cam_write(bp, zero, i);
1735 
1736 		bw32(bp, B44_RXCONFIG, val);
1737         	val = br32(bp, B44_CAM_CTRL);
1738 	        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1739 	}
1740 }
1741 
1742 static void b44_set_rx_mode(struct net_device *dev)
1743 {
1744 	struct b44 *bp = netdev_priv(dev);
1745 
1746 	spin_lock_irq(&bp->lock);
1747 	__b44_set_rx_mode(dev);
1748 	spin_unlock_irq(&bp->lock);
1749 }
1750 
1751 static u32 b44_get_msglevel(struct net_device *dev)
1752 {
1753 	struct b44 *bp = netdev_priv(dev);
1754 	return bp->msg_enable;
1755 }
1756 
1757 static void b44_set_msglevel(struct net_device *dev, u32 value)
1758 {
1759 	struct b44 *bp = netdev_priv(dev);
1760 	bp->msg_enable = value;
1761 }
1762 
1763 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1764 {
1765 	struct b44 *bp = netdev_priv(dev);
1766 	struct ssb_bus *bus = bp->sdev->bus;
1767 
1768 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1769 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1770 	switch (bus->bustype) {
1771 	case SSB_BUSTYPE_PCI:
1772 		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1773 		break;
1774 	case SSB_BUSTYPE_SSB:
1775 		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1776 		break;
1777 	case SSB_BUSTYPE_PCMCIA:
1778 	case SSB_BUSTYPE_SDIO:
1779 		WARN_ON(1); /* A device with this bus does not exist. */
1780 		break;
1781 	}
1782 }
1783 
1784 static int b44_nway_reset(struct net_device *dev)
1785 {
1786 	struct b44 *bp = netdev_priv(dev);
1787 	u32 bmcr;
1788 	int r;
1789 
1790 	spin_lock_irq(&bp->lock);
1791 	b44_readphy(bp, MII_BMCR, &bmcr);
1792 	b44_readphy(bp, MII_BMCR, &bmcr);
1793 	r = -EINVAL;
1794 	if (bmcr & BMCR_ANENABLE) {
1795 		b44_writephy(bp, MII_BMCR,
1796 			     bmcr | BMCR_ANRESTART);
1797 		r = 0;
1798 	}
1799 	spin_unlock_irq(&bp->lock);
1800 
1801 	return r;
1802 }
1803 
1804 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1805 {
1806 	struct b44 *bp = netdev_priv(dev);
1807 
1808 	cmd->supported = (SUPPORTED_Autoneg);
1809 	cmd->supported |= (SUPPORTED_100baseT_Half |
1810 			  SUPPORTED_100baseT_Full |
1811 			  SUPPORTED_10baseT_Half |
1812 			  SUPPORTED_10baseT_Full |
1813 			  SUPPORTED_MII);
1814 
1815 	cmd->advertising = 0;
1816 	if (bp->flags & B44_FLAG_ADV_10HALF)
1817 		cmd->advertising |= ADVERTISED_10baseT_Half;
1818 	if (bp->flags & B44_FLAG_ADV_10FULL)
1819 		cmd->advertising |= ADVERTISED_10baseT_Full;
1820 	if (bp->flags & B44_FLAG_ADV_100HALF)
1821 		cmd->advertising |= ADVERTISED_100baseT_Half;
1822 	if (bp->flags & B44_FLAG_ADV_100FULL)
1823 		cmd->advertising |= ADVERTISED_100baseT_Full;
1824 	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1825 	ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1826 				    SPEED_100 : SPEED_10));
1827 	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1828 		DUPLEX_FULL : DUPLEX_HALF;
1829 	cmd->port = 0;
1830 	cmd->phy_address = bp->phy_addr;
1831 	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1832 		XCVR_INTERNAL : XCVR_EXTERNAL;
1833 	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1834 		AUTONEG_DISABLE : AUTONEG_ENABLE;
1835 	if (cmd->autoneg == AUTONEG_ENABLE)
1836 		cmd->advertising |= ADVERTISED_Autoneg;
1837 	if (!netif_running(dev)){
1838 		ethtool_cmd_speed_set(cmd, 0);
1839 		cmd->duplex = 0xff;
1840 	}
1841 	cmd->maxtxpkt = 0;
1842 	cmd->maxrxpkt = 0;
1843 	return 0;
1844 }
1845 
1846 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1847 {
1848 	struct b44 *bp = netdev_priv(dev);
1849 	u32 speed = ethtool_cmd_speed(cmd);
1850 
1851 	/* We do not support gigabit. */
1852 	if (cmd->autoneg == AUTONEG_ENABLE) {
1853 		if (cmd->advertising &
1854 		    (ADVERTISED_1000baseT_Half |
1855 		     ADVERTISED_1000baseT_Full))
1856 			return -EINVAL;
1857 	} else if ((speed != SPEED_100 &&
1858 		    speed != SPEED_10) ||
1859 		   (cmd->duplex != DUPLEX_HALF &&
1860 		    cmd->duplex != DUPLEX_FULL)) {
1861 			return -EINVAL;
1862 	}
1863 
1864 	spin_lock_irq(&bp->lock);
1865 
1866 	if (cmd->autoneg == AUTONEG_ENABLE) {
1867 		bp->flags &= ~(B44_FLAG_FORCE_LINK |
1868 			       B44_FLAG_100_BASE_T |
1869 			       B44_FLAG_FULL_DUPLEX |
1870 			       B44_FLAG_ADV_10HALF |
1871 			       B44_FLAG_ADV_10FULL |
1872 			       B44_FLAG_ADV_100HALF |
1873 			       B44_FLAG_ADV_100FULL);
1874 		if (cmd->advertising == 0) {
1875 			bp->flags |= (B44_FLAG_ADV_10HALF |
1876 				      B44_FLAG_ADV_10FULL |
1877 				      B44_FLAG_ADV_100HALF |
1878 				      B44_FLAG_ADV_100FULL);
1879 		} else {
1880 			if (cmd->advertising & ADVERTISED_10baseT_Half)
1881 				bp->flags |= B44_FLAG_ADV_10HALF;
1882 			if (cmd->advertising & ADVERTISED_10baseT_Full)
1883 				bp->flags |= B44_FLAG_ADV_10FULL;
1884 			if (cmd->advertising & ADVERTISED_100baseT_Half)
1885 				bp->flags |= B44_FLAG_ADV_100HALF;
1886 			if (cmd->advertising & ADVERTISED_100baseT_Full)
1887 				bp->flags |= B44_FLAG_ADV_100FULL;
1888 		}
1889 	} else {
1890 		bp->flags |= B44_FLAG_FORCE_LINK;
1891 		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1892 		if (speed == SPEED_100)
1893 			bp->flags |= B44_FLAG_100_BASE_T;
1894 		if (cmd->duplex == DUPLEX_FULL)
1895 			bp->flags |= B44_FLAG_FULL_DUPLEX;
1896 	}
1897 
1898 	if (netif_running(dev))
1899 		b44_setup_phy(bp);
1900 
1901 	spin_unlock_irq(&bp->lock);
1902 
1903 	return 0;
1904 }
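/* Illustrative mapping for b44_set_settings(): a forced-mode request
 * from a hypothetical caller,
 *
 *	ecmd.autoneg = AUTONEG_DISABLE;
 *	ethtool_cmd_speed_set(&ecmd, SPEED_100);
 *	ecmd.duplex = DUPLEX_FULL;
 *
 * becomes B44_FLAG_FORCE_LINK | B44_FLAG_100_BASE_T |
 * B44_FLAG_FULL_DUPLEX, which b44_setup_phy() is then expected to
 * program into the PHY when the interface is running.
 */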
1905 
1906 static void b44_get_ringparam(struct net_device *dev,
1907 			      struct ethtool_ringparam *ering)
1908 {
1909 	struct b44 *bp = netdev_priv(dev);
1910 
1911 	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1912 	ering->rx_pending = bp->rx_pending;
1913 	ering->tx_max_pending = B44_TX_RING_SIZE - 1;
1914 	ering->tx_pending = bp->tx_pending;
1915 }
1916 
1917 static int b44_set_ringparam(struct net_device *dev,
1918 			     struct ethtool_ringparam *ering)
1919 {
1920 	struct b44 *bp = netdev_priv(dev);
1921 
1922 	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1923 	    (ering->rx_mini_pending != 0) ||
1924 	    (ering->rx_jumbo_pending != 0) ||
1925 	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
1926 		return -EINVAL;
1927 
1928 	spin_lock_irq(&bp->lock);
1929 
1930 	bp->rx_pending = ering->rx_pending;
1931 	bp->tx_pending = ering->tx_pending;
1932 
1933 	b44_halt(bp);
1934 	b44_init_rings(bp);
1935 	b44_init_hw(bp, B44_FULL_RESET);
1936 	netif_wake_queue(bp->dev);
1937 	spin_unlock_irq(&bp->lock);
1938 
1939 	b44_enable_ints(bp);
1940 
1941 	return 0;
1942 }
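/* Resizing note: the hardware cannot grow or shrink a live ring, so
 * b44_set_ringparam() halts the MAC, rebuilds both rings and does a full
 * re-init; interrupts are re-enabled only after the lock is dropped,
 * matching the other reconfiguration paths in this file.  User-space
 * illustration (the values shown are the driver defaults):
 *
 *	ethtool -G eth0 rx 200 tx 511
 */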
1943 
1944 static void b44_get_pauseparam(struct net_device *dev,
1945 				struct ethtool_pauseparam *epause)
1946 {
1947 	struct b44 *bp = netdev_priv(dev);
1948 
1949 	epause->autoneg =
1950 		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1951 	epause->rx_pause =
1952 		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
1953 	epause->tx_pause =
1954 		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
1955 }
1956 
1957 static int b44_set_pauseparam(struct net_device *dev,
1958 				struct ethtool_pauseparam *epause)
1959 {
1960 	struct b44 *bp = netdev_priv(dev);
1961 
1962 	spin_lock_irq(&bp->lock);
1963 	if (epause->autoneg)
1964 		bp->flags |= B44_FLAG_PAUSE_AUTO;
1965 	else
1966 		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1967 	if (epause->rx_pause)
1968 		bp->flags |= B44_FLAG_RX_PAUSE;
1969 	else
1970 		bp->flags &= ~B44_FLAG_RX_PAUSE;
1971 	if (epause->tx_pause)
1972 		bp->flags |= B44_FLAG_TX_PAUSE;
1973 	else
1974 		bp->flags &= ~B44_FLAG_TX_PAUSE;
1975 	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1976 		b44_halt(bp);
1977 		b44_init_rings(bp);
1978 		b44_init_hw(bp, B44_FULL_RESET);
1979 	} else {
1980 		__b44_set_flow_ctrl(bp, bp->flags);
1981 	}
1982 	spin_unlock_irq(&bp->lock);
1983 
1984 	b44_enable_ints(bp);
1985 
1986 	return 0;
1987 }
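/* Pause handling above takes one of two routes: with B44_FLAG_PAUSE_AUTO
 * set, the new pause advertisement only takes effect through a full halt
 * and re-init (renegotiation), while manual settings are programmed
 * directly by __b44_set_flow_ctrl().  E.g. (user space, illustrative):
 *
 *	ethtool -A eth0 autoneg off rx on tx off
 *
 * sets B44_FLAG_RX_PAUSE, clears B44_FLAG_TX_PAUSE and skips the reset.
 */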
1988 
1989 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1990 {
1991 	switch (stringset) {
1992 	case ETH_SS_STATS:
1993 		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1994 		break;
1995 	}
1996 }
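/* b44_gstrings (defined earlier in this file) is a fixed-width
 * [ETH_GSTRING_LEN] string table, so *b44_gstrings decays to its first
 * byte and a single memcpy() exports every counter name; the matching
 * count is reported by b44_get_sset_count() below.
 */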
1997 
1998 static int b44_get_sset_count(struct net_device *dev, int sset)
1999 {
2000 	switch (sset) {
2001 	case ETH_SS_STATS:
2002 		return ARRAY_SIZE(b44_gstrings);
2003 	default:
2004 		return -EOPNOTSUPP;
2005 	}
2006 }
2007 
2008 static void b44_get_ethtool_stats(struct net_device *dev,
2009 				  struct ethtool_stats *stats, u64 *data)
2010 {
2011 	struct b44 *bp = netdev_priv(dev);
2012 	struct b44_hw_stats *hwstat = &bp->hw_stats;
2013 	u64 *data_src, *data_dst;
2014 	unsigned int start;
2015 	u32 i;
2016 
2017 	spin_lock_irq(&bp->lock);
2018 	b44_stats_update(bp);
2019 	spin_unlock_irq(&bp->lock);
2020 
2021 	do {
2022 		data_src = &hwstat->tx_good_octets;
2023 		data_dst = data;
2024 		start = u64_stats_fetch_begin_bh(&hwstat->syncp);
2025 
2026 		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2027 			*data_dst++ = *data_src++;
2028 
2029 	} while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
2030 }
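/* The fetch_begin/retry pair above is the standard u64_stats seqcount
 * reader: on 32-bit hosts a concurrent writer can tear a 64-bit counter,
 * so the copy loop repeats until no update raced it.  The pointer walk
 * assumes b44_hw_stats keeps its u64 counters contiguous and in
 * b44_gstrings order, starting at tx_good_octets.
 */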
2031 
2032 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2033 {
2034 	struct b44 *bp = netdev_priv(dev);
2035 
2036 	wol->supported = WAKE_MAGIC;
2037 	if (bp->flags & B44_FLAG_WOL_ENABLE)
2038 		wol->wolopts = WAKE_MAGIC;
2039 	else
2040 		wol->wolopts = 0;
2041 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2042 }
2043 
2044 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2045 {
2046 	struct b44 *bp = netdev_priv(dev);
2047 
2048 	spin_lock_irq(&bp->lock);
2049 	if (wol->wolopts & WAKE_MAGIC)
2050 		bp->flags |= B44_FLAG_WOL_ENABLE;
2051 	else
2052 		bp->flags &= ~B44_FLAG_WOL_ENABLE;
2053 	spin_unlock_irq(&bp->lock);
2054 
2055 	return 0;
2056 }
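/* WoL here is flag-only: b44_set_wol() just records the request, and the
 * hardware is actually armed in b44_suspend() below via a partial
 * re-init plus b44_setup_wol().  Only magic-packet wake is offered,
 * e.g. (user space, illustrative):
 *
 *	ethtool -s eth0 wol g
 */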
2057 
2058 static const struct ethtool_ops b44_ethtool_ops = {
2059 	.get_drvinfo		= b44_get_drvinfo,
2060 	.get_settings		= b44_get_settings,
2061 	.set_settings		= b44_set_settings,
2062 	.nway_reset		= b44_nway_reset,
2063 	.get_link		= ethtool_op_get_link,
2064 	.get_wol		= b44_get_wol,
2065 	.set_wol		= b44_set_wol,
2066 	.get_ringparam		= b44_get_ringparam,
2067 	.set_ringparam		= b44_set_ringparam,
2068 	.get_pauseparam		= b44_get_pauseparam,
2069 	.set_pauseparam		= b44_set_pauseparam,
2070 	.get_msglevel		= b44_get_msglevel,
2071 	.set_msglevel		= b44_set_msglevel,
2072 	.get_strings		= b44_get_strings,
2073 	.get_sset_count		= b44_get_sset_count,
2074 	.get_ethtool_stats	= b44_get_ethtool_stats,
2075 };
2076 
2077 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2078 {
2079 	struct mii_ioctl_data *data = if_mii(ifr);
2080 	struct b44 *bp = netdev_priv(dev);
2081 	int err = -EINVAL;
2082 
2083 	if (!netif_running(dev))
2084 		goto out;
2085 
2086 	spin_lock_irq(&bp->lock);
2087 	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2088 	spin_unlock_irq(&bp->lock);
2089 out:
2090 	return err;
2091 }
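/* The ioctl path defers to generic_mii_ioctl(), so SIOCGMIIPHY,
 * SIOCGMIIREG and SIOCSMIIREG are serviced through bp->mii_if with the
 * b44_mii_read/b44_mii_write hooks wired up in b44_init_one() below.
 * It deliberately fails with -EINVAL while the interface is down,
 * presumably because the core may not be powered for MDIO access.
 */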
2092 
2093 static int b44_get_invariants(struct b44 *bp)
2094 {
2095 	struct ssb_device *sdev = bp->sdev;
2096 	int err = 0;
2097 	u8 *addr;
2098 
2099 	bp->dma_offset = ssb_dma_translation(sdev);
2100 
2101 	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2102 	    instance > 1) {
2103 		addr = sdev->bus->sprom.et1mac;
2104 		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2105 	} else {
2106 		addr = sdev->bus->sprom.et0mac;
2107 		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2108 	}
2109 	/* Some ROMs have buggy PHY addresses with the high
2110 	 * bits set (sign extension?). Truncate them to a
2111 	 * valid PHY address. */
2112 	bp->phy_addr &= 0x1F;
2113 
2114 	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2115 
2116 	if (!is_valid_ether_addr(bp->dev->dev_addr)) {
2117 		pr_err("Invalid MAC address found in EEPROM\n");
2118 		return -EINVAL;
2119 	}
2120 
2121 	bp->imask = IMASK_DEF;
2122 
2123 	/* XXX - really required?
2124 	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
2125 	*/
2126 
2127 	if (bp->sdev->id.revision >= 7)
2128 		bp->flags |= B44_FLAG_B0_ANDLATER;
2129 
2130 	return err;
2131 }
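/* PHY address sanity example for b44_get_invariants(): MII addresses
 * are 5 bits wide (0-31), so a sign-extended SPROM value such as 0xffe1
 * is masked down to a usable address:
 *
 *	0xffe1 & 0x1f == 0x01
 *
 * The et1* SPROM fields are consulted only for the second and later
 * cores on a native SSB bus (instance > 1).
 */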
2132 
2133 static const struct net_device_ops b44_netdev_ops = {
2134 	.ndo_open		= b44_open,
2135 	.ndo_stop		= b44_close,
2136 	.ndo_start_xmit		= b44_start_xmit,
2137 	.ndo_get_stats64	= b44_get_stats64,
2138 	.ndo_set_rx_mode	= b44_set_rx_mode,
2139 	.ndo_set_mac_address	= b44_set_mac_addr,
2140 	.ndo_validate_addr	= eth_validate_addr,
2141 	.ndo_do_ioctl		= b44_ioctl,
2142 	.ndo_tx_timeout		= b44_tx_timeout,
2143 	.ndo_change_mtu		= b44_change_mtu,
2144 #ifdef CONFIG_NET_POLL_CONTROLLER
2145 	.ndo_poll_controller	= b44_poll_controller,
2146 #endif
2147 };
2148 
2149 static int b44_init_one(struct ssb_device *sdev,
2150 			const struct ssb_device_id *ent)
2151 {
2152 	struct net_device *dev;
2153 	struct b44 *bp;
2154 	int err;
2155 
2156 	instance++;
2157 
2158 	pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
2159 
2160 	dev = alloc_etherdev(sizeof(*bp));
2161 	if (!dev) {
2162 		err = -ENOMEM;
2163 		goto out;
2164 	}
2165 
2166 	SET_NETDEV_DEV(dev, sdev->dev);
2167 
2168 	/* No interesting netdevice features in this card... */
2169 	dev->features |= 0;
2170 
2171 	bp = netdev_priv(dev);
2172 	bp->sdev = sdev;
2173 	bp->dev = dev;
2174 	bp->force_copybreak = 0;
2175 
2176 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2177 
2178 	spin_lock_init(&bp->lock);
2179 
2180 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
2181 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
2182 
2183 	dev->netdev_ops = &b44_netdev_ops;
2184 	netif_napi_add(dev, &bp->napi, b44_poll, 64);
2185 	dev->watchdog_timeo = B44_TX_TIMEOUT;
2186 	dev->irq = sdev->irq;
2187 	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2188 
2189 	err = ssb_bus_powerup(sdev->bus, 0);
2190 	if (err) {
2191 		dev_err(sdev->dev,
2192 			"Failed to powerup the bus\n");
2193 		goto err_out_free_dev;
2194 	}
2195 
2196 	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
2197 		dev_err(sdev->dev,
2198 			"Required 30BIT DMA mask unsupported by the system\n");
2199 		goto err_out_powerdown;
2200 	}
2201 
2202 	err = b44_get_invariants(bp);
2203 	if (err) {
2204 		dev_err(sdev->dev,
2205 			"Problem fetching invariants of chip, aborting\n");
2206 		goto err_out_powerdown;
2207 	}
2208 
2209 	bp->mii_if.dev = dev;
2210 	bp->mii_if.mdio_read = b44_mii_read;
2211 	bp->mii_if.mdio_write = b44_mii_write;
2212 	bp->mii_if.phy_id = bp->phy_addr;
2213 	bp->mii_if.phy_id_mask = 0x1f;
2214 	bp->mii_if.reg_num_mask = 0x1f;
2215 
2216 	/* By default, advertise all speed/duplex settings. */
2217 	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2218 		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2219 
2220 	/* By default, auto-negotiate PAUSE. */
2221 	bp->flags |= B44_FLAG_PAUSE_AUTO;
2222 
2223 	err = register_netdev(dev);
2224 	if (err) {
2225 		dev_err(sdev->dev, "Cannot register net device, aborting\n");
2226 		goto err_out_powerdown;
2227 	}
2228 
2229 	netif_carrier_off(dev);
2230 
2231 	ssb_set_drvdata(sdev, dev);
2232 
2233 	/* Chip reset provides power to the b44 MAC & PCI cores, which
2234 	 * is necessary for MAC register access.
2235 	 */
2236 	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2237 
2238 	/* Do a PHY reset to test whether there is an active PHY. */
2239 	if (b44_phy_reset(bp) < 0)
2240 		bp->phy_addr = B44_PHY_ADDR_NO_PHY;
2241 
2242 	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2243 
2244 	return 0;
2245 
2246 err_out_powerdown:
2247 	ssb_bus_may_powerdown(sdev->bus);
2248 
2249 err_out_free_dev:
2250 	free_netdev(dev);
2251 
2252 out:
2253 	return err;
2254 }
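/* Probe ordering above matters: the SSB bus must be powered up before
 * any register access, and the 30-bit DMA mask reflects a limit of the
 * core itself (2^30 bytes, so descriptors and buffers must fall within
 * the first 1 GiB of DMA address space).  register_netdev() runs before
 * the chip/PHY reset, so the device is visible early but carrier stays
 * off until the link state machine has run.
 */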
2255 
2256 static void b44_remove_one(struct ssb_device *sdev)
2257 {
2258 	struct net_device *dev = ssb_get_drvdata(sdev);
2259 
2260 	unregister_netdev(dev);
2261 	ssb_device_disable(sdev, 0);
2262 	ssb_bus_may_powerdown(sdev->bus);
2263 	free_netdev(dev);
2264 	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2265 	ssb_set_drvdata(sdev, NULL);
2266 }
2267 
2268 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2269 {
2270 	struct net_device *dev = ssb_get_drvdata(sdev);
2271 	struct b44 *bp = netdev_priv(dev);
2272 
2273 	if (!netif_running(dev))
2274 		return 0;
2275 
2276 	del_timer_sync(&bp->timer);
2277 
2278 	spin_lock_irq(&bp->lock);
2279 
2280 	b44_halt(bp);
2281 	netif_carrier_off(bp->dev);
2282 	netif_device_detach(bp->dev);
2283 	b44_free_rings(bp);
2284 
2285 	spin_unlock_irq(&bp->lock);
2286 
2287 	free_irq(dev->irq, dev);
2288 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
2289 		b44_init_hw(bp, B44_PARTIAL_RESET);
2290 		b44_setup_wol(bp);
2291 	}
2292 
2293 	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2294 	return 0;
2295 }
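/* Suspend teardown order (sketch): the timer is stopped first so no link
 * poll races the halt, the MAC is halted and the rings freed under the
 * lock, and free_irq() is called only after the lock is dropped since it
 * can sleep.  With WoL enabled, the partial re-init leaves just enough
 * of the MAC alive to match magic packets while the device sits in
 * D3hot.
 */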
2296 
2297 static int b44_resume(struct ssb_device *sdev)
2298 {
2299 	struct net_device *dev = ssb_get_drvdata(sdev);
2300 	struct b44 *bp = netdev_priv(dev);
2301 	int rc;
2302 
2303 	rc = ssb_bus_powerup(sdev->bus, 0);
2304 	if (rc) {
2305 		dev_err(sdev->dev,
2306 			"Failed to powerup the bus\n");
2307 		return rc;
2308 	}
2309 
2310 	if (!netif_running(dev))
2311 		return 0;
2312 
2313 	spin_lock_irq(&bp->lock);
2314 	b44_init_rings(bp);
2315 	b44_init_hw(bp, B44_FULL_RESET);
2316 	spin_unlock_irq(&bp->lock);
2317 
2318 	/*
2319 	 * As a shared interrupt, the handler can be called immediately. To be
2320 	 * able to check the interrupt status the hardware must already be
2321 	 * powered back on (b44_init_hw).
2322 	 */
2323 	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2324 	if (rc) {
2325 		netdev_err(dev, "request_irq failed\n");
2326 		spin_lock_irq(&bp->lock);
2327 		b44_halt(bp);
2328 		b44_free_rings(bp);
2329 		spin_unlock_irq(&bp->lock);
2330 		return rc;
2331 	}
2332 
2333 	netif_device_attach(bp->dev);
2334 
2335 	b44_enable_ints(bp);
2336 	netif_wake_queue(dev);
2337 
2338 	mod_timer(&bp->timer, jiffies + 1);
2339 
2340 	return 0;
2341 }
2342 
2343 static struct ssb_driver b44_ssb_driver = {
2344 	.name		= DRV_MODULE_NAME,
2345 	.id_table	= b44_ssb_tbl,
2346 	.probe		= b44_init_one,
2347 	.remove		= b44_remove_one,
2348 	.suspend	= b44_suspend,
2349 	.resume		= b44_resume,
2350 };
2351 
2352 static inline int __init b44_pci_init(void)
2353 {
2354 	int err = 0;
2355 #ifdef CONFIG_B44_PCI
2356 	err = ssb_pcihost_register(&b44_pci_driver);
2357 #endif
2358 	return err;
2359 }
2360 
2361 static inline void b44_pci_exit(void)
2362 {
2363 #ifdef CONFIG_B44_PCI
2364 	ssb_pcihost_unregister(&b44_pci_driver);
2365 #endif
2366 }
2367 
2368 static int __init b44_init(void)
2369 {
2370 	unsigned int dma_desc_align_size = dma_get_cache_alignment();
2371 	int err;
2372 
2373 	/* Set up parameters for syncing RX/TX DMA descriptors */
2374 	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2375 
2376 	err = b44_pci_init();
2377 	if (err)
2378 		return err;
2379 	err = ssb_driver_register(&b44_ssb_driver);
2380 	if (err)
2381 		b44_pci_exit();
2382 	return err;
2383 }
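/* Registration is two-stage: the PCI host glue is registered first (when
 * CONFIG_B44_PCI is set) so PCI-attached cores appear on the SSB bus,
 * and a failing ssb_driver_register() unwinds it again; b44_cleanup()
 * tears both down in reverse order.  dma_desc_sync_size is raised to at
 * least the cache line size so a descriptor sync never covers less than
 * a full cacheline.
 */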
2384 
2385 static void __exit b44_cleanup(void)
2386 {
2387 	ssb_driver_unregister(&b44_ssb_driver);
2388 	b44_pci_exit();
2389 }
2390 
2391 module_init(b44_init);
2392 module_exit(b44_cleanup);
2393 
2394