1 /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
2  *
3  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4  * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5  * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6  * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
7  * Copyright (C) 2006 Broadcom Corporation.
8  * Copyright (C) 2007 Michael Buesch <m@bues.ch>
9  * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
10  *
11  * Distribute under GPL.
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/netdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/mii.h>
23 #include <linux/if_ether.h>
24 #include <linux/if_vlan.h>
25 #include <linux/etherdevice.h>
26 #include <linux/pci.h>
27 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/ssb/ssb.h>
32 #include <linux/slab.h>
33 #include <linux/phy.h>
34 #include <linux/phy_fixed.h>
35 
36 #include <linux/uaccess.h>
37 #include <asm/io.h>
38 #include <asm/irq.h>
39 
40 
41 #include "b44.h"
42 
43 #define DRV_MODULE_NAME		"b44"
44 #define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"
45 
46 #define B44_DEF_MSG_ENABLE	  \
47 	(NETIF_MSG_DRV		| \
48 	 NETIF_MSG_PROBE	| \
49 	 NETIF_MSG_LINK		| \
50 	 NETIF_MSG_TIMER	| \
51 	 NETIF_MSG_IFDOWN	| \
52 	 NETIF_MSG_IFUP		| \
53 	 NETIF_MSG_RX_ERR	| \
54 	 NETIF_MSG_TX_ERR)
55 
56 /* length of time before we decide the hardware is borked,
57  * and dev->tx_timeout() should be called to fix the problem
58  */
59 #define B44_TX_TIMEOUT			(5 * HZ)
60 
61 /* hardware minimum and maximum for a single frame's data payload */
62 #define B44_MIN_MTU			ETH_ZLEN
63 #define B44_MAX_MTU			ETH_DATA_LEN
64 
65 #define B44_RX_RING_SIZE		512
66 #define B44_DEF_RX_RING_PENDING		200
67 #define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
68 				 B44_RX_RING_SIZE)
69 #define B44_TX_RING_SIZE		512
70 #define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
71 #define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
72 				 B44_TX_RING_SIZE)
73 
74 #define TX_RING_GAP(BP)	\
75 	(B44_TX_RING_SIZE - (BP)->tx_pending)
76 #define TX_BUFFS_AVAIL(BP)						\
77 	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
78 	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
79 	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
80 #define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
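
/* A worked example of the availability math above (a sketch): with
 * tx_pending == 511, tx_cons == 5 and tx_prod == 8, three descriptors
 * are in flight, so 5 + 511 - 8 == 508 slots remain.  Once tx_prod
 * wraps below tx_cons (e.g. cons == 510, prod == 2), the second branch
 * subtracts the reserved ring gap instead:
 * 510 - 2 - (512 - 511) == 507 free slots.
 */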
81 
82 #define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
83 #define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)
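
/* Each RX buffer therefore holds the chip-written rx_header
 * (RX_HEADER_LEN bytes) plus 2 bytes of padding, which keeps the IP
 * header of the received frame 4-byte aligned, followed by up to
 * 1536 bytes of frame data.
 */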
84 
85 /* minimum number of free TX descriptors required to wake up TX process */
86 #define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
87 
88 /* b44 internal pattern match filter info */
89 #define B44_PATTERN_BASE	0x400
90 #define B44_PATTERN_SIZE	0x80
91 #define B44_PMASK_BASE		0x600
92 #define B44_PMASK_SIZE		0x10
93 #define B44_MAX_PATTERNS	16
94 #define B44_ETHIPV6UDP_HLEN	62
95 #define B44_ETHIPV4UDP_HLEN	42
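
/* 42 = 14 (Ethernet) + 20 (IPv4) + 8 (UDP) header bytes, and
 * 62 = 14 (Ethernet) + 40 (IPv6) + 8 (UDP) header bytes.
 */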
96 
97 MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
98 MODULE_DESCRIPTION(DRV_DESCRIPTION);
99 MODULE_LICENSE("GPL");
100 
101 static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
102 module_param(b44_debug, int, 0);
103 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
104 
105 
106 #ifdef CONFIG_B44_PCI
107 static const struct pci_device_id b44_pci_tbl[] = {
108 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
109 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
110 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
111 	{ 0 } /* terminate list with empty entry */
112 };
113 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
114 
115 static struct pci_driver b44_pci_driver = {
116 	.name		= DRV_MODULE_NAME,
117 	.id_table	= b44_pci_tbl,
118 };
119 #endif /* CONFIG_B44_PCI */
120 
121 static const struct ssb_device_id b44_ssb_tbl[] = {
122 	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
123 	{},
124 };
125 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
126 
127 static void b44_halt(struct b44 *);
128 static void b44_init_rings(struct b44 *);
129 
130 #define B44_FULL_RESET		1
131 #define B44_FULL_RESET_SKIP_PHY	2
132 #define B44_PARTIAL_RESET	3
133 #define B44_CHIP_RESET_FULL	4
134 #define B44_CHIP_RESET_PARTIAL	5
135 
136 static void b44_init_hw(struct b44 *, int);
137 
138 static int dma_desc_sync_size;
139 static int instance;
140 
141 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
142 #define _B44(x...)	# x,
143 B44_STAT_REG_DECLARE
144 #undef _B44
145 };
146 
147 static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
148 						dma_addr_t dma_base,
149 						unsigned long offset,
150 						enum dma_data_direction dir)
151 {
152 	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
153 				   dma_desc_sync_size, dir);
154 }
155 
156 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
157 					     dma_addr_t dma_base,
158 					     unsigned long offset,
159 					     enum dma_data_direction dir)
160 {
161 	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
162 				dma_desc_sync_size, dir);
163 }
164 
165 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
166 {
167 	return ssb_read32(bp->sdev, reg);
168 }
169 
170 static inline void bw32(const struct b44 *bp,
171 			unsigned long reg, unsigned long val)
172 {
173 	ssb_write32(bp->sdev, reg, val);
174 }
175 
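/* Poll REG until BIT is cleared (clear != 0) or set (clear == 0).
 * Each iteration busy-waits 10us, so TIMEOUT is roughly in units of
 * 10us: e.g. timeout == 100 gives a ~1ms polling window.
 */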
176 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
177 			u32 bit, unsigned long timeout, const int clear)
178 {
179 	unsigned long i;
180 
181 	for (i = 0; i < timeout; i++) {
182 		u32 val = br32(bp, reg);
183 
184 		if (clear && !(val & bit))
185 			break;
186 		if (!clear && (val & bit))
187 			break;
188 		udelay(10);
189 	}
190 	if (i == timeout) {
191 		if (net_ratelimit())
192 			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
193 				   bit, reg, clear ? "clear" : "set");
194 
195 		return -ENODEV;
196 	}
197 	return 0;
198 }
199 
200 static inline void __b44_cam_write(struct b44 *bp,
201 				   const unsigned char *data, int index)
202 {
203 	u32 val;
204 
205 	val  = ((u32) data[2]) << 24;
206 	val |= ((u32) data[3]) << 16;
207 	val |= ((u32) data[4]) <<  8;
208 	val |= ((u32) data[5]) <<  0;
209 	bw32(bp, B44_CAM_DATA_LO, val);
210 	val = (CAM_DATA_HI_VALID |
211 	       (((u32) data[0]) << 8) |
212 	       (((u32) data[1]) << 0));
213 	bw32(bp, B44_CAM_DATA_HI, val);
214 	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
215 			    (index << CAM_CTRL_INDEX_SHIFT)));
216 	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
217 }
218 
219 static inline void __b44_disable_ints(struct b44 *bp)
220 {
221 	bw32(bp, B44_IMASK, 0);
222 }
223 
224 static void b44_disable_ints(struct b44 *bp)
225 {
226 	__b44_disable_ints(bp);
227 
228 	/* Flush posted writes. */
229 	br32(bp, B44_IMASK);
230 }
231 
232 static void b44_enable_ints(struct b44 *bp)
233 {
234 	bw32(bp, B44_IMASK, bp->imask);
235 }
236 
237 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
238 {
239 	int err;
240 
241 	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
242 	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
243 			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
244 			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
245 			     (reg << MDIO_DATA_RA_SHIFT) |
246 			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
247 	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
248 	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
249 
250 	return err;
251 }
252 
253 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
254 {
255 	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
256 	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
257 			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
258 			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
259 			     (reg << MDIO_DATA_RA_SHIFT) |
260 			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
261 			     (val & MDIO_DATA_DATA)));
262 	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
263 }
264 
265 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
266 {
267 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
268 		return 0;
269 
270 	return __b44_readphy(bp, bp->phy_addr, reg, val);
271 }
272 
273 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
274 {
275 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
276 		return 0;
277 
278 	return __b44_writephy(bp, bp->phy_addr, reg, val);
279 }
280 
281 /* miilib interface */
282 static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
283 {
284 	u32 val;
285 	struct b44 *bp = netdev_priv(dev);
286 	int rc = __b44_readphy(bp, phy_id, location, &val);
287 	if (rc)
288 		return 0xffffffff;
289 	return val;
290 }
291 
292 static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
293 			       int val)
294 {
295 	struct b44 *bp = netdev_priv(dev);
296 	__b44_writephy(bp, phy_id, location, val);
297 }
298 
299 static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
300 {
301 	u32 val;
302 	struct b44 *bp = bus->priv;
303 	int rc = __b44_readphy(bp, phy_id, location, &val);
304 	if (rc)
305 		return 0xffffffff;
306 	return val;
307 }
308 
309 static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
310 				 u16 val)
311 {
312 	struct b44 *bp = bus->priv;
313 	return __b44_writephy(bp, phy_id, location, val);
314 }
315 
316 static int b44_phy_reset(struct b44 *bp)
317 {
318 	u32 val;
319 	int err;
320 
321 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
322 		return 0;
323 	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
324 	if (err)
325 		return err;
326 	udelay(100);
327 	err = b44_readphy(bp, MII_BMCR, &val);
328 	if (!err) {
329 		if (val & BMCR_RESET) {
330 			netdev_err(bp->dev, "PHY Reset would not complete\n");
331 			err = -ENODEV;
332 		}
333 	}
334 
335 	return err;
336 }
337 
338 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
339 {
340 	u32 val;
341 
342 	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
343 	bp->flags |= pause_flags;
344 
345 	val = br32(bp, B44_RXCONFIG);
346 	if (pause_flags & B44_FLAG_RX_PAUSE)
347 		val |= RXCONFIG_FLOW;
348 	else
349 		val &= ~RXCONFIG_FLOW;
350 	bw32(bp, B44_RXCONFIG, val);
351 
352 	val = br32(bp, B44_MAC_FLOW);
353 	if (pause_flags & B44_FLAG_TX_PAUSE)
354 		val |= (MAC_FLOW_PAUSE_ENAB |
355 			(0xc0 & MAC_FLOW_RX_HI_WATER));
356 	else
357 		val &= ~MAC_FLOW_PAUSE_ENAB;
358 	bw32(bp, B44_MAC_FLOW, val);
359 }
360 
361 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
362 {
363 	u32 pause_enab = 0;
364 
365 	/* The driver supports only rx pause by default because
366 	   the b44 mac tx pause mechanism generates excessive
367 	   pause frames.
368 	   Use ethtool to turn on b44 tx pause if necessary.
369 	 */
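	/* For reference, the condition below matches one row of the
	 * 802.3 pause resolution table (cf. mii_resolve_flowctrl_fdx()):
	 * both sides advertise asymmetric pause but only the local side
	 * advertises symmetric pause, which resolves to RX pause only.
	 */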
370 	if ((local & ADVERTISE_PAUSE_CAP) &&
371 	    (local & ADVERTISE_PAUSE_ASYM)) {
372 		if ((remote & LPA_PAUSE_ASYM) &&
373 		    !(remote & LPA_PAUSE_CAP))
374 			pause_enab |= B44_FLAG_RX_PAUSE;
375 	}
376 
377 	__b44_set_flow_ctrl(bp, pause_enab);
378 }
379 
380 #ifdef CONFIG_BCM47XX
381 #include <linux/bcm47xx_nvram.h>
382 static void b44_wap54g10_workaround(struct b44 *bp)
383 {
384 	char buf[20];
385 	u32 val;
386 	int err;
387 
388 	/*
389 	 * workaround for bad hardware design in Linksys WAP54G v1.0
390 	 * see https://dev.openwrt.org/ticket/146
391 	 * check and reset bit "isolate"
392 	 */
393 	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
394 		return;
395 	if (simple_strtoul(buf, NULL, 0) == 2) {
396 		err = __b44_readphy(bp, 0, MII_BMCR, &val);
397 		if (err)
398 			goto error;
399 		if (!(val & BMCR_ISOLATE))
400 			return;
401 		val &= ~BMCR_ISOLATE;
402 		err = __b44_writephy(bp, 0, MII_BMCR, val);
403 		if (err)
404 			goto error;
405 	}
406 	return;
407 error:
408 	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
409 }
410 #else
411 static inline void b44_wap54g10_workaround(struct b44 *bp)
412 {
413 }
414 #endif
415 
416 static int b44_setup_phy(struct b44 *bp)
417 {
418 	u32 val;
419 	int err;
420 
421 	b44_wap54g10_workaround(bp);
422 
423 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
424 		return 0;
425 	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
426 		goto out;
427 	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
428 				val & MII_ALEDCTRL_ALLMSK)) != 0)
429 		goto out;
430 	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
431 		goto out;
432 	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
433 				val | MII_TLEDCTRL_ENABLE)) != 0)
434 		goto out;
435 
436 	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
437 		u32 adv = ADVERTISE_CSMA;
438 
439 		if (bp->flags & B44_FLAG_ADV_10HALF)
440 			adv |= ADVERTISE_10HALF;
441 		if (bp->flags & B44_FLAG_ADV_10FULL)
442 			adv |= ADVERTISE_10FULL;
443 		if (bp->flags & B44_FLAG_ADV_100HALF)
444 			adv |= ADVERTISE_100HALF;
445 		if (bp->flags & B44_FLAG_ADV_100FULL)
446 			adv |= ADVERTISE_100FULL;
447 
448 		if (bp->flags & B44_FLAG_PAUSE_AUTO)
449 			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
450 
451 		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
452 			goto out;
453 		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
454 						       BMCR_ANRESTART))) != 0)
455 			goto out;
456 	} else {
457 		u32 bmcr;
458 
459 		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
460 			goto out;
461 		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
462 		if (bp->flags & B44_FLAG_100_BASE_T)
463 			bmcr |= BMCR_SPEED100;
464 		if (bp->flags & B44_FLAG_FULL_DUPLEX)
465 			bmcr |= BMCR_FULLDPLX;
466 		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
467 			goto out;
468 
469 		/* Since we will not be negotiating there is no safe way
470 		 * to determine if the link partner supports flow control
471 		 * or not.  So just disable it completely in this case.
472 		 */
473 		b44_set_flow_ctrl(bp, 0, 0);
474 	}
475 
476 out:
477 	return err;
478 }
479 
480 static void b44_stats_update(struct b44 *bp)
481 {
482 	unsigned long reg;
483 	u64 *val;
484 
485 	val = &bp->hw_stats.tx_good_octets;
486 	u64_stats_update_begin(&bp->hw_stats.syncp);
487 
488 	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
489 		*val++ += br32(bp, reg);
490 	}
491 
492 	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
493 		*val++ += br32(bp, reg);
494 	}
495 
496 	u64_stats_update_end(&bp->hw_stats.syncp);
497 }
498 
499 static void b44_link_report(struct b44 *bp)
500 {
501 	if (!netif_carrier_ok(bp->dev)) {
502 		netdev_info(bp->dev, "Link is down\n");
503 	} else {
504 		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
505 			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
506 			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
507 
508 		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
509 			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
510 			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
511 	}
512 }
513 
514 static void b44_check_phy(struct b44 *bp)
515 {
516 	u32 bmsr, aux;
517 
518 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
519 		bp->flags |= B44_FLAG_100_BASE_T;
520 		if (!netif_carrier_ok(bp->dev)) {
521 			u32 val = br32(bp, B44_TX_CTRL);
522 			if (bp->flags & B44_FLAG_FULL_DUPLEX)
523 				val |= TX_CTRL_DUPLEX;
524 			else
525 				val &= ~TX_CTRL_DUPLEX;
526 			bw32(bp, B44_TX_CTRL, val);
527 			netif_carrier_on(bp->dev);
528 			b44_link_report(bp);
529 		}
530 		return;
531 	}
532 
533 	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
534 	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
535 	    (bmsr != 0xffff)) {
536 		if (aux & MII_AUXCTRL_SPEED)
537 			bp->flags |= B44_FLAG_100_BASE_T;
538 		else
539 			bp->flags &= ~B44_FLAG_100_BASE_T;
540 		if (aux & MII_AUXCTRL_DUPLEX)
541 			bp->flags |= B44_FLAG_FULL_DUPLEX;
542 		else
543 			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
544 
545 		if (!netif_carrier_ok(bp->dev) &&
546 		    (bmsr & BMSR_LSTATUS)) {
547 			u32 val = br32(bp, B44_TX_CTRL);
548 			u32 local_adv, remote_adv;
549 
550 			if (bp->flags & B44_FLAG_FULL_DUPLEX)
551 				val |= TX_CTRL_DUPLEX;
552 			else
553 				val &= ~TX_CTRL_DUPLEX;
554 			bw32(bp, B44_TX_CTRL, val);
555 
556 			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
557 			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
558 			    !b44_readphy(bp, MII_LPA, &remote_adv))
559 				b44_set_flow_ctrl(bp, local_adv, remote_adv);
560 
561 			/* Link now up */
562 			netif_carrier_on(bp->dev);
563 			b44_link_report(bp);
564 		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
565 			/* Link now down */
566 			netif_carrier_off(bp->dev);
567 			b44_link_report(bp);
568 		}
569 
570 		if (bmsr & BMSR_RFAULT)
571 			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
572 		if (bmsr & BMSR_JCD)
573 			netdev_warn(bp->dev, "Jabber detected in PHY\n");
574 	}
575 }
576 
577 static void b44_timer(struct timer_list *t)
578 {
579 	struct b44 *bp = timer_container_of(bp, t, timer);
580 
581 	spin_lock_irq(&bp->lock);
582 
583 	b44_check_phy(bp);
584 
585 	b44_stats_update(bp);
586 
587 	spin_unlock_irq(&bp->lock);
588 
589 	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
590 }
591 
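/* Reclaim completed TX descriptors.  The DMATX_STAT cursor is a byte
 * offset into the ring, hence the division by sizeof(struct dma_desc).
 * The netdev_completed_queue() call pairs with netdev_sent_queue() in
 * b44_start_xmit() to keep the BQL accounting balanced.
 */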
592 static void b44_tx(struct b44 *bp)
593 {
594 	u32 cur, cons;
595 	unsigned bytes_compl = 0, pkts_compl = 0;
596 
597 	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
598 	cur /= sizeof(struct dma_desc);
599 
600 	/* XXX needs updating when NETIF_F_SG is supported */
601 	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
602 		struct ring_info *rp = &bp->tx_buffers[cons];
603 		struct sk_buff *skb = rp->skb;
604 
605 		BUG_ON(skb == NULL);
606 
607 		dma_unmap_single(bp->sdev->dma_dev,
608 				 rp->mapping,
609 				 skb->len,
610 				 DMA_TO_DEVICE);
611 		rp->skb = NULL;
612 
613 		bytes_compl += skb->len;
614 		pkts_compl++;
615 
616 		dev_consume_skb_irq(skb);
617 	}
618 
619 	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
620 	bp->tx_cons = cons;
621 	if (netif_queue_stopped(bp->dev) &&
622 	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
623 		netif_wake_queue(bp->dev);
624 
625 	bw32(bp, B44_GPTIMER, 0);
626 }
627 
628 /* Works like this.  This chip writes a 'struct rx_header' 30 bytes
629  * before the DMA address you give it.  So we allocate 30 more bytes
630  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
631  * point the chip at 30 bytes past where the rx_header will go.
632  */
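/* Resulting buffer layout (a sketch):
 *
 *	skb->data + 0:             struct rx_header, written by the chip
 *	skb->data + RX_PKT_OFFSET: start of the received frame
 *
 * The receive offset itself is programmed into DMARX_CTRL by
 * b44_init_hw(), and b44_rx() pulls RX_PKT_OFFSET back off the skb
 * before handing it up the stack.
 */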
633 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
634 {
635 	struct dma_desc *dp;
636 	struct ring_info *src_map, *map;
637 	struct rx_header *rh;
638 	struct sk_buff *skb;
639 	dma_addr_t mapping;
640 	int dest_idx;
641 	u32 ctrl;
642 
643 	src_map = NULL;
644 	if (src_idx >= 0)
645 		src_map = &bp->rx_buffers[src_idx];
646 	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
647 	map = &bp->rx_buffers[dest_idx];
648 	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
649 	if (skb == NULL)
650 		return -ENOMEM;
651 
652 	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
653 				 RX_PKT_BUF_SZ,
654 				 DMA_FROM_DEVICE);
655 
656 	/* Hardware bug work-around: the chip is unable to do PCI DMA
657 	   to/from anything above 1GB :-( */
658 	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
659 		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
660 		/* Sigh... */
661 		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
662 			dma_unmap_single(bp->sdev->dma_dev, mapping,
663 					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
664 		dev_kfree_skb_any(skb);
665 		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
666 		if (skb == NULL)
667 			return -ENOMEM;
668 		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
669 					 RX_PKT_BUF_SZ,
670 					 DMA_FROM_DEVICE);
671 		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
672 		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
673 			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
674 				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
675 			dev_kfree_skb_any(skb);
676 			return -ENOMEM;
677 		}
678 		bp->force_copybreak = 1;
679 	}
680 
681 	rh = (struct rx_header *) skb->data;
682 
683 	rh->len = 0;
684 	rh->flags = 0;
685 
686 	map->skb = skb;
687 	map->mapping = mapping;
688 
689 	if (src_map != NULL)
690 		src_map->skb = NULL;
691 
692 	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
693 	if (dest_idx == (B44_RX_RING_SIZE - 1))
694 		ctrl |= DESC_CTRL_EOT;
695 
696 	dp = &bp->rx_ring[dest_idx];
697 	dp->ctrl = cpu_to_le32(ctrl);
698 	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
699 
700 	if (bp->flags & B44_FLAG_RX_RING_HACK)
701 		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
702 			                    dest_idx * sizeof(*dp),
703 			                    DMA_BIDIRECTIONAL);
704 
705 	return RX_PKT_BUF_SZ;
706 }
707 
708 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
709 {
710 	struct dma_desc *src_desc, *dest_desc;
711 	struct ring_info *src_map, *dest_map;
712 	struct rx_header *rh;
713 	int dest_idx;
714 	__le32 ctrl;
715 
716 	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
717 	dest_desc = &bp->rx_ring[dest_idx];
718 	dest_map = &bp->rx_buffers[dest_idx];
719 	src_desc = &bp->rx_ring[src_idx];
720 	src_map = &bp->rx_buffers[src_idx];
721 
722 	dest_map->skb = src_map->skb;
723 	rh = (struct rx_header *) src_map->skb->data;
724 	rh->len = 0;
725 	rh->flags = 0;
726 	dest_map->mapping = src_map->mapping;
727 
728 	if (bp->flags & B44_FLAG_RX_RING_HACK)
729 		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
730 			                 src_idx * sizeof(*src_desc),
731 			                 DMA_BIDIRECTIONAL);
732 
733 	ctrl = src_desc->ctrl;
734 	if (dest_idx == (B44_RX_RING_SIZE - 1))
735 		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
736 	else
737 		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
738 
739 	dest_desc->ctrl = ctrl;
740 	dest_desc->addr = src_desc->addr;
741 
742 	src_map->skb = NULL;
743 
744 	if (bp->flags & B44_FLAG_RX_RING_HACK)
745 		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
746 					     dest_idx * sizeof(*dest_desc),
747 					     DMA_BIDIRECTIONAL);
748 
749 	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
750 				   RX_PKT_BUF_SZ,
751 				   DMA_FROM_DEVICE);
752 }
753 
754 static int b44_rx(struct b44 *bp, int budget)
755 {
756 	int received;
757 	u32 cons, prod;
758 
759 	received = 0;
760 	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
761 	prod /= sizeof(struct dma_desc);
762 	cons = bp->rx_cons;
763 
764 	while (cons != prod && budget > 0) {
765 		struct ring_info *rp = &bp->rx_buffers[cons];
766 		struct sk_buff *skb = rp->skb;
767 		dma_addr_t map = rp->mapping;
768 		struct rx_header *rh;
769 		u16 len;
770 
771 		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
772 					RX_PKT_BUF_SZ,
773 					DMA_FROM_DEVICE);
774 		rh = (struct rx_header *) skb->data;
775 		len = le16_to_cpu(rh->len);
776 		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
777 		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
778 		drop_it:
779 			b44_recycle_rx(bp, cons, bp->rx_prod);
780 		drop_it_no_recycle:
781 			bp->dev->stats.rx_dropped++;
782 			goto next_pkt;
783 		}
784 
785 		if (len == 0) {
786 			int i = 0;
787 
788 			do {
789 				udelay(2);
790 				barrier();
791 				len = le16_to_cpu(rh->len);
792 			} while (len == 0 && i++ < 5);
793 			if (len == 0)
794 				goto drop_it;
795 		}
796 
797 		/* Omit CRC. */
798 		len -= 4;
799 
800 		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
801 			int skb_size;
802 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
803 			if (skb_size < 0)
804 				goto drop_it;
805 			dma_unmap_single(bp->sdev->dma_dev, map,
806 					 skb_size, DMA_FROM_DEVICE);
807 			/* Leave out rx_header */
808 			skb_put(skb, len + RX_PKT_OFFSET);
809 			skb_pull(skb, RX_PKT_OFFSET);
810 		} else {
811 			struct sk_buff *copy_skb;
812 
813 			b44_recycle_rx(bp, cons, bp->rx_prod);
814 			copy_skb = napi_alloc_skb(&bp->napi, len);
815 			if (copy_skb == NULL)
816 				goto drop_it_no_recycle;
817 
818 			skb_put(copy_skb, len);
819 			/* DMA sync done above, copy just the actual packet */
820 			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
821 							 copy_skb->data, len);
822 			skb = copy_skb;
823 		}
824 		skb_checksum_none_assert(skb);
825 		skb->protocol = eth_type_trans(skb, bp->dev);
826 		netif_receive_skb(skb);
827 		received++;
828 		budget--;
829 	next_pkt:
830 		bp->rx_prod = (bp->rx_prod + 1) &
831 			(B44_RX_RING_SIZE - 1);
832 		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
833 	}
834 
835 	bp->rx_cons = cons;
836 	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
837 
838 	return received;
839 }
840 
841 static int b44_poll(struct napi_struct *napi, int budget)
842 {
843 	struct b44 *bp = container_of(napi, struct b44, napi);
844 	int work_done;
845 	unsigned long flags;
846 
847 	spin_lock_irqsave(&bp->lock, flags);
848 
849 	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
850 		/* spin_lock(&bp->tx_lock); */
851 		b44_tx(bp);
852 		/* spin_unlock(&bp->tx_lock); */
853 	}
854 	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
855 		bp->istat &= ~ISTAT_RFO;
856 		b44_disable_ints(bp);
857 		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
858 		b44_init_rings(bp);
859 		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
860 		netif_wake_queue(bp->dev);
861 	}
862 
863 	spin_unlock_irqrestore(&bp->lock, flags);
864 
865 	work_done = 0;
866 	if (bp->istat & ISTAT_RX)
867 		work_done += b44_rx(bp, budget);
868 
869 	if (bp->istat & ISTAT_ERRORS) {
870 		spin_lock_irqsave(&bp->lock, flags);
871 		b44_halt(bp);
872 		b44_init_rings(bp);
873 		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
874 		netif_wake_queue(bp->dev);
875 		spin_unlock_irqrestore(&bp->lock, flags);
876 		work_done = 0;
877 	}
878 
879 	if (work_done < budget) {
880 		napi_complete_done(napi, work_done);
881 		b44_enable_ints(bp);
882 	}
883 
884 	return work_done;
885 }
886 
887 static irqreturn_t b44_interrupt(int irq, void *dev_id)
888 {
889 	struct net_device *dev = dev_id;
890 	struct b44 *bp = netdev_priv(dev);
891 	u32 istat, imask;
892 	int handled = 0;
893 
894 	spin_lock(&bp->lock);
895 
896 	istat = br32(bp, B44_ISTAT);
897 	imask = br32(bp, B44_IMASK);
898 
899 	/* The interrupt mask register controls which interrupt bits
900 	 * will actually raise an interrupt to the CPU when set by hw/firmware,
901 	 * but doesn't mask off the bits.
902 	 */
903 	istat &= imask;
904 	if (istat) {
905 		handled = 1;
906 
907 		if (unlikely(!netif_running(dev))) {
908 			netdev_info(dev, "late interrupt\n");
909 			goto irq_ack;
910 		}
911 
912 		if (napi_schedule_prep(&bp->napi)) {
913 			/* NOTE: These posted writes are flushed by the readback of
914 			 *       the ISTAT register below.
915 			 */
916 			bp->istat = istat;
917 			__b44_disable_ints(bp);
918 			__napi_schedule(&bp->napi);
919 		}
920 
921 irq_ack:
922 		bw32(bp, B44_ISTAT, istat);
923 		br32(bp, B44_ISTAT);
924 	}
925 	spin_unlock(&bp->lock);
926 	return IRQ_RETVAL(handled);
927 }
928 
929 static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
930 {
931 	struct b44 *bp = netdev_priv(dev);
932 
933 	netdev_err(dev, "transmit timed out, resetting\n");
934 
935 	spin_lock_irq(&bp->lock);
936 
937 	b44_halt(bp);
938 	b44_init_rings(bp);
939 	b44_init_hw(bp, B44_FULL_RESET);
940 
941 	spin_unlock_irq(&bp->lock);
942 
943 	b44_enable_ints(bp);
944 
945 	netif_wake_queue(dev);
946 }
947 
948 static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
949 {
950 	struct b44 *bp = netdev_priv(dev);
951 	int rc = NETDEV_TX_OK;
952 	dma_addr_t mapping;
953 	u32 len, entry, ctrl;
954 	unsigned long flags;
955 
956 	len = skb->len;
957 	spin_lock_irqsave(&bp->lock, flags);
958 
959 	/* This is a hard error, log it. */
960 	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
961 		netif_stop_queue(dev);
962 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
963 		goto err_out;
964 	}
965 
966 	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
967 	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
968 		struct sk_buff *bounce_skb;
969 
970 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
971 		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
972 			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
973 					     DMA_TO_DEVICE);
974 
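		/* GFP_DMA steers the bounce buffer into ZONE_DMA, which
		 * normally lies well below the 30-bit limit; the zone's
		 * bounds vary by architecture, so the new mapping is
		 * re-checked below anyway.
		 */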
975 		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
976 		if (!bounce_skb)
977 			goto err_out;
978 
979 		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
980 					 len, DMA_TO_DEVICE);
981 		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
982 			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
983 				dma_unmap_single(bp->sdev->dma_dev, mapping,
984 						     len, DMA_TO_DEVICE);
985 			dev_kfree_skb_any(bounce_skb);
986 			goto err_out;
987 		}
988 
989 		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
990 		dev_consume_skb_any(skb);
991 		skb = bounce_skb;
992 	}
993 
994 	entry = bp->tx_prod;
995 	bp->tx_buffers[entry].skb = skb;
996 	bp->tx_buffers[entry].mapping = mapping;
997 
998 	ctrl  = (len & DESC_CTRL_LEN);
999 	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1000 	if (entry == (B44_TX_RING_SIZE - 1))
1001 		ctrl |= DESC_CTRL_EOT;
1002 
1003 	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1004 	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1005 
1006 	if (bp->flags & B44_FLAG_TX_RING_HACK)
1007 		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1008 			                    entry * sizeof(bp->tx_ring[0]),
1009 			                    DMA_TO_DEVICE);
1010 
1011 	entry = NEXT_TX(entry);
1012 
1013 	bp->tx_prod = entry;
1014 
1015 	wmb();
1016 
1017 	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1018 	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1019 		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1020 	if (bp->flags & B44_FLAG_REORDER_BUG)
1021 		br32(bp, B44_DMATX_PTR);
1022 
1023 	netdev_sent_queue(dev, skb->len);
1024 
1025 	if (TX_BUFFS_AVAIL(bp) < 1)
1026 		netif_stop_queue(dev);
1027 
1028 out_unlock:
1029 	spin_unlock_irqrestore(&bp->lock, flags);
1030 
1031 	return rc;
1032 
1033 err_out:
1034 	rc = NETDEV_TX_BUSY;
1035 	goto out_unlock;
1036 }
1037 
1038 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1039 {
1040 	struct b44 *bp = netdev_priv(dev);
1041 
1042 	if (!netif_running(dev)) {
1043 		/* We'll just catch it later when the
1044 		 * device is up'd.
1045 		 */
1046 		WRITE_ONCE(dev->mtu, new_mtu);
1047 		return 0;
1048 	}
1049 
1050 	spin_lock_irq(&bp->lock);
1051 	b44_halt(bp);
1052 	WRITE_ONCE(dev->mtu, new_mtu);
1053 	b44_init_rings(bp);
1054 	b44_init_hw(bp, B44_FULL_RESET);
1055 	spin_unlock_irq(&bp->lock);
1056 
1057 	b44_enable_ints(bp);
1058 
1059 	return 0;
1060 }
1061 
1062 /* Free up pending packets in all rx/tx rings.
1063  *
1064  * The chip has been shut down and the driver detached from
1065  * the networking, so no interrupts or new tx packets will
1066  * end up in the driver.  bp->lock is not held and we are not
1067  * in an interrupt context and thus may sleep.
1068  */
1069 static void b44_free_rings(struct b44 *bp)
1070 {
1071 	struct ring_info *rp;
1072 	int i;
1073 
1074 	for (i = 0; i < B44_RX_RING_SIZE; i++) {
1075 		rp = &bp->rx_buffers[i];
1076 
1077 		if (rp->skb == NULL)
1078 			continue;
1079 		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1080 				 DMA_FROM_DEVICE);
1081 		dev_kfree_skb_any(rp->skb);
1082 		rp->skb = NULL;
1083 	}
1084 
1085 	/* XXX needs changes once NETIF_F_SG is set... */
1086 	for (i = 0; i < B44_TX_RING_SIZE; i++) {
1087 		rp = &bp->tx_buffers[i];
1088 
1089 		if (rp->skb == NULL)
1090 			continue;
1091 		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1092 				 DMA_TO_DEVICE);
1093 		dev_kfree_skb_any(rp->skb);
1094 		rp->skb = NULL;
1095 	}
1096 }
1097 
1098 /* Initialize tx/rx rings for packet processing.
1099  *
1100  * The chip has been shut down and the driver detached from
1101  * the networking, so no interrupts or new tx packets will
1102  * end up in the driver.
1103  */
1104 static void b44_init_rings(struct b44 *bp)
1105 {
1106 	int i;
1107 
1108 	b44_free_rings(bp);
1109 
1110 	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1111 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1112 
1113 	if (bp->flags & B44_FLAG_RX_RING_HACK)
1114 		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1115 					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1116 
1117 	if (bp->flags & B44_FLAG_TX_RING_HACK)
1118 		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1119 					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
1120 
1121 	for (i = 0; i < bp->rx_pending; i++) {
1122 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
1123 			break;
1124 	}
1125 }
1126 
1127 /*
1128  * Must not be invoked with interrupt sources disabled and
1129  * the hardware shut down.
1130  */
1131 static void b44_free_consistent(struct b44 *bp)
1132 {
1133 	kfree(bp->rx_buffers);
1134 	bp->rx_buffers = NULL;
1135 	kfree(bp->tx_buffers);
1136 	bp->tx_buffers = NULL;
1137 	if (bp->rx_ring) {
1138 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
1139 			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1140 					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1141 			kfree(bp->rx_ring);
1142 		} else
1143 			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1144 					  bp->rx_ring, bp->rx_ring_dma);
1145 		bp->rx_ring = NULL;
1146 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
1147 	}
1148 	if (bp->tx_ring) {
1149 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
1150 			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1151 					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1152 			kfree(bp->tx_ring);
1153 		} else
1154 			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1155 					  bp->tx_ring, bp->tx_ring_dma);
1156 		bp->tx_ring = NULL;
1157 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
1158 	}
1159 }
1160 
1161 /*
1162  * Must not be invoked with interrupt sources disabled and
1163  * the hardware shut down.  Can sleep.
1164  */
1165 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1166 {
1167 	int size;
1168 
1169 	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1170 	bp->rx_buffers = kzalloc(size, gfp);
1171 	if (!bp->rx_buffers)
1172 		goto out_err;
1173 
1174 	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1175 	bp->tx_buffers = kzalloc(size, gfp);
1176 	if (!bp->tx_buffers)
1177 		goto out_err;
1178 
1179 	size = DMA_TABLE_BYTES;
1180 	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1181 					 &bp->rx_ring_dma, gfp);
1182 	if (!bp->rx_ring) {
1183 		/* Allocation may have failed due to dma_alloc_coherent
1184 		   insisting on use of GFP_DMA, which is more restrictive
1185 		   than necessary...  */
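		/* Fall back to a kmalloc()ed ring with a streaming DMA
		 * mapping.  B44_FLAG_RX_RING_HACK records this, so every
		 * later descriptor update pairs with an explicit
		 * b44_sync_dma_desc_for_device() call.
		 */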
1186 		struct dma_desc *rx_ring;
1187 		dma_addr_t rx_ring_dma;
1188 
1189 		rx_ring = kzalloc(size, gfp);
1190 		if (!rx_ring)
1191 			goto out_err;
1192 
1193 		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1194 					     DMA_TABLE_BYTES,
1195 					     DMA_BIDIRECTIONAL);
1196 
1197 		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1198 			rx_ring_dma + size > DMA_BIT_MASK(30)) {
1199 			kfree(rx_ring);
1200 			goto out_err;
1201 		}
1202 
1203 		bp->rx_ring = rx_ring;
1204 		bp->rx_ring_dma = rx_ring_dma;
1205 		bp->flags |= B44_FLAG_RX_RING_HACK;
1206 	}
1207 
1208 	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1209 					 &bp->tx_ring_dma, gfp);
1210 	if (!bp->tx_ring) {
1211 		/* Allocation may have failed due to dma_alloc_coherent
1212 		   insisting on use of GFP_DMA, which is more restrictive
1213 		   than necessary...  */
1214 		struct dma_desc *tx_ring;
1215 		dma_addr_t tx_ring_dma;
1216 
1217 		tx_ring = kzalloc(size, gfp);
1218 		if (!tx_ring)
1219 			goto out_err;
1220 
1221 		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1222 					     DMA_TABLE_BYTES,
1223 					     DMA_TO_DEVICE);
1224 
1225 		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1226 			tx_ring_dma + size > DMA_BIT_MASK(30)) {
1227 			kfree(tx_ring);
1228 			goto out_err;
1229 		}
1230 
1231 		bp->tx_ring = tx_ring;
1232 		bp->tx_ring_dma = tx_ring_dma;
1233 		bp->flags |= B44_FLAG_TX_RING_HACK;
1234 	}
1235 
1236 	return 0;
1237 
1238 out_err:
1239 	b44_free_consistent(bp);
1240 	return -ENOMEM;
1241 }
1242 
1243 /* bp->lock is held. */
1244 static void b44_clear_stats(struct b44 *bp)
1245 {
1246 	unsigned long reg;
1247 
1248 	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1249 	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1250 		br32(bp, reg);
1251 	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1252 		br32(bp, reg);
1253 }
1254 
1255 /* bp->lock is held. */
1256 static void b44_chip_reset(struct b44 *bp, int reset_kind)
1257 {
1258 	struct ssb_device *sdev = bp->sdev;
1259 	bool was_enabled;
1260 
1261 	was_enabled = ssb_device_is_enabled(bp->sdev);
1262 
1263 	ssb_device_enable(bp->sdev, 0);
1264 	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1265 
1266 	if (was_enabled) {
1267 		bw32(bp, B44_RCV_LAZY, 0);
1268 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1269 		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1270 		bw32(bp, B44_DMATX_CTRL, 0);
1271 		bp->tx_prod = bp->tx_cons = 0;
1272 		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1273 			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1274 				     100, 0);
1275 		}
1276 		bw32(bp, B44_DMARX_CTRL, 0);
1277 		bp->rx_prod = bp->rx_cons = 0;
1278 	}
1279 
1280 	b44_clear_stats(bp);
1281 
1282 	/*
1283 	 * Don't enable the PHY if we are doing a partial reset;
1284 	 * we are probably going to power down.
1285 	 */
1286 	if (reset_kind == B44_CHIP_RESET_PARTIAL)
1287 		return;
1288 
1289 	switch (sdev->bus->bustype) {
1290 	case SSB_BUSTYPE_SSB:
1291 		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1292 		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1293 					B44_MDC_RATIO)
1294 		     & MDIO_CTRL_MAXF_MASK)));
1295 		break;
1296 	case SSB_BUSTYPE_PCI:
1297 		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1298 		     (0x0d & MDIO_CTRL_MAXF_MASK)));
1299 		break;
1300 	case SSB_BUSTYPE_PCMCIA:
1301 	case SSB_BUSTYPE_SDIO:
1302 		WARN_ON(1); /* A device with this bus does not exist. */
1303 		break;
1304 	}
1305 
1306 	br32(bp, B44_MDIO_CTRL);
1307 
1308 	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1309 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1310 		br32(bp, B44_ENET_CTRL);
1311 		bp->flags |= B44_FLAG_EXTERNAL_PHY;
1312 	} else {
1313 		u32 val = br32(bp, B44_DEVCTRL);
1314 
1315 		if (val & DEVCTRL_EPR) {
1316 			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1317 			br32(bp, B44_DEVCTRL);
1318 			udelay(100);
1319 		}
1320 		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1321 	}
1322 }
1323 
1324 /* bp->lock is held. */
1325 static void b44_halt(struct b44 *bp)
1326 {
1327 	b44_disable_ints(bp);
1328 	/* reset PHY */
1329 	b44_phy_reset(bp);
1330 	/* power down PHY */
1331 	netdev_info(bp->dev, "powering down PHY\n");
1332 	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1333 	/* now reset the chip, but without enabling the MAC&PHY
1334 	 * part of it. This has to be done _after_ we shut down the PHY */
1335 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1336 		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1337 	else
1338 		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1339 }
1340 
1341 /* bp->lock is held. */
1342 static void __b44_set_mac_addr(struct b44 *bp)
1343 {
1344 	bw32(bp, B44_CAM_CTRL, 0);
1345 	if (!(bp->dev->flags & IFF_PROMISC)) {
1346 		u32 val;
1347 
1348 		__b44_cam_write(bp, bp->dev->dev_addr, 0);
1349 		val = br32(bp, B44_CAM_CTRL);
1350 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1351 	}
1352 }
1353 
1354 static int b44_set_mac_addr(struct net_device *dev, void *p)
1355 {
1356 	struct b44 *bp = netdev_priv(dev);
1357 	struct sockaddr *addr = p;
1358 	u32 val;
1359 
1360 	if (netif_running(dev))
1361 		return -EBUSY;
1362 
1363 	if (!is_valid_ether_addr(addr->sa_data))
1364 		return -EINVAL;
1365 
1366 	eth_hw_addr_set(dev, addr->sa_data);
1367 
1368 	spin_lock_irq(&bp->lock);
1369 
1370 	val = br32(bp, B44_RXCONFIG);
1371 	if (!(val & RXCONFIG_CAM_ABSENT))
1372 		__b44_set_mac_addr(bp);
1373 
1374 	spin_unlock_irq(&bp->lock);
1375 
1376 	return 0;
1377 }
1378 
1379 /* Called at device open time to get the chip ready for
1380  * packet processing.  Invoked with bp->lock held.
1381  */
1382 static void __b44_set_rx_mode(struct net_device *);
1383 static void b44_init_hw(struct b44 *bp, int reset_kind)
1384 {
1385 	u32 val;
1386 
1387 	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1388 	if (reset_kind == B44_FULL_RESET) {
1389 		b44_phy_reset(bp);
1390 		b44_setup_phy(bp);
1391 	}
1392 
1393 	/* Enable CRC32, set proper LED modes and power on PHY */
1394 	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1395 	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1396 
1397 	/* This sets the MAC address too.  */
1398 	__b44_set_rx_mode(bp->dev);
1399 
1400 	/* MTU + eth header + possible VLAN tag + struct rx_header */
1401 	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1402 	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1403 
1404 	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1405 	if (reset_kind == B44_PARTIAL_RESET) {
1406 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1407 				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1408 	} else {
1409 		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1410 		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1411 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1412 				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1413 		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1414 
1415 		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1416 		bp->rx_prod = bp->rx_pending;
1417 
1418 		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1419 	}
1420 
1421 	val = br32(bp, B44_ENET_CTRL);
1422 	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1423 
1424 	netdev_reset_queue(bp->dev);
1425 }
1426 
1427 static int b44_open(struct net_device *dev)
1428 {
1429 	struct b44 *bp = netdev_priv(dev);
1430 	int err;
1431 
1432 	err = b44_alloc_consistent(bp, GFP_KERNEL);
1433 	if (err)
1434 		goto out;
1435 
1436 	napi_enable(&bp->napi);
1437 
1438 	b44_init_rings(bp);
1439 	b44_init_hw(bp, B44_FULL_RESET);
1440 
1441 	b44_check_phy(bp);
1442 
1443 	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1444 	if (unlikely(err < 0)) {
1445 		napi_disable(&bp->napi);
1446 		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1447 		b44_free_rings(bp);
1448 		b44_free_consistent(bp);
1449 		goto out;
1450 	}
1451 
1452 	timer_setup(&bp->timer, b44_timer, 0);
1453 	bp->timer.expires = jiffies + HZ;
1454 	add_timer(&bp->timer);
1455 
1456 	b44_enable_ints(bp);
1457 
1458 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1459 		phy_start(dev->phydev);
1460 
1461 	netif_start_queue(dev);
1462 out:
1463 	return err;
1464 }
1465 
1466 #ifdef CONFIG_NET_POLL_CONTROLLER
1467 /*
1468  * Polling receive - used by netconsole and other diagnostic tools
1469  * to allow network i/o with interrupts disabled.
1470  */
1471 static void b44_poll_controller(struct net_device *dev)
1472 {
1473 	disable_irq(dev->irq);
1474 	b44_interrupt(dev->irq, dev);
1475 	enable_irq(dev->irq);
1476 }
1477 #endif
1478 
1479 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1480 {
1481 	u32 i;
1482 	u32 *pattern = (u32 *) pp;
1483 
1484 	for (i = 0; i < bytes; i += sizeof(u32)) {
1485 		bw32(bp, B44_FILT_ADDR, table_offset + i);
1486 		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1487 	}
1488 }
1489 
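/* A wake-on-LAN magic packet is 6 bytes of 0xff followed by 16 copies
 * of the target MAC address.  This helper writes that payload at
 * OFFSET within the 128-byte pattern buffer (so the same payload can
 * be matched behind an Ethernet, IPv4/UDP or IPv6/UDP header), sets
 * the matching bits in the byte mask, and returns the pattern length
 * minus one, which is what the WKUP_LEN register expects.
 */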
1490 static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
1491 			     int offset)
1492 {
1493 	int magicsync = 6;
1494 	int k, j, len = offset;
1495 	int ethaddr_bytes = ETH_ALEN;
1496 
1497 	memset(ppattern + offset, 0xff, magicsync);
1498 	for (j = 0; j < magicsync; j++) {
1499 		pmask[len >> 3] |= BIT(len & 7);
1500 		len++;
1501 	}
1502 
1503 	for (j = 0; j < B44_MAX_PATTERNS; j++) {
1504 		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1505 			ethaddr_bytes = ETH_ALEN;
1506 		else
1507 			ethaddr_bytes = B44_PATTERN_SIZE - len;
1508 		if (ethaddr_bytes <= 0)
1509 			break;
1510 		for (k = 0; k < ethaddr_bytes; k++) {
1511 			ppattern[offset + magicsync +
1512 				(j * ETH_ALEN) + k] = macaddr[k];
1513 			pmask[len >> 3] |= BIT(len & 7);
1514 			len++;
1515 		}
1516 	}
1517 	return len - 1;
1518 }
1519 
1520 /* Setup magic packet patterns in the b44 WOL
1521  * pattern matching filter.
1522  */
1523 static void b44_setup_pseudo_magicp(struct b44 *bp)
1524 {
1525 
1526 	u32 val;
1527 	int plen0, plen1, plen2;
1528 	u8 *pwol_pattern;
1529 	u8 pwol_mask[B44_PMASK_SIZE];
1530 
1531 	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1532 	if (!pwol_pattern)
1533 		return;
1534 
1535 	/* IPv4 magic packet pattern - pattern 0. */
1536 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1537 	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1538 				  B44_ETHIPV4UDP_HLEN);
1539 
1540 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1541 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1542 
1543 	/* Raw Ethernet II magic packet pattern - pattern 1 */
1544 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1545 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1546 	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1547 				  ETH_HLEN);
1548 
1549 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1550 		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
1551 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1552 		       B44_PMASK_BASE + B44_PMASK_SIZE);
1553 
1554 	/* IPv6 magic packet pattern - pattern 2 */
1555 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1556 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1557 	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1558 				  B44_ETHIPV6UDP_HLEN);
1559 
1560 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1561 		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1562 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1563 		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1564 
1565 	kfree(pwol_pattern);
1566 
1567 	/* set these patterns' lengths: one less than each real length */
1568 	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1569 	bw32(bp, B44_WKUP_LEN, val);
1570 
1571 	/* enable wakeup pattern matching */
1572 	val = br32(bp, B44_DEVCTRL);
1573 	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1574 
1575 }
1576 
1577 #ifdef CONFIG_B44_PCI
1578 static void b44_setup_wol_pci(struct b44 *bp)
1579 {
1580 	u16 val;
1581 
1582 	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1583 		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1584 		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1585 		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1586 	}
1587 }
1588 #else
1589 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1590 #endif /* CONFIG_B44_PCI */
1591 
1592 static void b44_setup_wol(struct b44 *bp)
1593 {
1594 	u32 val;
1595 
1596 	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1597 
1598 	if (bp->flags & B44_FLAG_B0_ANDLATER) {
1599 
1600 		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1601 
1602 		val = bp->dev->dev_addr[2] << 24 |
1603 			bp->dev->dev_addr[3] << 16 |
1604 			bp->dev->dev_addr[4] << 8 |
1605 			bp->dev->dev_addr[5];
1606 		bw32(bp, B44_ADDR_LO, val);
1607 
1608 		val = bp->dev->dev_addr[0] << 8 |
1609 			bp->dev->dev_addr[1];
1610 		bw32(bp, B44_ADDR_HI, val);
1611 
1612 		val = br32(bp, B44_DEVCTRL);
1613 		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1614 
1615 	} else {
1616 		b44_setup_pseudo_magicp(bp);
1617 	}
1618 	b44_setup_wol_pci(bp);
1619 }
1620 
1621 static int b44_close(struct net_device *dev)
1622 {
1623 	struct b44 *bp = netdev_priv(dev);
1624 
1625 	netif_stop_queue(dev);
1626 
1627 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1628 		phy_stop(dev->phydev);
1629 
1630 	napi_disable(&bp->napi);
1631 
1632 	timer_delete_sync(&bp->timer);
1633 
1634 	spin_lock_irq(&bp->lock);
1635 
1636 	b44_halt(bp);
1637 	b44_free_rings(bp);
1638 	netif_carrier_off(dev);
1639 
1640 	spin_unlock_irq(&bp->lock);
1641 
1642 	free_irq(dev->irq, dev);
1643 
1644 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
1645 		b44_init_hw(bp, B44_PARTIAL_RESET);
1646 		b44_setup_wol(bp);
1647 	}
1648 
1649 	b44_free_consistent(bp);
1650 
1651 	return 0;
1652 }
1653 
1654 static void b44_get_stats64(struct net_device *dev,
1655 			    struct rtnl_link_stats64 *nstat)
1656 {
1657 	struct b44 *bp = netdev_priv(dev);
1658 	struct b44_hw_stats *hwstat = &bp->hw_stats;
1659 	unsigned int start;
1660 
1661 	do {
1662 		start = u64_stats_fetch_begin(&hwstat->syncp);
1663 
1664 		/* Convert HW stats into rtnl_link_stats64 stats. */
1665 		nstat->rx_packets = hwstat->rx_pkts;
1666 		nstat->tx_packets = hwstat->tx_pkts;
1667 		nstat->rx_bytes   = hwstat->rx_octets;
1668 		nstat->tx_bytes   = hwstat->tx_octets;
1669 		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1670 				     hwstat->tx_oversize_pkts +
1671 				     hwstat->tx_underruns +
1672 				     hwstat->tx_excessive_cols +
1673 				     hwstat->tx_late_cols);
1674 		nstat->multicast  = hwstat->rx_multicast_pkts;
1675 		nstat->collisions = hwstat->tx_total_cols;
1676 
1677 		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1678 					   hwstat->rx_undersize);
1679 		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1680 		nstat->rx_frame_errors  = hwstat->rx_align_errs;
1681 		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1682 		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1683 					   hwstat->rx_oversize_pkts +
1684 					   hwstat->rx_missed_pkts +
1685 					   hwstat->rx_crc_align_errs +
1686 					   hwstat->rx_undersize +
1687 					   hwstat->rx_crc_errs +
1688 					   hwstat->rx_align_errs +
1689 					   hwstat->rx_symbol_errs);
1690 
1691 		nstat->tx_aborted_errors = hwstat->tx_underruns;
1692 #if 0
1693 		/* Carrier lost counter seems to be broken for some devices */
1694 		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1695 #endif
1696 	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
1697 
1698 }
1699 
1700 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1701 {
1702 	struct netdev_hw_addr *ha;
1703 	int i, num_ents;
1704 
1705 	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1706 	i = 0;
1707 	netdev_for_each_mc_addr(ha, dev) {
1708 		if (i == num_ents)
1709 			break;
1710 		__b44_cam_write(bp, ha->addr, i++ + 1);
1711 	}
1712 	return i + 1;
1713 }
1714 
1715 static void __b44_set_rx_mode(struct net_device *dev)
1716 {
1717 	struct b44 *bp = netdev_priv(dev);
1718 	u32 val;
1719 
1720 	val = br32(bp, B44_RXCONFIG);
1721 	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1722 	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1723 		val |= RXCONFIG_PROMISC;
1724 		bw32(bp, B44_RXCONFIG, val);
1725 	} else {
1726 		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1727 		int i = 1;
1728 
1729 		__b44_set_mac_addr(bp);
1730 
1731 		if ((dev->flags & IFF_ALLMULTI) ||
1732 		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1733 			val |= RXCONFIG_ALLMULTI;
1734 		else
1735 			i = __b44_load_mcast(bp, dev);
1736 
1737 		for (; i < 64; i++)
1738 			__b44_cam_write(bp, zero, i);
1739 
1740 		bw32(bp, B44_RXCONFIG, val);
1741 		val = br32(bp, B44_CAM_CTRL);
1742 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1743 	}
1744 }
1745 
1746 static void b44_set_rx_mode(struct net_device *dev)
1747 {
1748 	struct b44 *bp = netdev_priv(dev);
1749 
1750 	spin_lock_irq(&bp->lock);
1751 	__b44_set_rx_mode(dev);
1752 	spin_unlock_irq(&bp->lock);
1753 }
1754 
1755 static u32 b44_get_msglevel(struct net_device *dev)
1756 {
1757 	struct b44 *bp = netdev_priv(dev);
1758 	return bp->msg_enable;
1759 }
1760 
1761 static void b44_set_msglevel(struct net_device *dev, u32 value)
1762 {
1763 	struct b44 *bp = netdev_priv(dev);
1764 	bp->msg_enable = value;
1765 }
1766 
1767 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1768 {
1769 	struct b44 *bp = netdev_priv(dev);
1770 	struct ssb_bus *bus = bp->sdev->bus;
1771 
1772 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1773 	switch (bus->bustype) {
1774 	case SSB_BUSTYPE_PCI:
1775 		strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1776 		break;
1777 	case SSB_BUSTYPE_SSB:
1778 		strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
1779 		break;
1780 	case SSB_BUSTYPE_PCMCIA:
1781 	case SSB_BUSTYPE_SDIO:
1782 		WARN_ON(1); /* A device with this bus does not exist. */
1783 		break;
1784 	}
1785 }
1786 
1787 static int b44_nway_reset(struct net_device *dev)
1788 {
1789 	struct b44 *bp = netdev_priv(dev);
1790 	u32 bmcr;
1791 	int r;
1792 
1793 	spin_lock_irq(&bp->lock);
1794 	b44_readphy(bp, MII_BMCR, &bmcr);
1795 	b44_readphy(bp, MII_BMCR, &bmcr);
1796 	r = -EINVAL;
1797 	if (bmcr & BMCR_ANENABLE)
1798 		r = b44_writephy(bp, MII_BMCR,
1799 				 bmcr | BMCR_ANRESTART);
1800 	spin_unlock_irq(&bp->lock);
1801 
1802 	return r;
1803 }
1804 
1805 static int b44_get_link_ksettings(struct net_device *dev,
1806 				  struct ethtool_link_ksettings *cmd)
1807 {
1808 	struct b44 *bp = netdev_priv(dev);
1809 	u32 supported, advertising;
1810 
1811 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1812 		BUG_ON(!dev->phydev);
1813 		phy_ethtool_ksettings_get(dev->phydev, cmd);
1814 
1815 		return 0;
1816 	}
1817 
1818 	supported = (SUPPORTED_Autoneg);
1819 	supported |= (SUPPORTED_100baseT_Half |
1820 		      SUPPORTED_100baseT_Full |
1821 		      SUPPORTED_10baseT_Half |
1822 		      SUPPORTED_10baseT_Full |
1823 		      SUPPORTED_MII);
1824 
1825 	advertising = 0;
1826 	if (bp->flags & B44_FLAG_ADV_10HALF)
1827 		advertising |= ADVERTISED_10baseT_Half;
1828 	if (bp->flags & B44_FLAG_ADV_10FULL)
1829 		advertising |= ADVERTISED_10baseT_Full;
1830 	if (bp->flags & B44_FLAG_ADV_100HALF)
1831 		advertising |= ADVERTISED_100baseT_Half;
1832 	if (bp->flags & B44_FLAG_ADV_100FULL)
1833 		advertising |= ADVERTISED_100baseT_Full;
1834 	advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1835 	cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1836 		SPEED_100 : SPEED_10;
1837 	cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1838 		DUPLEX_FULL : DUPLEX_HALF;
1839 	cmd->base.port = 0;
1840 	cmd->base.phy_address = bp->phy_addr;
1841 	cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1842 		AUTONEG_DISABLE : AUTONEG_ENABLE;
1843 	if (cmd->base.autoneg == AUTONEG_ENABLE)
1844 		advertising |= ADVERTISED_Autoneg;
1845 
1846 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1847 						supported);
1848 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1849 						advertising);
1850 
1851 	if (!netif_running(dev)) {
1852 		cmd->base.speed = 0;
1853 		cmd->base.duplex = DUPLEX_UNKNOWN;
1854 	}
1855 
1856 	return 0;
1857 }
1858 
1859 static int b44_set_link_ksettings(struct net_device *dev,
1860 				  const struct ethtool_link_ksettings *cmd)
1861 {
1862 	struct b44 *bp = netdev_priv(dev);
1863 	u32 speed;
1864 	int ret;
1865 	u32 advertising;
1866 
1867 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1868 		BUG_ON(!dev->phydev);
1869 		spin_lock_irq(&bp->lock);
1870 		if (netif_running(dev))
1871 			b44_setup_phy(bp);
1872 
1873 		ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
1874 
1875 		spin_unlock_irq(&bp->lock);
1876 
1877 		return ret;
1878 	}
1879 
1880 	speed = cmd->base.speed;
1881 
1882 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1883 						cmd->link_modes.advertising);
1884 
1885 	/* We do not support gigabit. */
1886 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
1887 		if (advertising &
1888 		    (ADVERTISED_1000baseT_Half |
1889 		     ADVERTISED_1000baseT_Full))
1890 			return -EINVAL;
1891 	} else if ((speed != SPEED_100 &&
1892 		    speed != SPEED_10) ||
1893 		   (cmd->base.duplex != DUPLEX_HALF &&
1894 		    cmd->base.duplex != DUPLEX_FULL)) {
1895 		return -EINVAL;
1896 	}
1897 
1898 	spin_lock_irq(&bp->lock);
1899 
1900 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
1901 		bp->flags &= ~(B44_FLAG_FORCE_LINK |
1902 			       B44_FLAG_100_BASE_T |
1903 			       B44_FLAG_FULL_DUPLEX |
1904 			       B44_FLAG_ADV_10HALF |
1905 			       B44_FLAG_ADV_10FULL |
1906 			       B44_FLAG_ADV_100HALF |
1907 			       B44_FLAG_ADV_100FULL);
1908 		if (advertising == 0) {
1909 			bp->flags |= (B44_FLAG_ADV_10HALF |
1910 				      B44_FLAG_ADV_10FULL |
1911 				      B44_FLAG_ADV_100HALF |
1912 				      B44_FLAG_ADV_100FULL);
1913 		} else {
1914 			if (advertising & ADVERTISED_10baseT_Half)
1915 				bp->flags |= B44_FLAG_ADV_10HALF;
1916 			if (advertising & ADVERTISED_10baseT_Full)
1917 				bp->flags |= B44_FLAG_ADV_10FULL;
1918 			if (advertising & ADVERTISED_100baseT_Half)
1919 				bp->flags |= B44_FLAG_ADV_100HALF;
1920 			if (advertising & ADVERTISED_100baseT_Full)
1921 				bp->flags |= B44_FLAG_ADV_100FULL;
1922 		}
1923 	} else {
1924 		bp->flags |= B44_FLAG_FORCE_LINK;
1925 		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1926 		if (speed == SPEED_100)
1927 			bp->flags |= B44_FLAG_100_BASE_T;
1928 		if (cmd->base.duplex == DUPLEX_FULL)
1929 			bp->flags |= B44_FLAG_FULL_DUPLEX;
1930 	}
1931 
1932 	if (netif_running(dev))
1933 		b44_setup_phy(bp);
1934 
1935 	spin_unlock_irq(&bp->lock);
1936 
1937 	return 0;
1938 }
1939 
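/* Ring sizing: the chip keeps at most B44_RX_RING_SIZE - 1 and
 * B44_TX_RING_SIZE - 1 descriptors pending; resizing halts the chip
 * and rebuilds the rings under bp->lock.
 */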
static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;
	ering->tx_max_pending = B44_TX_RING_SIZE - 1;
	ering->tx_pending = bp->tx_pending;
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering,
			     struct kernel_ethtool_ringparam *kernel_ering,
			     struct netlink_ext_ack *extack)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

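/* Flow control: when pause autonegotiation is enabled, a full re-init
 * is needed so the pause bits get renegotiated; otherwise the RX/TX
 * pause settings are programmed into the hardware directly.
 */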
static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (netif_running(dev)) {
		if (bp->flags & B44_FLAG_PAUSE_AUTO) {
			b44_halt(bp);
			b44_init_rings(bp);
			b44_init_hw(bp, B44_FULL_RESET);
		} else {
			__b44_set_flow_ctrl(bp, bp->flags);
		}
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

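/* Copy the hardware counters out, starting at tx_good_octets and in
 * the same order as b44_gstrings. The u64_stats retry loop yields a
 * consistent snapshot against a concurrent b44_stats_update().
 */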
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	u32 i;

	spin_lock_irq(&bp->lock);
	b44_stats_update(bp);
	spin_unlock_irq(&bp->lock);

	do {
		data_src = &hwstat->tx_good_octets;
		data_dst = data;
		start = u64_stats_fetch_begin(&hwstat->syncp);

		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
			*data_dst++ = *data_src++;

	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_link_ksettings	= b44_get_link_ksettings,
	.set_link_ksettings	= b44_set_link_ksettings,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		err = phy_mii_ioctl(dev->phydev, ifr, cmd);
	} else {
		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
	}
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

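/* Read the MAC and PHY addresses from the SPROM; the second core on an
 * SSB bus uses the et1 variants. Also record the DMA translation
 * offset and the default interrupt mask.
 */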
static int b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	eth_hw_addr_set(bp->dev, addr);

	if (!is_valid_ether_addr(bp->dev->dev_addr)) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats64	= b44_get_stats64,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

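/* phylib link-change callback for the external PHY: mirror the duplex
 * reported by the PHY into B44_TX_CTRL and log link transitions.
 */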
static void b44_adjust_link(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool status_changed = false;

	BUG_ON(!phydev);

	if (bp->old_link != phydev->link) {
		status_changed = true;
		bp->old_link = phydev->link;
	}

	/* reflect duplex change */
	if (phydev->link) {
		if ((phydev->duplex == DUPLEX_HALF) &&
		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
		} else if ((phydev->duplex == DUPLEX_FULL) &&
			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		}
	}

	if (status_changed) {
		u32 val = br32(bp, B44_TX_CTRL);

		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			val |= TX_CTRL_DUPLEX;
		else
			val &= ~TX_CTRL_DUPLEX;
		bw32(bp, B44_TX_CTRL, val);
		phy_print_status(phydev);
	}
}

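/* Register a private MDIO bus for the external PHY. Boards with a
 * Robo or ADM switch may have no PHY at the expected address; fall
 * back to a fixed 100 Mbit/s full-duplex pseudo-PHY there.
 */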
static int b44_register_phy_one(struct b44 *bp)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct mii_bus *mii_bus;
	struct ssb_device *sdev = bp->sdev;
	struct phy_device *phydev;
	struct ssb_sprom *sprom = &sdev->bus->sprom;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus) {
		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	mii_bus->priv = bp;
	mii_bus->read = b44_mdio_read_phylib;
	mii_bus->write = b44_mdio_write_phylib;
	mii_bus->name = "b44_eth_mii";
	mii_bus->parent = sdev->dev;
	mii_bus->phy_mask = ~(1 << bp->phy_addr);
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);

	bp->mii_bus = mii_bus;

	err = mdiobus_register(mii_bus);
	if (err) {
		dev_err(sdev->dev, "failed to register MII bus\n");
		goto err_out_mdiobus;
	}

	phydev = mdiobus_get_phy(bp->mii_bus, bp->phy_addr);
	if (!phydev &&
	    sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM)) {
		dev_info(sdev->dev,
			 "could not find PHY at %i, using fixed one\n",
			 bp->phy_addr);

		phydev = fixed_phy_register_100fd();
		if (!IS_ERR(phydev))
			bp->phy_addr = phydev->mdio.addr;
	}

	if (IS_ERR_OR_NULL(phydev))
		err = -ENODEV;
	else
		err = phy_connect_direct(bp->dev, phydev, &b44_adjust_link,
					 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(sdev->dev, "could not attach PHY at %i\n",
			bp->phy_addr);
		goto err_out_mdiobus_unregister;
	}

	/* mask with MAC supported features */
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	bp->old_link = 0;

	phy_attached_info(phydev);

	return 0;

err_out_mdiobus_unregister:
	mdiobus_unregister(mii_bus);

err_out_mdiobus:
	mdiobus_free(mii_bus);

err_out:
	return err;
}

static void b44_unregister_phy_one(struct b44 *bp)
{
	struct mii_bus *mii_bus = bp->mii_bus;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;

	phydev = dev->phydev;

	phy_disconnect(phydev);
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);
	mdiobus_unregister(mii_bus);
	mdiobus_free(mii_bus);
}

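/* Probe one SSB core: allocate the netdev, power up the bus, require
 * the 30-bit DMA mask this DMA engine can address, fetch the
 * invariants, and register the device before resetting chip and PHY.
 */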
static int b44_init_one(struct ssb_device *sdev,
			const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);
	u64_stats_init(&bp->hw_stats.syncp);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->min_mtu = B44_MIN_MTU;
	dev->max_mtu = B44_MAX_MTU;
	dev->irq = sdev->irq;
	dev->ethtool_ops = &b44_ethtool_ops;

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to power up the bus\n");
		goto err_out_free_dev;
	}

	err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
		err = -ENODEV;
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mdio_read_mii;
	bp->mii_if.mdio_write = b44_mdio_write_mii;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	err = b44_phy_reset(bp);
	if (err < 0) {
		dev_err(sdev->dev, "phy reset failed\n");
		goto err_out_unregister_netdev;
	}

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		err = b44_register_phy_one(bp);
		if (err) {
			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
			goto err_out_unregister_netdev;
		}
	}

	device_set_wakeup_capable(sdev->dev, true);
	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	netif_napi_del(&bp->napi);
	free_netdev(dev);

out:
	return err;
}

static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_unregister_phy_one(bp);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	netif_napi_del(&bp->napi);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

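/* Suspend: stop the timer, halt the MAC and free the rings under
 * bp->lock; if Wake-on-LAN is enabled, partially re-init the hardware
 * and arm the magic-packet match before powering down.
 */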
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	timer_delete_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to power up the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

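/* PCI host glue: register/unregister the SSB PCI host driver; both
 * helpers compile to no-ops when CONFIG_B44_PCI is not set.
 */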
static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size,
				   sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);