/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME "b44"
#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE \
        (NETIF_MSG_DRV | \
         NETIF_MSG_PROBE | \
         NETIF_MSG_LINK | \
         NETIF_MSG_TIMER | \
         NETIF_MSG_IFDOWN | \
         NETIF_MSG_IFUP | \
         NETIF_MSG_RX_ERR | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU ETH_ZLEN
#define B44_MAX_MTU ETH_DATA_LEN

#define B44_RX_RING_SIZE 512
#define B44_DEF_RX_RING_PENDING 200
#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
                           B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE 512
#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
                           B44_TX_RING_SIZE)

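/*
 * TX ring accounting (a sketch of the arithmetic below): tx_pending is
 * how many descriptors the driver allows in flight, so TX_RING_GAP() is
 * the slack kept between producer and consumer.  TX_BUFFS_AVAIL()
 * handles both the unwrapped and wrapped cases; e.g. with
 * tx_pending = 511 and tx_cons == tx_prod it yields 511 free slots.
 */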
#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP) \
        (((BP)->tx_cons <= (BP)->tx_prod) ? \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))

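/*
 * RX buffer layout: the chip-written struct rx_header, two bytes of
 * padding (presumably the usual trick that lands the IP header on a
 * 4-byte boundary), then up to 1536 bytes of frame data.
 */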
#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE 0x400
#define B44_PATTERN_SIZE 0x80
#define B44_PMASK_BASE 0x600
#define B44_PMASK_SIZE 0x10
#define B44_MAX_PATTERNS 16
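/* Offsets at which a magic packet payload can start: Ethernet (14) +
 * IPv4 (20) + UDP (8) = 42 bytes, and Ethernet (14) + IPv6 (40) +
 * UDP (8) = 62 bytes.
 */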
#define B44_ETHIPV6UDP_HLEN 62
#define B44_ETHIPV4UDP_HLEN 42

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");


#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
        { 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
        .name = DRV_MODULE_NAME,
        .id_table = b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
        {},
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

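/* Reset variants: the "full" resets reprogram the MAC (and, unless
 * _SKIP_PHY, the PHY as well), the partial reset is used when going
 * down for Wake-on-LAN, and the _CHIP_ variants select how much of
 * that b44_chip_reset() itself performs.
 */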
#define B44_FULL_RESET 1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET 3
#define B44_CHIP_RESET_FULL 4
#define B44_CHIP_RESET_PARTIAL 5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...) # x,
B44_STAT_REG_DECLARE
#undef _B44
};

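/* When a descriptor ring had to fall back to kmalloc() plus a
 * streaming DMA mapping (the *_RING_HACK flags below), individual
 * descriptors must be synced explicitly around hardware accesses.
 */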
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
                                   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
                                dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                if (net_ratelimit())
                        netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
                                   bit, reg, clear ? "clear" : "set");

                return -ENODEV;
        }
        return 0;
}

static inline void __b44_cam_write(struct b44 *bp,
                                   const unsigned char *data, int index)
{
        u32 val;

        val = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) << 8;
        val |= ((u32) data[5]) << 0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                                (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

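/* MDIO access: a clause-22 frame is composed in the MDIO_DATA
 * register and completion is signalled by EMAC_INT_MII in EMAC_ISTAT,
 * which is polled via b44_wait_bit().
 */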
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                                 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                                 (phy_addr << MDIO_DATA_PMD_SHIFT) |
                                 (reg << MDIO_DATA_RA_SHIFT) |
                                 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                                 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                                 (phy_addr << MDIO_DATA_PMD_SHIFT) |
                                 (reg << MDIO_DATA_RA_SHIFT) |
                                 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                                 (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return 0;

        return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return 0;

        return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = __b44_readphy(bp, phy_id, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
                               int val)
{
        struct b44 *bp = netdev_priv(dev);
        __b44_writephy(bp, phy_id, location, val);
}

static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = bus->priv;
        int rc = __b44_readphy(bp, phy_id, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
                                 u16 val)
{
        struct b44 *bp = bus->priv;
        return __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return 0;
        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        netdev_err(bp->dev, "PHY Reset would not complete\n");
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
        char buf[20];
        u32 val;
        int err;

        /*
         * workaround for bad hardware design in Linksys WAP54G v1.0
         * see https://dev.openwrt.org/ticket/146
         * check and reset bit "isolate"
         */
        if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
                return;
        if (simple_strtoul(buf, NULL, 0) == 2) {
                err = __b44_readphy(bp, 0, MII_BMCR, &val);
                if (err)
                        goto error;
                if (!(val & BMCR_ISOLATE))
                        return;
                val &= ~BMCR_ISOLATE;
                err = __b44_writephy(bp, 0, MII_BMCR, val);
                if (err)
                        goto error;
        }
        return;
error:
        pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        b44_wap54g10_workaround(bp);

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return 0;
        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u64 *val;

        val = &bp->hw_stats.tx_good_octets;
        u64_stats_update_begin(&bp->hw_stats.syncp);

        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        u64_stats_update_end(&bp->hw_stats.syncp);
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                netdev_info(bp->dev, "Link is down\n");
        } else {
                netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                            (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
                            (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                            (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
                bp->flags |= B44_FLAG_100_BASE_T;
                if (!netif_carrier_ok(bp->dev)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                }
                return;
        }

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        netdev_warn(bp->dev, "Remote fault detected in PHY\n");
                if (bmsr & BMSR_JCD)
                        netdev_warn(bp->dev, "Jabber detected in PHY\n");
        }
}

static void b44_timer(struct timer_list *t)
{
        struct b44 *bp = timer_container_of(bp, t, timer);

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;
        unsigned bytes_compl = 0, pkts_compl = 0;

        cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                dma_unmap_single(bp->sdev->dma_dev,
                                 rp->mapping,
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;

                bytes_compl += skb->len;
                pkts_compl++;

                dev_consume_skb_irq(skb);
        }

        netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
            mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                /* Sigh... */
                if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                        dma_unmap_single(bp->sdev->dma_dev, mapping,
                                         RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         DMA_FROM_DEVICE);
                if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
                    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
                        if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
                bp->force_copybreak = 1;
        }

        rh = (struct rx_header *) skb->data;

        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        map->mapping = mapping;

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        dest_map->mapping = src_map->mapping;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
                                   RX_PKT_BUF_SZ,
                                   DMA_FROM_DEVICE);
}

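/* Receive up to @budget packets.  The hardware producer index comes
 * from DMARX_STAT; small frames (or every frame, once force_copybreak
 * was set by the >1GB DMA workaround) are copied into a fresh skb so
 * the original buffer can be recycled in place.
 */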
static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = rp->mapping;
                struct rx_header *rh;
                u16 len;

                dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
                                        RX_PKT_BUF_SZ,
                                        DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->dev->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        dma_unmap_single(bp->sdev->dma_dev, map,
                                         skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = napi_alloc_skb(&bp->napi, len);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb_checksum_none_assert(skb);
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

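/* NAPI poll handler.  TX completion and error recovery run under
 * bp->lock; RX runs without it against the NAPI budget, and interrupts
 * are re-enabled only when the budget was not exhausted.
 */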
static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        int work_done;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        if (bp->istat & ISTAT_RFO) {    /* fast recovery, in ~20msec */
                bp->istat &= ~ISTAT_RFO;
                b44_disable_ints(bp);
                ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
        }

        spin_unlock_irqrestore(&bp->lock, flags);

        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                work_done = 0;
        }

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                b44_enable_ints(bp);
        }

        return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        netdev_info(dev, "late interrupt\n");
                        goto irq_ack;
                }

                if (napi_schedule_prep(&bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __napi_schedule(&bp->napi);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct b44 *bp = netdev_priv(dev);

        netdev_err(dev, "transmit timed out, resetting\n");

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;
        unsigned long flags;

        len = skb->len;
        spin_lock_irqsave(&bp->lock, flags);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                goto err_out;
        }

        mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                        dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                         DMA_TO_DEVICE);

                bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
                                         len, DMA_TO_DEVICE);
                if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
                        if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
                                dma_unmap_single(bp->sdev->dma_dev, mapping,
                                                 len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_consume_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        bp->tx_buffers[entry].mapping = mapping;

        ctrl = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        netdev_sent_queue(dev, skb->len);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

out_unlock:
        spin_unlock_irqrestore(&bp->lock, flags);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                WRITE_ONCE(dev->mtu, new_mtu);
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        WRITE_ONCE(dev->mtu, new_mtu);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES, DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                          bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES, DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                          bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
        int size;

        size = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, gfp);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, gfp);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
                                         &bp->rx_ring_dma, gfp);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to dma_alloc_coherent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary... */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, gfp);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
                    rx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
                                         &bp->tx_ring_dma, gfp);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to ssb_dma_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary... */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, gfp);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
                    tx_ring_dma + size > DMA_BIT_MASK(30)) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
        struct ssb_device *sdev = bp->sdev;
        bool was_enabled;

        was_enabled = ssb_device_is_enabled(bp->sdev);

        ssb_device_enable(bp->sdev, 0);
        ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

        if (was_enabled) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        }

        b44_clear_stats(bp);

        /*
         * Don't enable PHY if we are doing a partial reset
         * we are probably going to power down
         */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        switch (sdev->bus->bustype) {
        case SSB_BUSTYPE_SSB:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                                         (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
                                                            B44_MDC_RATIO)
                                          & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCI:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                                         (0x0d & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCMCIA:
        case SSB_BUSTYPE_SDIO:
                WARN_ON(1); /* A device with this bus does not exist. */
                break;
        }

        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags |= B44_FLAG_EXTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        /* reset PHY */
        b44_phy_reset(bp);
        /* power down PHY */
        netdev_info(bp->dev, "powering down PHY\n");
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
        /* now reset the chip, but without enabling the MAC&PHY
         * part of it.  This has to be done _after_ we shut down the PHY */
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        else
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;
        u32 val;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        eth_hw_addr_set(dev, addr->sa_data);

        spin_lock_irq(&bp->lock);

        val = br32(bp, B44_RXCONFIG);
        if (!(val & RXCONFIG_CAM_ABSENT))
                __b44_set_mac_addr(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too. */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                          (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                          (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

        netdev_reset_queue(bp->dev);
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp, GFP_KERNEL);
        if (err)
                goto out;

        napi_enable(&bp->napi);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                napi_disable(&bp->napi);
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        timer_setup(&bp->timer, b44_timer, 0);
        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);

        b44_enable_ints(bp);

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                phy_start(dev->phydev);

        netif_start_queue(dev);
out:
        return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

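/* Build a Wake-on-LAN magic packet pattern at @offset: six 0xff sync
 * bytes followed by repetitions of the MAC address, setting a mask bit
 * for every byte that must match.  Returns the pattern length minus
 * one, the encoding the WKUP_LEN register expects.
 */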
static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
                             int offset)
{
        int magicsync = 6;
        int k, j, len = offset;
        int ethaddr_bytes = ETH_ALEN;

        memset(ppattern + offset, 0xff, magicsync);
        for (j = 0; j < magicsync; j++) {
                pmask[len >> 3] |= BIT(len & 7);
                len++;
        }

        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
                        ethaddr_bytes = ETH_ALEN;
                else
                        ethaddr_bytes = B44_PATTERN_SIZE - len;
                if (ethaddr_bytes <= 0)
                        break;
                for (k = 0; k < ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                 (j * ETH_ALEN) + k] = macaddr[k];
                        pmask[len >> 3] |= BIT(len & 7);
                        len++;
                }
        }
        return len - 1;
}

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{

        u32 val;
        int plen0, plen1, plen2;
        u8 *pwol_pattern;
        u8 pwol_mask[B44_PMASK_SIZE];

        pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
        if (!pwol_pattern)
                return;

        /* IPv4 magic packet pattern - pattern 0. */
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  B44_ETHIPV4UDP_HLEN);

        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

        /* Raw ethernet II magic packet pattern - pattern 1 */
        memset(pwol_pattern, 0, B44_PATTERN_SIZE);
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  ETH_HLEN);

        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
                       B44_PATTERN_BASE + B44_PATTERN_SIZE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
                       B44_PMASK_BASE + B44_PMASK_SIZE);

        /* IPv6 magic packet pattern - pattern 2 */
        memset(pwol_pattern, 0, B44_PATTERN_SIZE);
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  B44_ETHIPV6UDP_HLEN);

        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
                       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
                       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

        kfree(pwol_pattern);

        /* set these patterns' lengths: one less than each real length */
        val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
        bw32(bp, B44_WKUP_LEN, val);

        /* enable wakeup pattern matching */
        val = br32(bp, B44_DEVCTRL);
        bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);

}

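/* For PCI-hosted cores, waking the host additionally requires PME to
 * be enabled both in the SSB core (SSB_TMSLOW_PE) and in the PCI
 * power-management registers of the host device, as done below.
 */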
#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
        u16 val;

        if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
                bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
                pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
                pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
        }
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

static void b44_setup_wol(struct b44 *bp)
{
        u32 val;

        bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

        if (bp->flags & B44_FLAG_B0_ANDLATER) {

                bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

                val = bp->dev->dev_addr[2] << 24 |
                      bp->dev->dev_addr[3] << 16 |
                      bp->dev->dev_addr[4] << 8 |
                      bp->dev->dev_addr[5];
                bw32(bp, B44_ADDR_LO, val);

                val = bp->dev->dev_addr[0] << 8 |
                      bp->dev->dev_addr[1];
                bw32(bp, B44_ADDR_HI, val);

                val = br32(bp, B44_DEVCTRL);
                bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

        } else {
                b44_setup_pseudo_magicp(bp);
        }
        b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        netif_stop_queue(dev);

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                phy_stop(dev->phydev);

        napi_disable(&bp->napi);

        timer_delete_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_free_rings(bp);
        netif_carrier_off(dev);

        spin_unlock_irq(&bp->lock);

        free_irq(dev->irq, dev);

        if (bp->flags & B44_FLAG_WOL_ENABLE) {
                b44_init_hw(bp, B44_PARTIAL_RESET);
                b44_setup_wol(bp);
        }

        b44_free_consistent(bp);

        return 0;
}

static void b44_get_stats64(struct net_device *dev,
                            struct rtnl_link_stats64 *nstat)
{
        struct b44 *bp = netdev_priv(dev);
        struct b44_hw_stats *hwstat = &bp->hw_stats;
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&hwstat->syncp);

                /* Convert HW stats into rtnl_link_stats64 stats. */
                nstat->rx_packets = hwstat->rx_pkts;
                nstat->tx_packets = hwstat->tx_pkts;
                nstat->rx_bytes = hwstat->rx_octets;
                nstat->tx_bytes = hwstat->tx_octets;
                nstat->tx_errors = (hwstat->tx_jabber_pkts +
                                    hwstat->tx_oversize_pkts +
                                    hwstat->tx_underruns +
                                    hwstat->tx_excessive_cols +
                                    hwstat->tx_late_cols);
                nstat->multicast = hwstat->rx_multicast_pkts;
                nstat->collisions = hwstat->tx_total_cols;

                nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                                           hwstat->rx_undersize);
                nstat->rx_over_errors = hwstat->rx_missed_pkts;
                nstat->rx_frame_errors = hwstat->rx_align_errs;
                nstat->rx_crc_errors = hwstat->rx_crc_errs;
                nstat->rx_errors = (hwstat->rx_jabber_pkts +
                                    hwstat->rx_oversize_pkts +
                                    hwstat->rx_missed_pkts +
                                    hwstat->rx_crc_align_errs +
                                    hwstat->rx_undersize +
                                    hwstat->rx_crc_errs +
                                    hwstat->rx_align_errs +
                                    hwstat->rx_symbol_errs);

                nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
                /* Carrier lost counter seems to be broken for some devices */
                nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
        } while (u64_stats_fetch_retry(&hwstat->syncp, start));

}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        int i, num_ents;

        num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
        i = 0;
        netdev_for_each_mc_addr(ha, dev) {
                if (i == num_ents)
                        break;
                __b44_cam_write(bp, ha->addr, i++ + 1);
        }
        return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 val;

        val = br32(bp, B44_RXCONFIG);
        val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
        if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
                val |= RXCONFIG_PROMISC;
                bw32(bp, B44_RXCONFIG, val);
        } else {
                unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
                int i = 1;

                __b44_set_mac_addr(bp);

                if ((dev->flags & IFF_ALLMULTI) ||
                    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
                        val |= RXCONFIG_ALLMULTI;
                else
                        i = __b44_load_mcast(bp, dev);

                for (; i < 64; i++)
                        __b44_cam_write(bp, zero, i);

                bw32(bp, B44_RXCONFIG, val);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static void b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        __b44_set_rx_mode(dev);
        spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
        struct b44 *bp = netdev_priv(dev);
        bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct b44 *bp = netdev_priv(dev);
        struct ssb_bus *bus = bp->sdev->bus;

        strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
        switch (bus->bustype) {
        case SSB_BUSTYPE_PCI:
                strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
                break;
        case SSB_BUSTYPE_SSB:
                strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
                break;
        case SSB_BUSTYPE_PCMCIA:
        case SSB_BUSTYPE_SDIO:
                WARN_ON(1); /* A device with this bus does not exist. */
                break;
        }
}

static int b44_nway_reset(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 bmcr;
        int r;

        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
                return phy_ethtool_nway_reset(dev);

        spin_lock_irq(&bp->lock);
        b44_readphy(bp, MII_BMCR, &bmcr);
        b44_readphy(bp, MII_BMCR, &bmcr);
        r = -EINVAL;
        if (bmcr & BMCR_ANENABLE)
                r = b44_writephy(bp, MII_BMCR,
                                 bmcr | BMCR_ANRESTART);
        spin_unlock_irq(&bp->lock);

        return r;
}

static int b44_get_link_ksettings(struct net_device *dev,
                                  struct ethtool_link_ksettings *cmd)
{
        struct b44 *bp = netdev_priv(dev);
        u32 supported, advertising;

        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
                BUG_ON(!dev->phydev);
                phy_ethtool_ksettings_get(dev->phydev, cmd);

                return 0;
        }

        supported = (SUPPORTED_Autoneg);
        supported |= (SUPPORTED_100baseT_Half |
                      SUPPORTED_100baseT_Full |
                      SUPPORTED_10baseT_Half |
                      SUPPORTED_10baseT_Full |
                      SUPPORTED_MII);

        advertising = 0;
        if (bp->flags & B44_FLAG_ADV_10HALF)
                advertising |= ADVERTISED_10baseT_Half;
        if (bp->flags & B44_FLAG_ADV_10FULL)
                advertising |= ADVERTISED_10baseT_Full;
        if (bp->flags & B44_FLAG_ADV_100HALF)
                advertising |= ADVERTISED_100baseT_Half;
        if (bp->flags & B44_FLAG_ADV_100FULL)
                advertising |= ADVERTISED_100baseT_Full;
        advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
        cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
                SPEED_100 : SPEED_10;
        cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
                DUPLEX_FULL : DUPLEX_HALF;
        cmd->base.port = 0;
        cmd->base.phy_address = bp->phy_addr;
        cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
                AUTONEG_DISABLE : AUTONEG_ENABLE;
        if (cmd->base.autoneg == AUTONEG_ENABLE)
                advertising |= ADVERTISED_Autoneg;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                advertising);

        if (!netif_running(dev)) {
                cmd->base.speed = 0;
                cmd->base.duplex = 0xff;
        }

        return 0;
}

static int b44_set_link_ksettings(struct net_device *dev,
                                  const struct ethtool_link_ksettings *cmd)
{
        struct b44 *bp = netdev_priv(dev);
        u32 speed;
        int ret;
        u32 advertising;

        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
                BUG_ON(!dev->phydev);
                spin_lock_irq(&bp->lock);
                if (netif_running(dev))
                        b44_setup_phy(bp);

                ret = phy_ethtool_ksettings_set(dev->phydev, cmd);

                spin_unlock_irq(&bp->lock);

                return ret;
        }

        speed = cmd->base.speed;

        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);

        /* We do not support gigabit. */
        if (cmd->base.autoneg == AUTONEG_ENABLE) {
                if (advertising &
                    (ADVERTISED_1000baseT_Half |
                     ADVERTISED_1000baseT_Full))
                        return -EINVAL;
        } else if ((speed != SPEED_100 &&
                    speed != SPEED_10) ||
                   (cmd->base.duplex != DUPLEX_HALF &&
                    cmd->base.duplex != DUPLEX_FULL)) {
                return -EINVAL;
        }

        spin_lock_irq(&bp->lock);

        if (cmd->base.autoneg == AUTONEG_ENABLE) {
                bp->flags &= ~(B44_FLAG_FORCE_LINK |
                               B44_FLAG_100_BASE_T |
                               B44_FLAG_FULL_DUPLEX |
                               B44_FLAG_ADV_10HALF |
                               B44_FLAG_ADV_10FULL |
                               B44_FLAG_ADV_100HALF |
                               B44_FLAG_ADV_100FULL);
                if (advertising == 0) {
                        bp->flags |= (B44_FLAG_ADV_10HALF |
                                      B44_FLAG_ADV_10FULL |
                                      B44_FLAG_ADV_100HALF |
                                      B44_FLAG_ADV_100FULL);
                } else {
                        if (advertising & ADVERTISED_10baseT_Half)
                                bp->flags |= B44_FLAG_ADV_10HALF;
                        if (advertising & ADVERTISED_10baseT_Full)
                                bp->flags |= B44_FLAG_ADV_10FULL;
                        if (advertising & ADVERTISED_100baseT_Half)
                                bp->flags |= B44_FLAG_ADV_100HALF;
                        if (advertising & ADVERTISED_100baseT_Full)
                                bp->flags |= B44_FLAG_ADV_100FULL;
                }
        } else {
                bp->flags |= B44_FLAG_FORCE_LINK;
                bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
                if (speed == SPEED_100)
                        bp->flags |= B44_FLAG_100_BASE_T;
                if (cmd->base.duplex == DUPLEX_FULL)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
        }

        if (netif_running(dev))
                b44_setup_phy(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;
	/* struct ethtool_ringparam does have TX fields these days;
	 * report limits matching the checks in b44_set_ringparam().
	 */
	ering->tx_max_pending = B44_TX_RING_SIZE - 1;
	ering->tx_pending = bp->tx_pending;
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering,
			     struct kernel_ethtool_ringparam *kernel_ering,
			     struct netlink_ext_ack *extack)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (netif_running(dev)) {
		if (bp->flags & B44_FLAG_PAUSE_AUTO) {
			b44_halt(bp);
			b44_init_rings(bp);
			b44_init_hw(bp, B44_FULL_RESET);
		} else {
			__b44_set_flow_ctrl(bp, bp->flags);
		}
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	u32 i;

	spin_lock_irq(&bp->lock);
	b44_stats_update(bp);
	spin_unlock_irq(&bp->lock);

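	/* Snapshot the counters under the u64_stats seqcount: if a writer
	 * updates hw_stats while we copy, fetch_retry tells us to redo the
	 * copy, keeping the 64-bit values consistent on 32-bit hosts.
	 */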
	do {
		data_src = &hwstat->tx_good_octets;
		data_dst = data;
		start = u64_stats_fetch_begin(&hwstat->syncp);

		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
			*data_dst++ = *data_src++;

	} while (u64_stats_fetch_retry(&hwstat->syncp, start));
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_link_ksettings	= b44_get_link_ksettings,
	.set_link_ksettings	= b44_set_link_ksettings,
};
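
/* For reference, these callbacks back the usual ethtool(8) operations;
 * the interface name below is illustrative:
 *
 *   ethtool eth0                                  -> b44_get_link_ksettings()
 *   ethtool -s eth0 speed 100 duplex full autoneg off
 *                                                 -> b44_set_link_ksettings()
 *   ethtool -g eth0 / ethtool -G eth0 rx 128      -> b44_{get,set}_ringparam()
 *   ethtool -a eth0 / ethtool -A eth0 autoneg on  -> b44_{get,set}_pauseparam()
 *   ethtool -S eth0                               -> b44_get_ethtool_stats()
 *   ethtool -s eth0 wol g                         -> b44_set_wol()
 */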

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		err = phy_mii_ioctl(dev->phydev, ifr, cmd);
	} else {
		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
	}
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

static int b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
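	/* (e.g. a sign-extended 0xffe5 masks down to PHY address 0x05) */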
	bp->phy_addr &= 0x1F;

	eth_hw_addr_set(bp->dev, addr);

	if (!is_valid_ether_addr(bp->dev->dev_addr)) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats64	= b44_get_stats64,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

static void b44_adjust_link(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool status_changed = false;

	BUG_ON(!phydev);

	if (bp->old_link != phydev->link) {
		status_changed = true;
		bp->old_link = phydev->link;
	}

	/* reflect duplex change */
	if (phydev->link) {
		if ((phydev->duplex == DUPLEX_HALF) &&
		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
		} else if ((phydev->duplex == DUPLEX_FULL) &&
			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		}
	}

	if (status_changed) {
		u32 val = br32(bp, B44_TX_CTRL);

		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			val |= TX_CTRL_DUPLEX;
		else
			val &= ~TX_CTRL_DUPLEX;
		bw32(bp, B44_TX_CTRL, val);
		phy_print_status(phydev);
	}
}

static int b44_register_phy_one(struct b44 *bp)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct mii_bus *mii_bus;
	struct ssb_device *sdev = bp->sdev;
	struct phy_device *phydev;
	struct ssb_sprom *sprom = &sdev->bus->sprom;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus) {
		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	mii_bus->priv = bp;
	mii_bus->read = b44_mdio_read_phylib;
	mii_bus->write = b44_mdio_write_phylib;
	mii_bus->name = "b44_eth_mii";
	mii_bus->parent = sdev->dev;
	mii_bus->phy_mask = ~(1 << bp->phy_addr);
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);

	bp->mii_bus = mii_bus;

	err = mdiobus_register(mii_bus);
	if (err) {
		dev_err(sdev->dev, "failed to register MII bus\n");
		goto err_out_mdiobus;
	}

	phydev = mdiobus_get_phy(bp->mii_bus, bp->phy_addr);
	if (!phydev &&
	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
		dev_info(sdev->dev,
			 "could not find PHY at %i, using fixed PHY\n",
			 bp->phy_addr);

		phydev = fixed_phy_register_100fd();
		if (!IS_ERR(phydev))
			bp->phy_addr = phydev->mdio.addr;
	}

	if (IS_ERR_OR_NULL(phydev))
		err = -ENODEV;
	else
		err = phy_connect_direct(bp->dev, phydev, &b44_adjust_link,
					 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(sdev->dev, "could not attach PHY at %i\n",
			bp->phy_addr);
		goto err_out_mdiobus_unregister;
	}

	/* mask with MAC supported features */
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	bp->old_link = 0;

	phy_attached_info(phydev);

	return 0;

err_out_mdiobus_unregister:
	mdiobus_unregister(mii_bus);

err_out_mdiobus:
	mdiobus_free(mii_bus);

err_out:
	return err;
}

static void b44_unregister_phy_one(struct b44 *bp)
{
	struct mii_bus *mii_bus = bp->mii_bus;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev = dev->phydev;

	phy_disconnect(phydev);
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);
	mdiobus_unregister(mii_bus);
	mdiobus_free(mii_bus);
}

static int b44_init_one(struct ssb_device *sdev,
			const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

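	/* count probed devices; the second and later units read the
	 * et1* MAC/PHY entries from the SPROM (see b44_get_invariants)
	 */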
	instance++;

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);
	u64_stats_init(&bp->hw_stats.syncp);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->min_mtu = B44_MIN_MTU;
	dev->max_mtu = B44_MAX_MTU;
	dev->irq = sdev->irq;
	dev->ethtool_ops = &b44_ethtool_ops;

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to power up the bus\n");
		goto err_out_free_dev;
	}

	err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30-bit DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Failed to fetch chip invariants, aborting\n");
		goto err_out_powerdown;
	}

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
		err = -ENODEV;
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mdio_read_mii;
	bp->mii_if.mdio_write = b44_mdio_write_mii;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	err = b44_phy_reset(bp);
	if (err < 0) {
		dev_err(sdev->dev, "phy reset failed\n");
		goto err_out_unregister_netdev;
	}

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		err = b44_register_phy_one(bp);
		if (err) {
			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
			goto err_out_unregister_netdev;
		}
	}

	device_set_wakeup_capable(sdev->dev, true);
	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	netif_napi_del(&bp->napi);
	free_netdev(dev);

out:
	return err;
}

static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_unregister_phy_one(bp);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	netif_napi_del(&bp->napi);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	timer_delete_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to power up the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

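	/* re-arm the link poll timer so PHY state is re-checked right away */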
	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
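	/* e.g. with (platform-dependent) 32-byte cache lines and 8-byte
	 * descriptors, max(32, 8) = 32, so whole cache lines are synced
	 * per descriptor update
	 */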

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);