1 /*- 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 /* 38 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. 39 * 40 * The Broadcom BCM5700 is based on technology originally developed by 41 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has 43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 45 * frames, highly configurable RX filtering, and 16 RX and TX queues 46 * (which, along with RX filter rules, can be used for QOS applications). 47 * Other features, such as TCP segmentation, may be available as part 48 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 49 * firmware images can be stored in hardware and need not be compiled 50 * into the driver. 51 * 52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 54 * 55 * The BCM5701 is a single-chip solution incorporating both the BCM5700 56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 57 * does not support external SSRAM. 58 * 59 * Broadcom also produces a variation of the BCM5700 under the "Altima" 60 * brand name, which is functionally similar but lacks PCI-X support. 61 * 62 * Without external SSRAM, you can only have at most 4 TX rings, 63 * and the use of the mini RX ring is disabled. This seems to imply 64 * that these features are simply not available on the BCM5701. As a 65 * result, this driver does not implement any support for the mini RX 66 * ring.
67 */ 68 69 #ifdef HAVE_KERNEL_OPTION_HEADERS 70 #include "opt_device_polling.h" 71 #endif 72 73 #include <sys/param.h> 74 #include <sys/endian.h> 75 #include <sys/systm.h> 76 #include <sys/sockio.h> 77 #include <sys/mbuf.h> 78 #include <sys/malloc.h> 79 #include <sys/kernel.h> 80 #include <sys/module.h> 81 #include <sys/socket.h> 82 83 #include <net/if.h> 84 #include <net/if_arp.h> 85 #include <net/ethernet.h> 86 #include <net/if_dl.h> 87 #include <net/if_media.h> 88 89 #include <net/bpf.h> 90 91 #include <net/if_types.h> 92 #include <net/if_vlan_var.h> 93 94 #include <netinet/in_systm.h> 95 #include <netinet/in.h> 96 #include <netinet/ip.h> 97 98 #include <machine/clock.h> /* for DELAY */ 99 #include <machine/bus.h> 100 #include <machine/resource.h> 101 #include <sys/bus.h> 102 #include <sys/rman.h> 103 104 #include <dev/mii/mii.h> 105 #include <dev/mii/miivar.h> 106 #include "miidevs.h" 107 #include <dev/mii/brgphyreg.h> 108 109 #include <dev/pci/pcireg.h> 110 #include <dev/pci/pcivar.h> 111 112 #include <dev/bge/if_bgereg.h> 113 114 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 115 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 116 117 MODULE_DEPEND(bge, pci, 1, 1, 1); 118 MODULE_DEPEND(bge, ether, 1, 1, 1); 119 MODULE_DEPEND(bge, miibus, 1, 1, 1); 120 121 /* "device miibus" required. See GENERIC if you get errors here. */ 122 #include "miibus_if.h" 123 124 /* 125 * Various supported device vendors/types and their names. Note: the 126 * spec seems to indicate that the hardware still has Alteon's vendor 127 * ID burned into it, though it will always be overriden by the vendor 128 * ID in the EEPROM. Just to be safe, we cover all possibilities. 129 */ 130 #define BGE_DEVDESC_MAX 64 /* Maximum device description length */ 131 132 static struct bge_type bge_devs[] = { 133 { ALT_VENDORID, ALT_DEVICEID_BCM5700, 134 "Broadcom BCM5700 Gigabit Ethernet" }, 135 { ALT_VENDORID, ALT_DEVICEID_BCM5701, 136 "Broadcom BCM5701 Gigabit Ethernet" }, 137 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700, 138 "Broadcom BCM5700 Gigabit Ethernet" }, 139 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701, 140 "Broadcom BCM5701 Gigabit Ethernet" }, 141 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702, 142 "Broadcom BCM5702 Gigabit Ethernet" }, 143 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X, 144 "Broadcom BCM5702X Gigabit Ethernet" }, 145 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703, 146 "Broadcom BCM5703 Gigabit Ethernet" }, 147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X, 148 "Broadcom BCM5703X Gigabit Ethernet" }, 149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C, 150 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S, 152 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705, 154 "Broadcom BCM5705 Gigabit Ethernet" }, 155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K, 156 "Broadcom BCM5705K Gigabit Ethernet" }, 157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M, 158 "Broadcom BCM5705M Gigabit Ethernet" }, 159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT, 160 "Broadcom BCM5705M Gigabit Ethernet" }, 161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C, 162 "Broadcom BCM5714C Gigabit Ethernet" }, 163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721, 164 "Broadcom BCM5721 Gigabit Ethernet" }, 165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750, 166 "Broadcom BCM5750 Gigabit Ethernet" }, 167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M, 168 "Broadcom BCM5750M Gigabit Ethernet" }, 169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751, 170 "Broadcom BCM5751 Gigabit Ethernet" }, 171 { BCOM_VENDORID, 
BCOM_DEVICEID_BCM5751M, 172 "Broadcom BCM5751M Gigabit Ethernet" }, 173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752, 174 "Broadcom BCM5752 Gigabit Ethernet" }, 175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782, 176 "Broadcom BCM5782 Gigabit Ethernet" }, 177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788, 178 "Broadcom BCM5788 Gigabit Ethernet" }, 179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789, 180 "Broadcom BCM5789 Gigabit Ethernet" }, 181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901, 182 "Broadcom BCM5901 Fast Ethernet" }, 183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2, 184 "Broadcom BCM5901A2 Fast Ethernet" }, 185 { SK_VENDORID, SK_DEVICEID_ALTIMA, 186 "SysKonnect Gigabit Ethernet" }, 187 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000, 188 "Altima AC1000 Gigabit Ethernet" }, 189 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002, 190 "Altima AC1002 Gigabit Ethernet" }, 191 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100, 192 "Altima AC9100 Gigabit Ethernet" }, 193 { 0, 0, NULL } 194 }; 195 196 static int bge_probe (device_t); 197 static int bge_attach (device_t); 198 static int bge_detach (device_t); 199 static int bge_suspend (device_t); 200 static int bge_resume (device_t); 201 static void bge_release_resources 202 (struct bge_softc *); 203 static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int); 204 static int bge_dma_alloc (device_t); 205 static void bge_dma_free (struct bge_softc *); 206 207 static void bge_txeof (struct bge_softc *); 208 static void bge_rxeof (struct bge_softc *); 209 210 static void bge_tick_locked (struct bge_softc *); 211 static void bge_tick (void *); 212 static void bge_stats_update (struct bge_softc *); 213 static void bge_stats_update_regs 214 (struct bge_softc *); 215 static int bge_encap (struct bge_softc *, struct mbuf *, 216 u_int32_t *); 217 218 static void bge_intr (void *); 219 static void bge_start_locked (struct ifnet *); 220 static void bge_start (struct ifnet *); 221 static int bge_ioctl (struct ifnet *, u_long, caddr_t); 222 static void bge_init_locked (struct bge_softc *); 223 static void bge_init (void *); 224 static void bge_stop (struct bge_softc *); 225 static void bge_watchdog (struct ifnet *); 226 static void bge_shutdown (device_t); 227 static int bge_ifmedia_upd (struct ifnet *); 228 static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 229 230 static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *); 231 static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int); 232 233 static void bge_setmulti (struct bge_softc *); 234 235 static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *); 236 static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *); 237 static int bge_init_rx_ring_std (struct bge_softc *); 238 static void bge_free_rx_ring_std (struct bge_softc *); 239 static int bge_init_rx_ring_jumbo (struct bge_softc *); 240 static void bge_free_rx_ring_jumbo (struct bge_softc *); 241 static void bge_free_tx_ring (struct bge_softc *); 242 static int bge_init_tx_ring (struct bge_softc *); 243 244 static int bge_chipinit (struct bge_softc *); 245 static int bge_blockinit (struct bge_softc *); 246 247 #ifdef notdef 248 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 249 static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int); 250 static void bge_vpd_read (struct bge_softc *); 251 #endif 252 253 static u_int32_t bge_readmem_ind 254 (struct bge_softc *, int); 255 static void bge_writemem_ind (struct bge_softc *, int, int); 256 #ifdef notdef 257 static u_int32_t bge_readreg_ind 258 (struct bge_softc *, 
int); 259 #endif 260 static void bge_writereg_ind (struct bge_softc *, int, int); 261 262 static int bge_miibus_readreg (device_t, int, int); 263 static int bge_miibus_writereg (device_t, int, int, int); 264 static void bge_miibus_statchg (device_t); 265 #ifdef DEVICE_POLLING 266 static void bge_poll (struct ifnet *ifp, enum poll_cmd cmd, 267 int count); 268 static void bge_poll_locked (struct ifnet *ifp, enum poll_cmd cmd, 269 int count); 270 #endif 271 272 static void bge_reset (struct bge_softc *); 273 static void bge_link_upd (struct bge_softc *); 274 275 static device_method_t bge_methods[] = { 276 /* Device interface */ 277 DEVMETHOD(device_probe, bge_probe), 278 DEVMETHOD(device_attach, bge_attach), 279 DEVMETHOD(device_detach, bge_detach), 280 DEVMETHOD(device_shutdown, bge_shutdown), 281 DEVMETHOD(device_suspend, bge_suspend), 282 DEVMETHOD(device_resume, bge_resume), 283 284 /* bus interface */ 285 DEVMETHOD(bus_print_child, bus_generic_print_child), 286 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 287 288 /* MII interface */ 289 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 290 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 291 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 292 293 { 0, 0 } 294 }; 295 296 static driver_t bge_driver = { 297 "bge", 298 bge_methods, 299 sizeof(struct bge_softc) 300 }; 301 302 static devclass_t bge_devclass; 303 304 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 305 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 306 307 static int bge_fake_autoneg = 0; 308 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg); 309 310 static u_int32_t 311 bge_readmem_ind(sc, off) 312 struct bge_softc *sc; 313 int off; 314 { 315 device_t dev; 316 317 dev = sc->bge_dev; 318 319 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 320 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); 321 } 322 323 static void 324 bge_writemem_ind(sc, off, val) 325 struct bge_softc *sc; 326 int off, val; 327 { 328 device_t dev; 329 330 dev = sc->bge_dev; 331 332 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 333 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 334 335 return; 336 } 337 338 #ifdef notdef 339 static u_int32_t 340 bge_readreg_ind(sc, off) 341 struct bge_softc *sc; 342 int off; 343 { 344 device_t dev; 345 346 dev = sc->bge_dev; 347 348 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 349 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 350 } 351 #endif 352 353 static void 354 bge_writereg_ind(sc, off, val) 355 struct bge_softc *sc; 356 int off, val; 357 { 358 device_t dev; 359 360 dev = sc->bge_dev; 361 362 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 363 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 364 365 return; 366 } 367 368 /* 369 * Map a single buffer address. 
370 */ 371 372 static void 373 bge_dma_map_addr(arg, segs, nseg, error) 374 void *arg; 375 bus_dma_segment_t *segs; 376 int nseg; 377 int error; 378 { 379 struct bge_dmamap_arg *ctx; 380 381 if (error) 382 return; 383 384 ctx = arg; 385 386 if (nseg > ctx->bge_maxsegs) { 387 ctx->bge_maxsegs = 0; 388 return; 389 } 390 391 ctx->bge_busaddr = segs->ds_addr; 392 393 return; 394 } 395 396 #ifdef notdef 397 static u_int8_t 398 bge_vpd_readbyte(sc, addr) 399 struct bge_softc *sc; 400 int addr; 401 { 402 int i; 403 device_t dev; 404 u_int32_t val; 405 406 dev = sc->bge_dev; 407 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); 408 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 409 DELAY(10); 410 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) 411 break; 412 } 413 414 if (i == BGE_TIMEOUT) { 415 device_printf(sc->bge_dev, "VPD read timed out\n"); 416 return(0); 417 } 418 419 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); 420 421 return((val >> ((addr % 4) * 8)) & 0xFF); 422 } 423 424 static void 425 bge_vpd_read_res(sc, res, addr) 426 struct bge_softc *sc; 427 struct vpd_res *res; 428 int addr; 429 { 430 int i; 431 u_int8_t *ptr; 432 433 ptr = (u_int8_t *)res; 434 for (i = 0; i < sizeof(struct vpd_res); i++) 435 ptr[i] = bge_vpd_readbyte(sc, i + addr); 436 437 return; 438 } 439 440 static void 441 bge_vpd_read(sc) 442 struct bge_softc *sc; 443 { 444 int pos = 0, i; 445 struct vpd_res res; 446 447 if (sc->bge_vpd_prodname != NULL) 448 free(sc->bge_vpd_prodname, M_DEVBUF); 449 if (sc->bge_vpd_readonly != NULL) 450 free(sc->bge_vpd_readonly, M_DEVBUF); 451 sc->bge_vpd_prodname = NULL; 452 sc->bge_vpd_readonly = NULL; 453 454 bge_vpd_read_res(sc, &res, pos); 455 456 if (res.vr_id != VPD_RES_ID) { 457 device_printf(sc->bge_dev, 458 "bad VPD resource id: expected %x got %x\n", VPD_RES_ID, 459 res.vr_id); 460 return; 461 } 462 463 pos += sizeof(res); 464 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 465 for (i = 0; i < res.vr_len; i++) 466 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 467 sc->bge_vpd_prodname[i] = '\0'; 468 pos += i; 469 470 bge_vpd_read_res(sc, &res, pos); 471 472 if (res.vr_id != VPD_RES_READ) { 473 device_printf(sc->bge_dev, 474 "bad VPD resource id: expected %x got %x\n", VPD_RES_READ, 475 res.vr_id); 476 return; 477 } 478 479 pos += sizeof(res); 480 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 481 for (i = 0; i < res.vr_len + 1; i++) 482 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 483 484 return; 485 } 486 #endif 487 488 /* 489 * Read a byte of data stored in the EEPROM at address 'addr.' The 490 * BCM570x supports both the traditional bitbang interface and an 491 * auto access interface for reading the EEPROM. We use the auto 492 * access method. 493 */ 494 static u_int8_t 495 bge_eeprom_getbyte(sc, addr, dest) 496 struct bge_softc *sc; 497 int addr; 498 u_int8_t *dest; 499 { 500 int i; 501 u_int32_t byte = 0; 502 503 /* 504 * Enable use of auto EEPROM access so we can avoid 505 * having to use the bitbang method. 506 */ 507 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 508 509 /* Reset the EEPROM, load the clock period. */ 510 CSR_WRITE_4(sc, BGE_EE_ADDR, 511 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 512 DELAY(20); 513 514 /* Issue the read EEPROM command. 
*/ 515 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 516 517 /* Wait for completion */ 518 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 519 DELAY(10); 520 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 521 break; 522 } 523 524 if (i == BGE_TIMEOUT * 10) { 525 device_printf(sc->bge_dev, "EEPROM read timed out\n"); 526 return(1); 527 } 528 529 /* Get result. */ 530 byte = CSR_READ_4(sc, BGE_EE_DATA); 531 532 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 533 534 return(0); 535 } 536 537 /* 538 * Read a sequence of bytes from the EEPROM. 539 */ 540 static int 541 bge_read_eeprom(sc, dest, off, cnt) 542 struct bge_softc *sc; 543 caddr_t dest; 544 int off; 545 int cnt; 546 { 547 int err = 0, i; 548 u_int8_t byte = 0; 549 550 for (i = 0; i < cnt; i++) { 551 err = bge_eeprom_getbyte(sc, off + i, &byte); 552 if (err) 553 break; 554 *(dest + i) = byte; 555 } 556 557 return(err ? 1 : 0); 558 } 559 560 static int 561 bge_miibus_readreg(dev, phy, reg) 562 device_t dev; 563 int phy, reg; 564 { 565 struct bge_softc *sc; 566 u_int32_t val, autopoll; 567 int i; 568 569 sc = device_get_softc(dev); 570 571 /* 572 * Broadcom's own driver always assumes the internal 573 * PHY is at GMII address 1. On some chips, the PHY responds 574 * to accesses at all addresses, which could cause us to 575 * bogusly attach the PHY 32 times at probe time. Always 576 * restricting the lookup to address 1 is simpler than 577 * trying to figure out which chip revisions should be 578 * special-cased. 579 */ 580 if (phy != 1) 581 return(0); 582 583 /* Reading with autopolling on may trigger PCI errors */ 584 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 585 if (autopoll & BGE_MIMODE_AUTOPOLL) { 586 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 587 DELAY(40); 588 } 589 590 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| 591 BGE_MIPHY(phy)|BGE_MIREG(reg)); 592 593 for (i = 0; i < BGE_TIMEOUT; i++) { 594 val = CSR_READ_4(sc, BGE_MI_COMM); 595 if (!(val & BGE_MICOMM_BUSY)) 596 break; 597 } 598 599 if (i == BGE_TIMEOUT) { 600 if_printf(sc->bge_ifp, "PHY read timed out\n"); 601 val = 0; 602 goto done; 603 } 604 605 val = CSR_READ_4(sc, BGE_MI_COMM); 606 607 done: 608 if (autopoll & BGE_MIMODE_AUTOPOLL) { 609 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 610 DELAY(40); 611 } 612 613 if (val & BGE_MICOMM_READFAIL) 614 return(0); 615 616 return(val & 0xFFFF); 617 } 618 619 static int 620 bge_miibus_writereg(dev, phy, reg, val) 621 device_t dev; 622 int phy, reg, val; 623 { 624 struct bge_softc *sc; 625 u_int32_t autopoll; 626 int i; 627 628 sc = device_get_softc(dev); 629 630 /* Writing with autopolling on may trigger PCI errors */ 631 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 632 if (autopoll & BGE_MIMODE_AUTOPOLL) { 633 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 634 DELAY(40); 635 } 636 637 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| 638 BGE_MIPHY(phy)|BGE_MIREG(reg)|val); 639 640 for (i = 0; i < BGE_TIMEOUT; i++) { 641 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) 642 break; 643 } 644 645 if (autopoll & BGE_MIMODE_AUTOPOLL) { 646 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 647 DELAY(40); 648 } 649 650 if (i == BGE_TIMEOUT) { 651 if_printf(sc->bge_ifp, "PHY write timed out\n"); 652 return(0); 653 } 654 655 return(0); 656 } 657 658 static void 659 bge_miibus_statchg(dev) 660 device_t dev; 661 { 662 struct bge_softc *sc; 663 struct mii_data *mii; 664 665 sc = device_get_softc(dev); 666 mii = device_get_softc(sc->bge_miibus); 667 668 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 669 if
(IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 670 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 671 } else { 672 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 673 } 674 675 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 676 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 677 } else { 678 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 679 } 680 681 return; 682 } 683 684 /* 685 * Initialize a standard receive ring descriptor. 686 */ 687 static int 688 bge_newbuf_std(sc, i, m) 689 struct bge_softc *sc; 690 int i; 691 struct mbuf *m; 692 { 693 struct mbuf *m_new = NULL; 694 struct bge_rx_bd *r; 695 struct bge_dmamap_arg ctx; 696 int error; 697 698 if (m == NULL) { 699 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 700 if (m_new == NULL) 701 return(ENOBUFS); 702 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 703 } else { 704 m_new = m; 705 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 706 m_new->m_data = m_new->m_ext.ext_buf; 707 } 708 709 if (!sc->bge_rx_alignment_bug) 710 m_adj(m_new, ETHER_ALIGN); 711 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 712 r = &sc->bge_ldata.bge_rx_std_ring[i]; 713 ctx.bge_maxsegs = 1; 714 ctx.sc = sc; 715 error = bus_dmamap_load(sc->bge_cdata.bge_mtag, 716 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *), 717 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 718 if (error || ctx.bge_maxsegs == 0) { 719 if (m == NULL) { 720 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 721 m_freem(m_new); 722 } 723 return(ENOMEM); 724 } 725 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr); 726 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr); 727 r->bge_flags = BGE_RXBDFLAG_END; 728 r->bge_len = m_new->m_len; 729 r->bge_idx = i; 730 731 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 732 sc->bge_cdata.bge_rx_std_dmamap[i], 733 BUS_DMASYNC_PREREAD); 734 735 return(0); 736 } 737 738 /* 739 * Initialize a jumbo receive ring descriptor. This allocates 740 * a 9K mbuf cluster to hold the jumbo frame. 741 */ 742 static int 743 bge_newbuf_jumbo(sc, i, m) 744 struct bge_softc *sc; 745 int i; 746 struct mbuf *m; 747 { 748 bus_dma_segment_t segs[BGE_NSEG_JUMBO]; 749 struct bge_extrx_bd *r; 750 struct mbuf *m_new = NULL; 751 int nsegs; 752 int error; 753 754 if (m == NULL) { 755 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 756 if (m_new == NULL) 757 return(ENOBUFS); 758 759 m_cljget(m_new, M_DONTWAIT, MJUM9BYTES); 760 if (!(m_new->m_flags & M_EXT)) { 761 m_freem(m_new); 762 return(ENOBUFS); 763 } 764 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 765 } else { 766 m_new = m; 767 m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES; 768 m_new->m_data = m_new->m_ext.ext_buf; 769 } 770 771 if (!sc->bge_rx_alignment_bug) 772 m_adj(m_new, ETHER_ALIGN); 773 774 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, 775 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 776 m_new, segs, &nsegs, BUS_DMA_NOWAIT); 777 if (error) { 778 if (m == NULL) 779 m_freem(m_new); 780 return(error); 781 } 782 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 783 784 /* 785 * Fill in the extended RX buffer descriptor.
786 */ 787 r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; 788 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END; 789 r->bge_idx = i; 790 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; 791 switch (nsegs) { 792 case 4: 793 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); 794 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); 795 r->bge_len3 = segs[3].ds_len; 796 case 3: 797 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); 798 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); 799 r->bge_len2 = segs[2].ds_len; 800 case 2: 801 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); 802 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); 803 r->bge_len1 = segs[1].ds_len; 804 case 1: 805 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); 806 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); 807 r->bge_len0 = segs[0].ds_len; 808 break; 809 default: 810 panic("%s: %d segments\n", __func__, nsegs); 811 } 812 813 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 814 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 815 BUS_DMASYNC_PREREAD); 816 817 return (0); 818 } 819 820 /* 821 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 822 * that's 1MB of memory, which is a lot. For now, we fill only the first 823 * 256 ring entries and hope that our CPU is fast enough to keep up with 824 * the NIC. 825 */ 826 static int 827 bge_init_rx_ring_std(sc) 828 struct bge_softc *sc; 829 { 830 int i; 831 832 for (i = 0; i < BGE_SSLOTS; i++) { 833 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) 834 return(ENOBUFS); 835 }; 836 837 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 838 sc->bge_cdata.bge_rx_std_ring_map, 839 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 840 841 sc->bge_std = i - 1; 842 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 843 844 return(0); 845 } 846 847 static void 848 bge_free_rx_ring_std(sc) 849 struct bge_softc *sc; 850 { 851 int i; 852 853 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 854 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 855 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 856 sc->bge_cdata.bge_rx_std_dmamap[i], 857 BUS_DMASYNC_POSTREAD); 858 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 859 sc->bge_cdata.bge_rx_std_dmamap[i]); 860 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 861 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 862 } 863 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 864 sizeof(struct bge_rx_bd)); 865 } 866 867 return; 868 } 869 870 static int 871 bge_init_rx_ring_jumbo(sc) 872 struct bge_softc *sc; 873 { 874 struct bge_rcb *rcb; 875 int i; 876 877 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 878 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 879 return(ENOBUFS); 880 }; 881 882 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 883 sc->bge_cdata.bge_rx_jumbo_ring_map, 884 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 885 886 sc->bge_jumbo = i - 1; 887 888 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 889 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 890 BGE_RCB_FLAG_USE_EXT_RX_BD); 891 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 892 893 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 894 895 return(0); 896 } 897 898 static void 899 bge_free_rx_ring_jumbo(sc) 900 struct bge_softc *sc; 901 { 902 int i; 903 904 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 905 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 906 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 907 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 908 BUS_DMASYNC_POSTREAD); 909 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 910
sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 911 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 912 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 913 } 914 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 915 sizeof(struct bge_extrx_bd)); 916 } 917 918 return; 919 } 920 921 static void 922 bge_free_tx_ring(sc) 923 struct bge_softc *sc; 924 { 925 int i; 926 927 if (sc->bge_ldata.bge_tx_ring == NULL) 928 return; 929 930 for (i = 0; i < BGE_TX_RING_CNT; i++) { 931 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 932 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 933 sc->bge_cdata.bge_tx_dmamap[i], 934 BUS_DMASYNC_POSTWRITE); 935 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 936 sc->bge_cdata.bge_tx_dmamap[i]); 937 m_freem(sc->bge_cdata.bge_tx_chain[i]); 938 sc->bge_cdata.bge_tx_chain[i] = NULL; 939 } 940 bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 941 sizeof(struct bge_tx_bd)); 942 } 943 944 return; 945 } 946 947 static int 948 bge_init_tx_ring(sc) 949 struct bge_softc *sc; 950 { 951 sc->bge_txcnt = 0; 952 sc->bge_tx_saved_considx = 0; 953 954 /* Initialize transmit producer index for host-memory send ring. */ 955 sc->bge_tx_prodidx = 0; 956 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 957 958 /* 5700 b2 errata */ 959 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 960 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 961 962 /* NIC-memory send ring not used; initialize to zero. */ 963 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 964 /* 5700 b2 errata */ 965 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 966 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 967 968 return(0); 969 } 970 971 static void 972 bge_setmulti(sc) 973 struct bge_softc *sc; 974 { 975 struct ifnet *ifp; 976 struct ifmultiaddr *ifma; 977 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 978 int h, i; 979 980 BGE_LOCK_ASSERT(sc); 981 982 ifp = sc->bge_ifp; 983 984 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 985 for (i = 0; i < 4; i++) 986 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 987 return; 988 } 989 990 /* First, zot all the existing filters. */ 991 for (i = 0; i < 4; i++) 992 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 993 994 /* Now program new ones. */ 995 IF_ADDR_LOCK(ifp); 996 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 997 if (ifma->ifma_addr->sa_family != AF_LINK) 998 continue; 999 h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1000 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; 1001 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1002 } 1003 IF_ADDR_UNLOCK(ifp); 1004 1005 for (i = 0; i < 4; i++) 1006 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1007 1008 return; 1009 } 1010 1011 /* 1012 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1013 * self-test results. 1014 */ 1015 static int 1016 bge_chipinit(sc) 1017 struct bge_softc *sc; 1018 { 1019 int i; 1020 u_int32_t dma_rw_ctl; 1021 1022 /* Set endian type before we access any non-PCI registers. */ 1023 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); 1024 1025 /* 1026 * Check the 'ROM failed' bit on the RX CPU to see if 1027 * self-tests passed. 1028 */ 1029 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1030 device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n"); 1031 return(ENODEV); 1032 } 1033 1034 /* Clear the MAC control register */ 1035 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1036 1037 /* 1038 * Clear the MAC statistics block in the NIC's 1039 * internal memory. 
1040 */ 1041 for (i = BGE_STATS_BLOCK; 1042 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1043 BGE_MEMWIN_WRITE(sc, i, 0); 1044 1045 for (i = BGE_STATUS_BLOCK; 1046 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1047 BGE_MEMWIN_WRITE(sc, i, 0); 1048 1049 /* Set up the PCI DMA control register. */ 1050 if (sc->bge_pcie) { 1051 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1052 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1053 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1054 } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & 1055 BGE_PCISTATE_PCI_BUSMODE) { 1056 /* Conventional PCI bus */ 1057 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1058 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1059 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1060 (0x0F); 1061 } else { 1062 /* PCI-X bus */ 1063 /* 1064 * The 5704 uses a different encoding of read/write 1065 * watermarks. 1066 */ 1067 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1068 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1069 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1070 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1071 else 1072 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1073 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1074 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1075 (0x0F); 1076 1077 /* 1078 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1079 * for hardware bugs. 1080 */ 1081 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1082 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1083 u_int32_t tmp; 1084 1085 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 1086 if (tmp == 0x6 || tmp == 0x7) 1087 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1088 } 1089 } 1090 1091 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1092 sc->bge_asicrev == BGE_ASICREV_BCM5704 || 1093 sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1094 sc->bge_asicrev == BGE_ASICREV_BCM5750) 1095 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1096 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1097 1098 /* 1099 * Set up general mode register. 1100 */ 1101 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1102 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1103 BGE_MODECTL_TX_NO_PHDR_CSUM); 1104 1105 /* 1106 * Disable memory write invalidate. Apparently it is not supported 1107 * properly by these devices. 1108 */ 1109 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); 1110 1111 #ifdef __brokenalpha__ 1112 /* 1113 * Must insure that we do not cross an 8K (bytes) boundary 1114 * for DMA reads. Our highest limit is 1K bytes. This is a 1115 * restriction on some ALPHA platforms with early revision 1116 * 21174 PCI chipsets, such as the AlphaPC 164lx 1117 */ 1118 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1119 BGE_PCI_READ_BNDRY_1024BYTES, 4); 1120 #endif 1121 1122 /* Set the timer prescaler (always 66Mhz) */ 1123 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1124 1125 return(0); 1126 } 1127 1128 static int 1129 bge_blockinit(sc) 1130 struct bge_softc *sc; 1131 { 1132 struct bge_rcb *rcb; 1133 bus_size_t vrcb; 1134 bge_hostaddr taddr; 1135 int i; 1136 1137 /* 1138 * Initialize the memory window pointer register so that 1139 * we can access the first 32K of internal NIC RAM. This will 1140 * allow us to set up the TX send ring RCBs and the RX return 1141 * ring RCBs, plus other things which live in NIC memory. 1142 */ 1143 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1144 1145 /* Note: the BCM5704 has a smaller mbuf space than other chips. 
*/ 1146 1147 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1148 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1149 /* Configure mbuf memory pool */ 1150 if (sc->bge_extram) { 1151 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1152 BGE_EXT_SSRAM); 1153 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1154 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1155 else 1156 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1157 } else { 1158 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1159 BGE_BUFFPOOL_1); 1160 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1161 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1162 else 1163 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1164 } 1165 1166 /* Configure DMA resource pool */ 1167 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1168 BGE_DMA_DESCRIPTORS); 1169 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1170 } 1171 1172 /* Configure mbuf pool watermarks */ 1173 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1174 sc->bge_asicrev == BGE_ASICREV_BCM5750) { 1175 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1176 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1177 } else { 1178 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1179 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1180 } 1181 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1182 1183 /* Configure DMA resource watermarks */ 1184 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1185 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1186 1187 /* Enable buffer manager */ 1188 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1189 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1190 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1191 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1192 1193 /* Poll for buffer manager start indication */ 1194 for (i = 0; i < BGE_TIMEOUT; i++) { 1195 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1196 break; 1197 DELAY(10); 1198 } 1199 1200 if (i == BGE_TIMEOUT) { 1201 device_printf(sc->bge_dev, 1202 "buffer manager failed to start\n"); 1203 return(ENXIO); 1204 } 1205 } 1206 1207 /* Enable flow-through queues */ 1208 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1209 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1210 1211 /* Wait until queue initialization is complete */ 1212 for (i = 0; i < BGE_TIMEOUT; i++) { 1213 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1214 break; 1215 DELAY(10); 1216 } 1217 1218 if (i == BGE_TIMEOUT) { 1219 device_printf(sc->bge_dev, "flow-through queue init failed\n"); 1220 return(ENXIO); 1221 } 1222 1223 /* Initialize the standard RX ring control block */ 1224 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1225 rcb->bge_hostaddr.bge_addr_lo = 1226 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1227 rcb->bge_hostaddr.bge_addr_hi = 1228 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1229 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1230 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 1231 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1232 sc->bge_asicrev == BGE_ASICREV_BCM5750) 1233 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1234 else 1235 rcb->bge_maxlen_flags = 1236 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1237 if (sc->bge_extram) 1238 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1239 else 1240 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1241 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1242 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1243 1244 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1245 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 
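	/*
	 * Note (editorial, not in the original source): each ring control
	 * block (RCB) programmed in this function consists of the 64-bit
	 * host address of the ring, a maxlen/flags word and a NIC-memory
	 * address; the writes above hand the host-resident standard RX
	 * ring to the chip, and the jumbo and mini RCBs below follow the
	 * same layout.
	 */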
1246 1247 /* 1248 * Initialize the jumbo RX ring control block. 1249 * We set the 'ring disabled' bit in the flags 1250 * field until we're actually ready to start 1251 * using this ring (i.e. once we set the MTU 1252 * high enough to require it). 1253 */ 1254 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1255 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1256 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 1257 1258 rcb->bge_hostaddr.bge_addr_lo = 1259 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1260 rcb->bge_hostaddr.bge_addr_hi = 1261 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1262 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1263 sc->bge_cdata.bge_rx_jumbo_ring_map, 1264 BUS_DMASYNC_PREREAD); 1265 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 1266 BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED); 1267 if (sc->bge_extram) 1268 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; 1269 else 1270 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1271 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1272 rcb->bge_hostaddr.bge_addr_hi); 1273 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1274 rcb->bge_hostaddr.bge_addr_lo); 1275 1276 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1277 rcb->bge_maxlen_flags); 1278 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1279 1280 /* Set up dummy disabled mini ring RCB */ 1281 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; 1282 rcb->bge_maxlen_flags = 1283 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 1284 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1285 rcb->bge_maxlen_flags); 1286 } 1287 1288 /* 1289 * Set the BD ring replenish thresholds. The recommended 1290 * values are 1/8th the number of descriptors allocated to 1291 * each ring. 1292 */ 1293 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); 1294 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); 1295 1296 /* 1297 * Disable all unused send rings by setting the 'ring disabled' 1298 * bit in the flags field of all the TX send ring control blocks. 1299 * These are located in NIC memory.
1300 */ 1301 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1302 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1303 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1304 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1305 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1306 vrcb += sizeof(struct bge_rcb); 1307 } 1308 1309 /* Configure TX RCB 0 (we use only the first ring) */ 1310 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1311 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1312 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1313 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1314 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1315 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1316 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1317 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1318 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1319 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1320 1321 /* Disable all unused RX return rings */ 1322 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1323 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1324 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1325 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1326 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1327 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1328 BGE_RCB_FLAG_RING_DISABLED)); 1329 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1330 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1331 (i * (sizeof(u_int64_t))), 0); 1332 vrcb += sizeof(struct bge_rcb); 1333 } 1334 1335 /* Initialize RX ring indexes */ 1336 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1337 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1338 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1339 1340 /* 1341 * Set up RX return ring 0. 1342 * Note that the NIC address for RX return rings is 0x00000000. 1343 * The return rings live entirely within the host, so the 1344 * nicaddr field in the RCB isn't used. 1345 */ 1346 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1347 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); 1348 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1349 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1350 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000); 1351 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1352 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1353 1354 /* Set random backoff seed for TX */ 1355 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1356 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + 1357 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + 1358 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] + 1359 BGE_TX_BACKOFF_SEED_MASK); 1360 1361 /* Set inter-packet gap */ 1362 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1363 1364 /* 1365 * Specify which ring to use for packets that don't match 1366 * any RX rules. 1367 */ 1368 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1369 1370 /* 1371 * Configure number of RX lists. One interrupt distribution 1372 * list, sixteen active lists, one bad frames class. 1373 */ 1374 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1375 1376 /* Initialize RX list placement stats mask. */ 1377 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1378 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1379 1380 /* Disable host coalescing until we get it set up */ 1381 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1382 1383 /* Poll to make sure it's shut down.
*/ 1384 for (i = 0; i < BGE_TIMEOUT; i++) { 1385 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1386 break; 1387 DELAY(10); 1388 } 1389 1390 if (i == BGE_TIMEOUT) { 1391 device_printf(sc->bge_dev, 1392 "host coalescing engine failed to idle\n"); 1393 return(ENXIO); 1394 } 1395 1396 /* Set up host coalescing defaults */ 1397 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1398 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1399 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1400 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1401 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1402 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1403 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1404 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1405 } 1406 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1407 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1408 1409 /* Set up address of statistics block */ 1410 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1411 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1412 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1413 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 1414 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1415 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 1416 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1417 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1418 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1419 } 1420 1421 /* Set up address of status block */ 1422 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1423 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 1424 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1425 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1426 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; 1427 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; 1428 1429 /* Turn on host coalescing state machine */ 1430 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1431 1432 /* Turn on RX BD completion state machine and enable attentions */ 1433 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1434 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1435 1436 /* Turn on RX list placement state machine */ 1437 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1438 1439 /* Turn on RX list selector state machine. */ 1440 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1441 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1442 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1443 1444 /* Turn on DMA, clear stats */ 1445 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1446 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1447 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1448 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1449 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1450 1451 /* Set misc. 
local control, enable interrupts on attentions */ 1452 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1453 1454 #ifdef notdef 1455 /* Assert GPIO pins for PHY reset */ 1456 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1457 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1458 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1459 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1460 #endif 1461 1462 /* Turn on DMA completion state machine */ 1463 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1464 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1465 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1466 1467 /* Turn on write DMA state machine */ 1468 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1469 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1470 1471 /* Turn on read DMA state machine */ 1472 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1473 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1474 1475 /* Turn on RX data completion state machine */ 1476 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1477 1478 /* Turn on RX BD initiator state machine */ 1479 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1480 1481 /* Turn on RX data and RX BD initiator state machine */ 1482 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1483 1484 /* Turn on Mbuf cluster free state machine */ 1485 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1486 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1487 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1488 1489 /* Turn on send BD completion state machine */ 1490 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1491 1492 /* Turn on send data completion state machine */ 1493 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1494 1495 /* Turn on send data initiator state machine */ 1496 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1497 1498 /* Turn on send BD initiator state machine */ 1499 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1500 1501 /* Turn on send BD selector state machine */ 1502 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1503 1504 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1505 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1506 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1507 1508 /* ack/clear link change events */ 1509 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1510 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1511 BGE_MACSTAT_LINK_CHANGED); 1512 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1513 1514 /* Enable PHY auto polling (for MII/GMII only) */ 1515 if (sc->bge_tbi) { 1516 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1517 } else { 1518 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1519 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1520 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) 1521 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1522 BGE_EVTENB_MI_INTERRUPT); 1523 } 1524 1525 /* 1526 * Clear any pending link state attention. 1527 * Otherwise some link state change events may be lost until attention 1528 * is cleared by bge_intr() -> bge_link_upd() sequence. 1529 * It's not necessary on newer BCM chips - perhaps enabling link 1530 * state change attentions implies clearing pending attention. 1531 */ 1532 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1533 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1534 BGE_MACSTAT_LINK_CHANGED); 1535 1536 /* Enable link state change attentions. */ 1537 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1538 1539 return(0); 1540 } 1541 1542 /* 1543 * Probe for a Broadcom chip. 
Check the PCI vendor and device IDs 1544 * against our list and return its name if we find a match. Note 1545 * that since the Broadcom controller contains VPD support, we 1546 * can get the device name string from the controller itself instead 1547 * of the compiled-in string. This is a little slow, but it guarantees 1548 * we'll always announce the right product name. 1549 */ 1550 static int 1551 bge_probe(dev) 1552 device_t dev; 1553 { 1554 struct bge_type *t; 1555 struct bge_softc *sc; 1556 char *descbuf; 1557 1558 t = bge_devs; 1559 1560 sc = device_get_softc(dev); 1561 bzero(sc, sizeof(struct bge_softc)); 1562 sc->bge_dev = dev; 1563 1564 while(t->bge_name != NULL) { 1565 if ((pci_get_vendor(dev) == t->bge_vid) && 1566 (pci_get_device(dev) == t->bge_did)) { 1567 #ifdef notdef 1568 bge_vpd_read(sc); 1569 device_set_desc(dev, sc->bge_vpd_prodname); 1570 #endif 1571 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 1572 if (descbuf == NULL) 1573 return(ENOMEM); 1574 snprintf(descbuf, BGE_DEVDESC_MAX, 1575 "%s, ASIC rev. %#04x", t->bge_name, 1576 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16); 1577 device_set_desc_copy(dev, descbuf); 1578 if (pci_get_subvendor(dev) == DELL_VENDORID) 1579 sc->bge_no_3_led = 1; 1580 free(descbuf, M_TEMP); 1581 return(0); 1582 } 1583 t++; 1584 } 1585 1586 return(ENXIO); 1587 } 1588 1589 static void 1590 bge_dma_free(sc) 1591 struct bge_softc *sc; 1592 { 1593 int i; 1594 1595 1596 /* Destroy DMA maps for RX buffers */ 1597 1598 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1599 if (sc->bge_cdata.bge_rx_std_dmamap[i]) 1600 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1601 sc->bge_cdata.bge_rx_std_dmamap[i]); 1602 } 1603 1604 /* Destroy DMA maps for jumbo RX buffers */ 1605 1606 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1607 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 1608 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 1609 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1610 } 1611 1612 /* Destroy DMA maps for TX buffers */ 1613 1614 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1615 if (sc->bge_cdata.bge_tx_dmamap[i]) 1616 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1617 sc->bge_cdata.bge_tx_dmamap[i]); 1618 } 1619 1620 if (sc->bge_cdata.bge_mtag) 1621 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag); 1622 1623 1624 /* Destroy standard RX ring */ 1625 1626 if (sc->bge_cdata.bge_rx_std_ring_map) 1627 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 1628 sc->bge_cdata.bge_rx_std_ring_map); 1629 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 1630 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 1631 sc->bge_ldata.bge_rx_std_ring, 1632 sc->bge_cdata.bge_rx_std_ring_map); 1633 1634 if (sc->bge_cdata.bge_rx_std_ring_tag) 1635 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 1636 1637 /* Destroy jumbo RX ring */ 1638 1639 if (sc->bge_cdata.bge_rx_jumbo_ring_map) 1640 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1641 sc->bge_cdata.bge_rx_jumbo_ring_map); 1642 1643 if (sc->bge_cdata.bge_rx_jumbo_ring_map && 1644 sc->bge_ldata.bge_rx_jumbo_ring) 1645 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1646 sc->bge_ldata.bge_rx_jumbo_ring, 1647 sc->bge_cdata.bge_rx_jumbo_ring_map); 1648 1649 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 1650 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 1651 1652 /* Destroy RX return ring */ 1653 1654 if (sc->bge_cdata.bge_rx_return_ring_map) 1655 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 1656 sc->bge_cdata.bge_rx_return_ring_map); 1657 1658 if (sc->bge_cdata.bge_rx_return_ring_map 
&& 1659 sc->bge_ldata.bge_rx_return_ring) 1660 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 1661 sc->bge_ldata.bge_rx_return_ring, 1662 sc->bge_cdata.bge_rx_return_ring_map); 1663 1664 if (sc->bge_cdata.bge_rx_return_ring_tag) 1665 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 1666 1667 /* Destroy TX ring */ 1668 1669 if (sc->bge_cdata.bge_tx_ring_map) 1670 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 1671 sc->bge_cdata.bge_tx_ring_map); 1672 1673 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 1674 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 1675 sc->bge_ldata.bge_tx_ring, 1676 sc->bge_cdata.bge_tx_ring_map); 1677 1678 if (sc->bge_cdata.bge_tx_ring_tag) 1679 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 1680 1681 /* Destroy status block */ 1682 1683 if (sc->bge_cdata.bge_status_map) 1684 bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 1685 sc->bge_cdata.bge_status_map); 1686 1687 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 1688 bus_dmamem_free(sc->bge_cdata.bge_status_tag, 1689 sc->bge_ldata.bge_status_block, 1690 sc->bge_cdata.bge_status_map); 1691 1692 if (sc->bge_cdata.bge_status_tag) 1693 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 1694 1695 /* Destroy statistics block */ 1696 1697 if (sc->bge_cdata.bge_stats_map) 1698 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 1699 sc->bge_cdata.bge_stats_map); 1700 1701 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 1702 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 1703 sc->bge_ldata.bge_stats, 1704 sc->bge_cdata.bge_stats_map); 1705 1706 if (sc->bge_cdata.bge_stats_tag) 1707 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 1708 1709 /* Destroy the parent tag */ 1710 1711 if (sc->bge_cdata.bge_parent_tag) 1712 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 1713 1714 return; 1715 } 1716 1717 static int 1718 bge_dma_alloc(dev) 1719 device_t dev; 1720 { 1721 struct bge_softc *sc; 1722 int i, error; 1723 struct bge_dmamap_arg ctx; 1724 1725 sc = device_get_softc(dev); 1726 1727 /* 1728 * Allocate the parent bus DMA tag appropriate for PCI. 1729 */ 1730 error = bus_dma_tag_create(NULL, /* parent */ 1731 PAGE_SIZE, 0, /* alignment, boundary */ 1732 BUS_SPACE_MAXADDR, /* lowaddr */ 1733 BUS_SPACE_MAXADDR, /* highaddr */ 1734 NULL, NULL, /* filter, filterarg */ 1735 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */ 1736 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1737 0, /* flags */ 1738 NULL, NULL, /* lockfunc, lockarg */ 1739 &sc->bge_cdata.bge_parent_tag); 1740 1741 if (error != 0) { 1742 device_printf(sc->bge_dev, 1743 "could not allocate parent dma tag\n"); 1744 return (ENOMEM); 1745 } 1746 1747 /* 1748 * Create tag for RX mbufs. 
1749 */ 1750 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 1751 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1752 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES, 1753 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag); 1754 1755 if (error) { 1756 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1757 return (ENOMEM); 1758 } 1759 1760 /* Create DMA maps for RX buffers */ 1761 1762 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1763 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1764 &sc->bge_cdata.bge_rx_std_dmamap[i]); 1765 if (error) { 1766 device_printf(sc->bge_dev, 1767 "can't create DMA map for RX\n"); 1768 return(ENOMEM); 1769 } 1770 } 1771 1772 /* Create DMA maps for TX buffers */ 1773 1774 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1775 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1776 &sc->bge_cdata.bge_tx_dmamap[i]); 1777 if (error) { 1778 device_printf(sc->bge_dev, 1779 "can't create DMA map for TX\n"); 1780 return(ENOMEM); 1781 } 1782 } 1783 1784 /* Create tag for standard RX ring */ 1785 1786 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1787 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1788 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 1789 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 1790 1791 if (error) { 1792 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1793 return (ENOMEM); 1794 } 1795 1796 /* Allocate DMA'able memory for standard RX ring */ 1797 1798 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 1799 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 1800 &sc->bge_cdata.bge_rx_std_ring_map); 1801 if (error) 1802 return (ENOMEM); 1803 1804 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 1805 1806 /* Load the address of the standard RX ring */ 1807 1808 ctx.bge_maxsegs = 1; 1809 ctx.sc = sc; 1810 1811 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 1812 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, 1813 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1814 1815 if (error) 1816 return (ENOMEM); 1817 1818 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 1819 1820 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1821 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1822 1823 /* 1824 * Create tag for jumbo mbufs. 1825 * This is really a bit of a kludge. We allocate a special 1826 * jumbo buffer pool which (thanks to the way our DMA 1827 * memory allocation works) will consist of contiguous 1828 * pages. This means that even though a jumbo buffer might 1829 * be larger than a page size, we don't really need to 1830 * map it into more than one DMA segment. However, the 1831 * default mbuf tag will result in multi-segment mappings, 1832 * so we have to create a special jumbo mbuf tag that 1833 * lets us get away with mapping the jumbo buffers as 1834 * a single segment. I think eventually the driver should 1835 * be changed so that it uses ordinary mbufs and cluster 1836 * buffers, i.e. jumbo frames can span multiple DMA 1837 * descriptors. But that's a project for another day.
1838 */ 1839 1840 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1841 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1842 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 1843 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 1844 1845 if (error) { 1846 device_printf(sc->bge_dev, 1847 "could not allocate dma tag\n"); 1848 return (ENOMEM); 1849 } 1850 1851 /* Create tag for jumbo RX ring */ 1852 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1853 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1854 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 1855 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 1856 1857 if (error) { 1858 device_printf(sc->bge_dev, 1859 "could not allocate dma tag\n"); 1860 return (ENOMEM); 1861 } 1862 1863 /* Allocate DMA'able memory for jumbo RX ring */ 1864 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1865 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 1866 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1867 &sc->bge_cdata.bge_rx_jumbo_ring_map); 1868 if (error) 1869 return (ENOMEM); 1870 1871 /* Load the address of the jumbo RX ring */ 1872 ctx.bge_maxsegs = 1; 1873 ctx.sc = sc; 1874 1875 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1876 sc->bge_cdata.bge_rx_jumbo_ring_map, 1877 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 1878 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1879 1880 if (error) 1881 return (ENOMEM); 1882 1883 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 1884 1885 /* Create DMA maps for jumbo RX buffers */ 1886 1887 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1888 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 1889 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1890 if (error) { 1891 device_printf(sc->bge_dev, 1892 "can't create DMA map for RX\n"); 1893 return(ENOMEM); 1894 } 1895 } 1896 1897 } 1898 1899 /* Create tag for RX return ring */ 1900 1901 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1902 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1903 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 1904 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 1905 1906 if (error) { 1907 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1908 return (ENOMEM); 1909 } 1910 1911 /* Allocate DMA'able memory for RX return ring */ 1912 1913 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 1914 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 1915 &sc->bge_cdata.bge_rx_return_ring_map); 1916 if (error) 1917 return (ENOMEM); 1918 1919 bzero((char *)sc->bge_ldata.bge_rx_return_ring, 1920 BGE_RX_RTN_RING_SZ(sc)); 1921 1922 /* Load the address of the RX return ring */ 1923 1924 ctx.bge_maxsegs = 1; 1925 ctx.sc = sc; 1926 1927 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 1928 sc->bge_cdata.bge_rx_return_ring_map, 1929 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 1930 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1931 1932 if (error) 1933 return (ENOMEM); 1934 1935 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 1936 1937 /* Create tag for TX ring */ 1938 1939 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1940 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1941 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 1942 &sc->bge_cdata.bge_tx_ring_tag); 1943 1944 if (error) { 1945 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1946 return (ENOMEM); 1947 } 1948 1949 /* Allocate DMA'able memory for TX ring */ 1950 1951 error = 
bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 1952 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 1953 &sc->bge_cdata.bge_tx_ring_map); 1954 if (error) 1955 return (ENOMEM); 1956 1957 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 1958 1959 /* Load the address of the TX ring */ 1960 1961 ctx.bge_maxsegs = 1; 1962 ctx.sc = sc; 1963 1964 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 1965 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, 1966 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1967 1968 if (error) 1969 return (ENOMEM); 1970 1971 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 1972 1973 /* Create tag for status block */ 1974 1975 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1976 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1977 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0, 1978 NULL, NULL, &sc->bge_cdata.bge_status_tag); 1979 1980 if (error) { 1981 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1982 return (ENOMEM); 1983 } 1984 1985 /* Allocate DMA'able memory for status block */ 1986 1987 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 1988 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 1989 &sc->bge_cdata.bge_status_map); 1990 if (error) 1991 return (ENOMEM); 1992 1993 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 1994 1995 /* Load the address of the status block */ 1996 1997 ctx.sc = sc; 1998 ctx.bge_maxsegs = 1; 1999 2000 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2001 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 2002 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2003 2004 if (error) 2005 return (ENOMEM); 2006 2007 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2008 2009 /* Create tag for statistics block */ 2010 2011 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2012 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2013 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 2014 &sc->bge_cdata.bge_stats_tag); 2015 2016 if (error) { 2017 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2018 return (ENOMEM); 2019 } 2020 2021 /* Allocate DMA'able memory for statistics block */ 2022 2023 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, 2024 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, 2025 &sc->bge_cdata.bge_stats_map); 2026 if (error) 2027 return (ENOMEM); 2028 2029 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ); 2030 2031 /* Load the address of the statstics block */ 2032 2033 ctx.sc = sc; 2034 ctx.bge_maxsegs = 1; 2035 2036 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, 2037 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, 2038 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2039 2040 if (error) 2041 return (ENOMEM); 2042 2043 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; 2044 2045 return(0); 2046 } 2047 2048 static int 2049 bge_attach(dev) 2050 device_t dev; 2051 { 2052 struct ifnet *ifp; 2053 struct bge_softc *sc; 2054 u_int32_t hwcfg = 0; 2055 u_int32_t mac_tmp = 0; 2056 u_char eaddr[6]; 2057 int error = 0, rid; 2058 2059 sc = device_get_softc(dev); 2060 sc->bge_dev = dev; 2061 2062 /* 2063 * Map control/status registers. 
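 * Bus mastering must be enabled before the chip can DMA rings and
 * status blocks into host memory. BAR0 provides memory-mapped access
 * to the device registers; the bus tag and handle saved below are
 * what the CSR_READ_4()/CSR_WRITE_4() accessors use. The IRQ is
 * allocated shareable since the slot may share an interrupt line
 * with other devices.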
2064 */ 2065 pci_enable_busmaster(dev); 2066 2067 rid = BGE_PCI_BAR0; 2068 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2069 RF_ACTIVE|PCI_RF_DENSE); 2070 2071 if (sc->bge_res == NULL) { 2072 device_printf (sc->bge_dev, "couldn't map memory\n"); 2073 error = ENXIO; 2074 goto fail; 2075 } 2076 2077 sc->bge_btag = rman_get_bustag(sc->bge_res); 2078 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 2079 2080 /* Allocate interrupt */ 2081 rid = 0; 2082 2083 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2084 RF_SHAREABLE | RF_ACTIVE); 2085 2086 if (sc->bge_irq == NULL) { 2087 device_printf(sc->bge_dev, "couldn't map interrupt\n"); 2088 error = ENXIO; 2089 goto fail; 2090 } 2091 2092 BGE_LOCK_INIT(sc, device_get_nameunit(dev)); 2093 2094 /* Save ASIC rev. */ 2095 2096 sc->bge_chipid = 2097 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 2098 BGE_PCIMISCCTL_ASICREV; 2099 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2100 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2101 2102 /* 2103 * Treat the 5714 and the 5752 like the 5750 until we have more info 2104 * on this chip. 2105 */ 2106 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 2107 sc->bge_asicrev == BGE_ASICREV_BCM5752) 2108 sc->bge_asicrev = BGE_ASICREV_BCM5750; 2109 2110 /* 2111 * XXX: Broadcom Linux driver. Not in specs or eratta. 2112 * PCI-Express? 2113 */ 2114 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 2115 u_int32_t v; 2116 2117 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4); 2118 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) { 2119 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); 2120 if ((v & 0xff) == BGE_PCIE_CAPID) 2121 sc->bge_pcie = 1; 2122 } 2123 } 2124 2125 /* Try to reset the chip. */ 2126 bge_reset(sc); 2127 2128 if (bge_chipinit(sc)) { 2129 device_printf(sc->bge_dev, "chip initialization failed\n"); 2130 bge_release_resources(sc); 2131 error = ENXIO; 2132 goto fail; 2133 } 2134 2135 /* 2136 * Get station address from the EEPROM. 2137 */ 2138 mac_tmp = bge_readmem_ind(sc, 0x0c14); 2139 if ((mac_tmp >> 16) == 0x484b) { 2140 eaddr[0] = (u_char)(mac_tmp >> 8); 2141 eaddr[1] = (u_char)mac_tmp; 2142 mac_tmp = bge_readmem_ind(sc, 0x0c18); 2143 eaddr[2] = (u_char)(mac_tmp >> 24); 2144 eaddr[3] = (u_char)(mac_tmp >> 16); 2145 eaddr[4] = (u_char)(mac_tmp >> 8); 2146 eaddr[5] = (u_char)mac_tmp; 2147 } else if (bge_read_eeprom(sc, eaddr, 2148 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2149 device_printf(sc->bge_dev, "failed to read station address\n"); 2150 bge_release_resources(sc); 2151 error = ENXIO; 2152 goto fail; 2153 } 2154 2155 /* 5705 limits RX return ring to 512 entries. */ 2156 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 2157 sc->bge_asicrev == BGE_ASICREV_BCM5750) 2158 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2159 else 2160 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2161 2162 if (bge_dma_alloc(dev)) { 2163 device_printf(sc->bge_dev, 2164 "failed to allocate DMA resources\n"); 2165 bge_release_resources(sc); 2166 error = ENXIO; 2167 goto fail; 2168 } 2169 2170 /* Set default tuneable values. 
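 * These are host coalescing parameters: the *_coal_ticks values bound
 * how long the chip may hold off a status block update and interrupt,
 * and the *_max_coal_bds values bound how many RX/TX buffer
 * descriptors may complete before one is forced. Raising them trades
 * interrupt rate against latency.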
 */
        sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
        sc->bge_rx_coal_ticks = 150;
        sc->bge_tx_coal_ticks = 150;
        sc->bge_rx_max_coal_bds = 64;
        sc->bge_tx_max_coal_bds = 128;

        /* Set up ifnet structure */
        ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(sc->bge_dev, "failed to if_alloc()\n");
                bge_release_resources(sc);
                error = ENXIO;
                goto fail;
        }
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = bge_ioctl;
        ifp->if_start = bge_start;
        ifp->if_watchdog = bge_watchdog;
        ifp->if_init = bge_init;
        ifp->if_mtu = ETHERMTU;
        ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
        IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
        IFQ_SET_READY(&ifp->if_snd);
        ifp->if_hwassist = BGE_CSUM_FEATURES;
        ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
            IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
        ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif

        /*
         * 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
                ifp->if_capabilities &= ~IFCAP_HWCSUM;
                ifp->if_capenable &= ~IFCAP_HWCSUM;
                ifp->if_hwassist = 0;
        }

        /*
         * Figure out what sort of media we have by checking the
         * hardware config word in the first 32k of NIC internal memory,
         * or fall back to examining the EEPROM if necessary.
         * Note: on some BCM5700 cards, this value appears to be unset.
         * If that's the case, we have to rely on identifying the NIC
         * by its PCI subsystem ID, as we do below for the SysKonnect
         * SK-9D41.
         */
        if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
                hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
        else {
                if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
                    sizeof(hwcfg))) {
                        device_printf(sc->bge_dev, "failed to read EEPROM\n");
                        bge_release_resources(sc);
                        error = ENXIO;
                        goto fail;
                }
                hwcfg = ntohl(hwcfg);
        }

        if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
                sc->bge_tbi = 1;

        /* The SysKonnect SK-9D41 is a 1000baseSX card. */
        if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
                sc->bge_tbi = 1;

        if (sc->bge_tbi) {
                ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
                    bge_ifmedia_upd, bge_ifmedia_sts);
                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
                ifmedia_add(&sc->bge_ifmedia,
                    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
                ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
                ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
                sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
        } else {
                /*
                 * Do transceiver setup.
                 */
                if (mii_phy_probe(dev, &sc->bge_miibus,
                    bge_ifmedia_upd, bge_ifmedia_sts)) {
                        device_printf(sc->bge_dev, "MII without any PHY!\n");
                        bge_release_resources(sc);
                        error = ENXIO;
                        goto fail;
                }
        }

        /*
         * When using the BCM5701 in PCI-X mode, data corruption has
         * been observed in the first few bytes of some received packets.
         * Aligning the packet buffer in memory eliminates the corruption.
2269 * Unfortunately, this misaligns the packet payloads. On platforms 2270 * which do not support unaligned accesses, we will realign the 2271 * payloads by copying the received packets. 2272 */ 2273 switch (sc->bge_chipid) { 2274 case BGE_CHIPID_BCM5701_A0: 2275 case BGE_CHIPID_BCM5701_B0: 2276 case BGE_CHIPID_BCM5701_B2: 2277 case BGE_CHIPID_BCM5701_B5: 2278 /* If in PCI-X mode, work around the alignment bug. */ 2279 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & 2280 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2281 BGE_PCISTATE_PCI_BUSSPEED) 2282 sc->bge_rx_alignment_bug = 1; 2283 break; 2284 } 2285 2286 /* 2287 * Call MI attach routine. 2288 */ 2289 ether_ifattach(ifp, eaddr); 2290 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE); 2291 2292 /* 2293 * Hookup IRQ last. 2294 */ 2295 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2296 bge_intr, sc, &sc->bge_intrhand); 2297 2298 if (error) { 2299 bge_detach(dev); 2300 device_printf(sc->bge_dev, "couldn't set up irq\n"); 2301 } 2302 2303 fail: 2304 return(error); 2305 } 2306 2307 static int 2308 bge_detach(dev) 2309 device_t dev; 2310 { 2311 struct bge_softc *sc; 2312 struct ifnet *ifp; 2313 2314 sc = device_get_softc(dev); 2315 ifp = sc->bge_ifp; 2316 2317 #ifdef DEVICE_POLLING 2318 if (ifp->if_capenable & IFCAP_POLLING) 2319 ether_poll_deregister(ifp); 2320 #endif 2321 2322 BGE_LOCK(sc); 2323 bge_stop(sc); 2324 bge_reset(sc); 2325 BGE_UNLOCK(sc); 2326 2327 ether_ifdetach(ifp); 2328 2329 if (sc->bge_tbi) { 2330 ifmedia_removeall(&sc->bge_ifmedia); 2331 } else { 2332 bus_generic_detach(dev); 2333 device_delete_child(dev, sc->bge_miibus); 2334 } 2335 2336 bge_release_resources(sc); 2337 2338 return(0); 2339 } 2340 2341 static void 2342 bge_release_resources(sc) 2343 struct bge_softc *sc; 2344 { 2345 device_t dev; 2346 2347 dev = sc->bge_dev; 2348 2349 if (sc->bge_vpd_prodname != NULL) 2350 free(sc->bge_vpd_prodname, M_DEVBUF); 2351 2352 if (sc->bge_vpd_readonly != NULL) 2353 free(sc->bge_vpd_readonly, M_DEVBUF); 2354 2355 if (sc->bge_intrhand != NULL) 2356 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2357 2358 if (sc->bge_irq != NULL) 2359 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); 2360 2361 if (sc->bge_res != NULL) 2362 bus_release_resource(dev, SYS_RES_MEMORY, 2363 BGE_PCI_BAR0, sc->bge_res); 2364 2365 if (sc->bge_ifp != NULL) 2366 if_free(sc->bge_ifp); 2367 2368 bge_dma_free(sc); 2369 2370 if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 2371 BGE_LOCK_DESTROY(sc); 2372 2373 return; 2374 } 2375 2376 static void 2377 bge_reset(sc) 2378 struct bge_softc *sc; 2379 { 2380 device_t dev; 2381 u_int32_t cachesize, command, pcistate, reset; 2382 int i, val = 0; 2383 2384 dev = sc->bge_dev; 2385 2386 /* Save some important PCI state. */ 2387 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2388 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2389 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2390 2391 pci_write_config(dev, BGE_PCI_MISC_CTL, 2392 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2393 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2394 2395 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2396 2397 /* XXX: Broadcom Linux driver. 
*/ 2398 if (sc->bge_pcie) { 2399 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */ 2400 CSR_WRITE_4(sc, 0x7e2c, 0x20); 2401 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2402 /* Prevent PCIE link training during global reset */ 2403 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2404 reset |= (1<<29); 2405 } 2406 } 2407 2408 /* Issue global reset */ 2409 bge_writereg_ind(sc, BGE_MISC_CFG, reset); 2410 2411 DELAY(1000); 2412 2413 /* XXX: Broadcom Linux driver. */ 2414 if (sc->bge_pcie) { 2415 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2416 uint32_t v; 2417 2418 DELAY(500000); /* wait for link training to complete */ 2419 v = pci_read_config(dev, 0xc4, 4); 2420 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2421 } 2422 /* Set PCIE max payload size and clear error status. */ 2423 pci_write_config(dev, 0xd8, 0xf5000, 4); 2424 } 2425 2426 /* Reset some of the PCI state that got zapped by reset */ 2427 pci_write_config(dev, BGE_PCI_MISC_CTL, 2428 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2429 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2430 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2431 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2432 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2433 2434 /* Enable memory arbiter. */ 2435 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2436 sc->bge_asicrev != BGE_ASICREV_BCM5750) 2437 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2438 2439 /* 2440 * Prevent PXE restart: write a magic number to the 2441 * general communications memory at 0xB50. 2442 */ 2443 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2444 /* 2445 * Poll the value location we just wrote until 2446 * we see the 1's complement of the magic number. 2447 * This indicates that the firmware initialization 2448 * is complete. 2449 */ 2450 for (i = 0; i < BGE_TIMEOUT; i++) { 2451 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2452 if (val == ~BGE_MAGIC_NUMBER) 2453 break; 2454 DELAY(10); 2455 } 2456 2457 if (i == BGE_TIMEOUT) { 2458 device_printf(sc->bge_dev, "firmware handshake timed out\n"); 2459 return; 2460 } 2461 2462 /* 2463 * XXX Wait for the value of the PCISTATE register to 2464 * return to its original pre-reset state. This is a 2465 * fairly good indicator of reset completion. If we don't 2466 * wait for the reset to fully complete, trying to read 2467 * from the device's non-PCI registers may yield garbage 2468 * results. 2469 */ 2470 for (i = 0; i < BGE_TIMEOUT; i++) { 2471 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2472 break; 2473 DELAY(10); 2474 } 2475 2476 /* Fix up byte swapping */ 2477 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 2478 BGE_MODECTL_BYTESWAP_DATA); 2479 2480 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2481 2482 /* 2483 * The 5704 in TBI mode apparently needs some special 2484 * adjustment to insure the SERDES drive level is set 2485 * to 1.2V. 2486 */ 2487 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) { 2488 uint32_t serdescfg; 2489 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2490 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2491 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2492 } 2493 2494 /* XXX: Broadcom Linux driver. */ 2495 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2496 uint32_t v; 2497 2498 v = CSR_READ_4(sc, 0x7c00); 2499 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 2500 } 2501 DELAY(10000); 2502 2503 return; 2504 } 2505 2506 /* 2507 * Frame reception handling. This is called if there's a frame 2508 * on the receive return list. 
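 *
 * The chip reports completed frames by advancing the RX return ring
 * producer index in the host status block; we walk our saved consumer
 * index up to it, pass each mbuf to the stack, replenish the
 * originating ring, and finally write the new consumer index back
 * through the mailbox register.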
2509 * 2510 * Note: we have to be able to handle two possibilities here: 2511 * 1) the frame is from the jumbo receive ring 2512 * 2) the frame is from the standard receive ring 2513 */ 2514 2515 static void 2516 bge_rxeof(sc) 2517 struct bge_softc *sc; 2518 { 2519 struct ifnet *ifp; 2520 int stdcnt = 0, jumbocnt = 0; 2521 2522 BGE_LOCK_ASSERT(sc); 2523 2524 /* Nothing to do */ 2525 if (sc->bge_rx_saved_considx == 2526 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) 2527 return; 2528 2529 ifp = sc->bge_ifp; 2530 2531 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 2532 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 2533 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2534 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); 2535 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2536 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2537 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2538 sc->bge_cdata.bge_rx_jumbo_ring_map, 2539 BUS_DMASYNC_POSTREAD); 2540 } 2541 2542 while(sc->bge_rx_saved_considx != 2543 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { 2544 struct bge_rx_bd *cur_rx; 2545 u_int32_t rxidx; 2546 struct mbuf *m = NULL; 2547 u_int16_t vlan_tag = 0; 2548 int have_tag = 0; 2549 2550 #ifdef DEVICE_POLLING 2551 if (ifp->if_capenable & IFCAP_POLLING) { 2552 if (sc->rxcycles <= 0) 2553 break; 2554 sc->rxcycles--; 2555 } 2556 #endif 2557 2558 cur_rx = 2559 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2560 2561 rxidx = cur_rx->bge_idx; 2562 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2563 2564 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2565 have_tag = 1; 2566 vlan_tag = cur_rx->bge_vlan_tag; 2567 } 2568 2569 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2570 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2571 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 2572 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], 2573 BUS_DMASYNC_POSTREAD); 2574 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 2575 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); 2576 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2577 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2578 jumbocnt++; 2579 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2580 ifp->if_ierrors++; 2581 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2582 continue; 2583 } 2584 if (bge_newbuf_jumbo(sc, 2585 sc->bge_jumbo, NULL) == ENOBUFS) { 2586 ifp->if_ierrors++; 2587 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2588 continue; 2589 } 2590 } else { 2591 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2592 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 2593 sc->bge_cdata.bge_rx_std_dmamap[rxidx], 2594 BUS_DMASYNC_POSTREAD); 2595 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2596 sc->bge_cdata.bge_rx_std_dmamap[rxidx]); 2597 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2598 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2599 stdcnt++; 2600 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2601 ifp->if_ierrors++; 2602 bge_newbuf_std(sc, sc->bge_std, m); 2603 continue; 2604 } 2605 if (bge_newbuf_std(sc, sc->bge_std, 2606 NULL) == ENOBUFS) { 2607 ifp->if_ierrors++; 2608 bge_newbuf_std(sc, sc->bge_std, m); 2609 continue; 2610 } 2611 } 2612 2613 ifp->if_ipackets++; 2614 #ifndef __NO_STRICT_ALIGNMENT 2615 /* 2616 * For architectures with strict alignment we must make sure 2617 * the payload is aligned. 
2618 */ 2619 if (sc->bge_rx_alignment_bug) { 2620 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2621 cur_rx->bge_len); 2622 m->m_data += ETHER_ALIGN; 2623 } 2624 #endif 2625 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2626 m->m_pkthdr.rcvif = ifp; 2627 2628 if (ifp->if_capenable & IFCAP_RXCSUM) { 2629 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2630 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2631 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 2632 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2633 } 2634 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2635 m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 2636 m->m_pkthdr.csum_data = 2637 cur_rx->bge_tcp_udp_csum; 2638 m->m_pkthdr.csum_flags |= 2639 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2640 } 2641 } 2642 2643 /* 2644 * If we received a packet with a vlan tag, 2645 * attach that information to the packet. 2646 */ 2647 if (have_tag) { 2648 VLAN_INPUT_TAG(ifp, m, vlan_tag); 2649 if (m == NULL) 2650 continue; 2651 } 2652 2653 BGE_UNLOCK(sc); 2654 (*ifp->if_input)(ifp, m); 2655 BGE_LOCK(sc); 2656 } 2657 2658 if (stdcnt > 0) 2659 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2660 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 2661 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2662 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2663 if (jumbocnt > 0) 2664 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2665 sc->bge_cdata.bge_rx_jumbo_ring_map, 2666 BUS_DMASYNC_PREWRITE); 2667 } 2668 2669 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2670 if (stdcnt) 2671 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2672 if (jumbocnt) 2673 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2674 } 2675 2676 static void 2677 bge_txeof(sc) 2678 struct bge_softc *sc; 2679 { 2680 struct bge_tx_bd *cur_tx = NULL; 2681 struct ifnet *ifp; 2682 2683 BGE_LOCK_ASSERT(sc); 2684 2685 /* Nothing to do */ 2686 if (sc->bge_tx_saved_considx == 2687 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) 2688 return; 2689 2690 ifp = sc->bge_ifp; 2691 2692 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 2693 sc->bge_cdata.bge_tx_ring_map, 2694 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2695 /* 2696 * Go through our tx ring and free mbufs for those 2697 * frames that have been sent. 
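 * The chip advances the TX consumer index in the status block as it
 * finishes DMAing each frame; everything between our saved index and
 * the chip's is complete, so the associated DMA maps can be unloaded
 * and the mbufs freed.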
2698 */ 2699 while (sc->bge_tx_saved_considx != 2700 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) { 2701 u_int32_t idx = 0; 2702 2703 idx = sc->bge_tx_saved_considx; 2704 cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 2705 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2706 ifp->if_opackets++; 2707 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 2708 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 2709 sc->bge_cdata.bge_tx_dmamap[idx], 2710 BUS_DMASYNC_POSTWRITE); 2711 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2712 sc->bge_cdata.bge_tx_dmamap[idx]); 2713 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 2714 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2715 } 2716 sc->bge_txcnt--; 2717 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2718 ifp->if_timer = 0; 2719 } 2720 2721 if (cur_tx != NULL) 2722 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2723 } 2724 2725 #ifdef DEVICE_POLLING 2726 static void 2727 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2728 { 2729 struct bge_softc *sc = ifp->if_softc; 2730 2731 BGE_LOCK(sc); 2732 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2733 bge_poll_locked(ifp, cmd, count); 2734 BGE_UNLOCK(sc); 2735 } 2736 2737 static void 2738 bge_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 2739 { 2740 struct bge_softc *sc = ifp->if_softc; 2741 uint32_t statusword; 2742 2743 BGE_LOCK_ASSERT(sc); 2744 2745 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2746 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); 2747 2748 statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status); 2749 2750 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2751 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); 2752 2753 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */ 2754 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) 2755 sc->bge_link_evt++; 2756 2757 if (cmd == POLL_AND_CHECK_STATUS) 2758 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2759 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) || 2760 sc->bge_link_evt || sc->bge_tbi) 2761 bge_link_upd(sc); 2762 2763 sc->rxcycles = count; 2764 bge_rxeof(sc); 2765 bge_txeof(sc); 2766 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2767 bge_start_locked(ifp); 2768 } 2769 #endif /* DEVICE_POLLING */ 2770 2771 static void 2772 bge_intr(xsc) 2773 void *xsc; 2774 { 2775 struct bge_softc *sc; 2776 struct ifnet *ifp; 2777 uint32_t statusword; 2778 2779 sc = xsc; 2780 2781 BGE_LOCK(sc); 2782 2783 ifp = sc->bge_ifp; 2784 2785 #ifdef DEVICE_POLLING 2786 if (ifp->if_capenable & IFCAP_POLLING) { 2787 BGE_UNLOCK(sc); 2788 return; 2789 } 2790 #endif 2791 2792 /* 2793 * Do the mandatory PCI flush as well as get the link status. 2794 */ 2795 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; 2796 2797 /* Ack interrupt and stop others from occuring. */ 2798 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2799 2800 /* Make sure the descriptor ring indexes are coherent. */ 2801 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2802 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); 2803 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2804 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); 2805 2806 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2807 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) || 2808 statusword || sc->bge_link_evt) 2809 bge_link_upd(sc); 2810 2811 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2812 /* Check RX return ring producer/consumer */ 2813 bge_rxeof(sc); 2814 2815 /* Check TX ring producer/consumer */ 2816 bge_txeof(sc); 2817 } 2818 2819 /* Re-enable interrupts. 
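 * Writing 0 to the IRQ0 mailbox re-arms the interrupt that was masked
 * by the write of 1 at the top of the handler.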
*/ 2820 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2821 2822 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2823 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2824 bge_start_locked(ifp); 2825 2826 BGE_UNLOCK(sc); 2827 2828 return; 2829 } 2830 2831 static void 2832 bge_tick_locked(sc) 2833 struct bge_softc *sc; 2834 { 2835 struct mii_data *mii = NULL; 2836 2837 BGE_LOCK_ASSERT(sc); 2838 2839 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 2840 sc->bge_asicrev == BGE_ASICREV_BCM5750) 2841 bge_stats_update_regs(sc); 2842 else 2843 bge_stats_update(sc); 2844 2845 if (!sc->bge_tbi) { 2846 mii = device_get_softc(sc->bge_miibus); 2847 mii_tick(mii); 2848 } else { 2849 /* 2850 * Since in TBI mode auto-polling can't be used we should poll 2851 * link status manually. Here we register pending link event 2852 * and trigger interrupt. 2853 */ 2854 #ifdef DEVICE_POLLING 2855 /* In polling mode we poll link state in bge_poll_locked() */ 2856 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING)) 2857 #endif 2858 { 2859 sc->bge_link_evt++; 2860 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 2861 } 2862 } 2863 2864 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 2865 } 2866 2867 static void 2868 bge_tick(xsc) 2869 void *xsc; 2870 { 2871 struct bge_softc *sc; 2872 2873 sc = xsc; 2874 2875 BGE_LOCK(sc); 2876 bge_tick_locked(sc); 2877 BGE_UNLOCK(sc); 2878 } 2879 2880 static void 2881 bge_stats_update_regs(sc) 2882 struct bge_softc *sc; 2883 { 2884 struct ifnet *ifp; 2885 struct bge_mac_stats_regs stats; 2886 u_int32_t *s; 2887 u_long cnt; /* current register value */ 2888 int i; 2889 2890 ifp = sc->bge_ifp; 2891 2892 s = (u_int32_t *)&stats; 2893 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 2894 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 2895 s++; 2896 } 2897 2898 cnt = stats.dot3StatsSingleCollisionFrames + 2899 stats.dot3StatsMultipleCollisionFrames + 2900 stats.dot3StatsExcessiveCollisions + 2901 stats.dot3StatsLateCollisions; 2902 ifp->if_collisions += cnt >= sc->bge_tx_collisions ? 2903 cnt - sc->bge_tx_collisions : cnt; 2904 sc->bge_tx_collisions = cnt; 2905 } 2906 2907 static void 2908 bge_stats_update(sc) 2909 struct bge_softc *sc; 2910 { 2911 struct ifnet *ifp; 2912 bus_size_t stats; 2913 u_long cnt; /* current register value */ 2914 2915 ifp = sc->bge_ifp; 2916 2917 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 2918 2919 #define READ_STAT(sc, stats, stat) \ 2920 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 2921 2922 cnt = READ_STAT(sc, stats, 2923 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo); 2924 cnt += READ_STAT(sc, stats, 2925 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo); 2926 cnt += READ_STAT(sc, stats, 2927 txstats.dot3StatsExcessiveCollisions.bge_addr_lo); 2928 cnt += READ_STAT(sc, stats, 2929 txstats.dot3StatsLateCollisions.bge_addr_lo); 2930 ifp->if_collisions += cnt >= sc->bge_tx_collisions ? 2931 cnt - sc->bge_tx_collisions : cnt; 2932 sc->bge_tx_collisions = cnt; 2933 2934 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); 2935 ifp->if_ierrors += cnt >= sc->bge_rx_discards ? 2936 cnt - sc->bge_rx_discards : cnt; 2937 sc->bge_rx_discards = cnt; 2938 2939 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); 2940 ifp->if_oerrors += cnt >= sc->bge_tx_discards ? 2941 cnt - sc->bge_tx_discards : cnt; 2942 sc->bge_tx_discards = cnt; 2943 2944 #undef READ_STAT 2945 } 2946 2947 /* 2948 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 
2949 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 2950 * but when such padded frames employ the bge IP/TCP checksum offload, 2951 * the hardware checksum assist gives incorrect results (possibly 2952 * from incorporating its own padding into the UDP/TCP checksum; who knows). 2953 * If we pad such runts with zeros, the onboard checksum comes out correct. 2954 */ 2955 static __inline int 2956 bge_cksum_pad(struct mbuf *m) 2957 { 2958 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; 2959 struct mbuf *last; 2960 2961 /* If there's only the packet-header and we can pad there, use it. */ 2962 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && 2963 M_TRAILINGSPACE(m) >= padlen) { 2964 last = m; 2965 } else { 2966 /* 2967 * Walk packet chain to find last mbuf. We will either 2968 * pad there, or append a new mbuf and pad it. 2969 */ 2970 for (last = m; last->m_next != NULL; last = last->m_next); 2971 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { 2972 /* Allocate new empty mbuf, pad it. Compact later. */ 2973 struct mbuf *n; 2974 2975 MGET(n, M_DONTWAIT, MT_DATA); 2976 if (n == NULL) 2977 return (ENOBUFS); 2978 n->m_len = 0; 2979 last->m_next = n; 2980 last = n; 2981 } 2982 } 2983 2984 /* Now zero the pad area, to avoid the bge cksum-assist bug. */ 2985 memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 2986 last->m_len += padlen; 2987 m->m_pkthdr.len += padlen; 2988 2989 return (0); 2990 } 2991 2992 /* 2993 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2994 * pointers to descriptors. 2995 */ 2996 static int 2997 bge_encap(sc, m_head, txidx) 2998 struct bge_softc *sc; 2999 struct mbuf *m_head; 3000 uint32_t *txidx; 3001 { 3002 bus_dma_segment_t segs[BGE_NSEG_NEW]; 3003 bus_dmamap_t map; 3004 struct bge_tx_bd *d = NULL; 3005 struct m_tag *mtag; 3006 uint32_t idx = *txidx; 3007 uint16_t csum_flags = 0; 3008 int nsegs, i, error; 3009 3010 if (m_head->m_pkthdr.csum_flags) { 3011 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3012 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3013 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { 3014 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3015 if (m_head->m_pkthdr.len < ETHER_MIN_NOPAD && 3016 bge_cksum_pad(m_head) != 0) 3017 return (ENOBUFS); 3018 } 3019 if (m_head->m_flags & M_LASTFRAG) 3020 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 3021 else if (m_head->m_flags & M_FRAG) 3022 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 3023 } 3024 3025 mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head); 3026 3027 map = sc->bge_cdata.bge_tx_dmamap[idx]; 3028 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, 3029 m_head, segs, &nsegs, BUS_DMA_NOWAIT); 3030 if (error) { 3031 if (error == EFBIG) { 3032 struct mbuf *m0; 3033 3034 m0 = m_defrag(m_head, M_DONTWAIT); 3035 if (m0 == NULL) 3036 return (ENOBUFS); 3037 m_head = m0; 3038 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, 3039 map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); 3040 } 3041 if (error) 3042 return (error); 3043 } 3044 3045 /* 3046 * Sanity check: avoid coming within 16 descriptors 3047 * of the end of the ring. 
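 * In other words, a chain is accepted only if its segment count fits
 * in the free descriptors remaining after a 16-entry cushion, i.e.
 * nsegs <= BGE_TX_RING_CNT - bge_txcnt - 16; otherwise the map is
 * unloaded and the frame is requeued by the caller.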
3048 */ 3049 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 3050 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map); 3051 return (ENOBUFS); 3052 } 3053 3054 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE); 3055 3056 for (i = 0; ; i++) { 3057 d = &sc->bge_ldata.bge_tx_ring[idx]; 3058 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 3059 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 3060 d->bge_len = segs[i].ds_len; 3061 d->bge_flags = csum_flags; 3062 if (i == nsegs - 1) 3063 break; 3064 BGE_INC(idx, BGE_TX_RING_CNT); 3065 } 3066 3067 /* Mark the last segment as end of packet... */ 3068 d->bge_flags |= BGE_TXBDFLAG_END; 3069 /* ... and put VLAN tag into first segment. */ 3070 d = &sc->bge_ldata.bge_tx_ring[*txidx]; 3071 if (mtag != NULL) { 3072 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3073 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 3074 } else 3075 d->bge_vlan_tag = 0; 3076 3077 /* 3078 * Insure that the map for this transmission 3079 * is placed at the array index of the last descriptor 3080 * in this chain. 3081 */ 3082 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 3083 sc->bge_cdata.bge_tx_dmamap[idx] = map; 3084 sc->bge_cdata.bge_tx_chain[idx] = m_head; 3085 sc->bge_txcnt += nsegs; 3086 3087 BGE_INC(idx, BGE_TX_RING_CNT); 3088 *txidx = idx; 3089 3090 return (0); 3091 } 3092 3093 /* 3094 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3095 * to the mbuf data regions directly in the transmit descriptors. 3096 */ 3097 static void 3098 bge_start_locked(ifp) 3099 struct ifnet *ifp; 3100 { 3101 struct bge_softc *sc; 3102 struct mbuf *m_head = NULL; 3103 uint32_t prodidx; 3104 int count = 0; 3105 3106 sc = ifp->if_softc; 3107 3108 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3109 return; 3110 3111 prodidx = sc->bge_tx_prodidx; 3112 3113 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3114 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 3115 if (m_head == NULL) 3116 break; 3117 3118 /* 3119 * XXX 3120 * The code inside the if() block is never reached since we 3121 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 3122 * requests to checksum TCP/UDP in a fragmented packet. 3123 * 3124 * XXX 3125 * safety overkill. If this is a fragmented packet chain 3126 * with delayed TCP/UDP checksums, then only encapsulate 3127 * it if we have enough descriptors to handle the entire 3128 * chain at once. 3129 * (paranoia -- may not actually be needed) 3130 */ 3131 if (m_head->m_flags & M_FIRSTFRAG && 3132 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3133 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3134 m_head->m_pkthdr.csum_data + 16) { 3135 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3136 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3137 break; 3138 } 3139 } 3140 3141 /* 3142 * Pack the data into the transmit ring. If we 3143 * don't have room, set the OACTIVE flag and wait 3144 * for the NIC to drain the ring. 3145 */ 3146 if (bge_encap(sc, m_head, &prodidx)) { 3147 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3148 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3149 break; 3150 } 3151 ++count; 3152 3153 /* 3154 * If there's a BPF listener, bounce a copy of this frame 3155 * to him. 
3156 */ 3157 BPF_MTAP(ifp, m_head); 3158 } 3159 3160 if (count == 0) { 3161 /* no packets were dequeued */ 3162 return; 3163 } 3164 3165 /* Transmit */ 3166 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3167 /* 5700 b2 errata */ 3168 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 3169 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3170 3171 sc->bge_tx_prodidx = prodidx; 3172 3173 /* 3174 * Set a timeout in case the chip goes out to lunch. 3175 */ 3176 ifp->if_timer = 5; 3177 3178 return; 3179 } 3180 3181 /* 3182 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3183 * to the mbuf data regions directly in the transmit descriptors. 3184 */ 3185 static void 3186 bge_start(ifp) 3187 struct ifnet *ifp; 3188 { 3189 struct bge_softc *sc; 3190 3191 sc = ifp->if_softc; 3192 BGE_LOCK(sc); 3193 bge_start_locked(ifp); 3194 BGE_UNLOCK(sc); 3195 } 3196 3197 static void 3198 bge_init_locked(sc) 3199 struct bge_softc *sc; 3200 { 3201 struct ifnet *ifp; 3202 u_int16_t *m; 3203 3204 BGE_LOCK_ASSERT(sc); 3205 3206 ifp = sc->bge_ifp; 3207 3208 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3209 return; 3210 3211 /* Cancel pending I/O and flush buffers. */ 3212 bge_stop(sc); 3213 bge_reset(sc); 3214 bge_chipinit(sc); 3215 3216 /* 3217 * Init the various state machines, ring 3218 * control blocks and firmware. 3219 */ 3220 if (bge_blockinit(sc)) { 3221 device_printf(sc->bge_dev, "initialization failure\n"); 3222 return; 3223 } 3224 3225 ifp = sc->bge_ifp; 3226 3227 /* Specify MTU. */ 3228 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3229 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 3230 3231 /* Load our MAC address. */ 3232 m = (u_int16_t *)IF_LLADDR(sc->bge_ifp); 3233 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3234 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3235 3236 /* Enable or disable promiscuous mode as needed. */ 3237 if (ifp->if_flags & IFF_PROMISC) { 3238 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3239 } else { 3240 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3241 } 3242 3243 /* Program multicast filter. */ 3244 bge_setmulti(sc); 3245 3246 /* Init RX ring. */ 3247 bge_init_rx_ring_std(sc); 3248 3249 /* 3250 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 3251 * memory to insure that the chip has in fact read the first 3252 * entry of the ring. 3253 */ 3254 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 3255 u_int32_t v, i; 3256 for (i = 0; i < 10; i++) { 3257 DELAY(20); 3258 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 3259 if (v == (MCLBYTES - ETHER_ALIGN)) 3260 break; 3261 } 3262 if (i == 10) 3263 device_printf (sc->bge_dev, 3264 "5705 A0 chip failed to load RX ring\n"); 3265 } 3266 3267 /* Init jumbo RX ring. */ 3268 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3269 bge_init_rx_ring_jumbo(sc); 3270 3271 /* Init our RX return ring index */ 3272 sc->bge_rx_saved_considx = 0; 3273 3274 /* Init TX ring. */ 3275 bge_init_tx_ring(sc); 3276 3277 /* Turn on transmitter */ 3278 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3279 3280 /* Turn on receiver */ 3281 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3282 3283 /* Tell firmware we're alive. */ 3284 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3285 3286 #ifdef DEVICE_POLLING 3287 /* Disable interrupts if we are polling. 
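 * In polling mode the PCI interrupt is masked and the per-interrupt
 * coalescing thresholds are dropped to 1, presumably so the status
 * block stays current for bge_poll() to read while the interrupt line
 * stays quiet.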
*/ 3288 if (ifp->if_capenable & IFCAP_POLLING) { 3289 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 3290 BGE_PCIMISCCTL_MASK_PCI_INTR); 3291 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3292 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 3293 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 3294 } else 3295 #endif 3296 3297 /* Enable host interrupts. */ 3298 { 3299 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3300 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3301 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3302 } 3303 3304 bge_ifmedia_upd(ifp); 3305 3306 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3307 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3308 3309 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 3310 } 3311 3312 static void 3313 bge_init(xsc) 3314 void *xsc; 3315 { 3316 struct bge_softc *sc = xsc; 3317 3318 BGE_LOCK(sc); 3319 bge_init_locked(sc); 3320 BGE_UNLOCK(sc); 3321 3322 return; 3323 } 3324 3325 /* 3326 * Set media options. 3327 */ 3328 static int 3329 bge_ifmedia_upd(ifp) 3330 struct ifnet *ifp; 3331 { 3332 struct bge_softc *sc; 3333 struct mii_data *mii; 3334 struct ifmedia *ifm; 3335 3336 sc = ifp->if_softc; 3337 ifm = &sc->bge_ifmedia; 3338 3339 /* If this is a 1000baseX NIC, enable the TBI port. */ 3340 if (sc->bge_tbi) { 3341 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3342 return(EINVAL); 3343 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3344 case IFM_AUTO: 3345 /* 3346 * The BCM5704 ASIC appears to have a special 3347 * mechanism for programming the autoneg 3348 * advertisement registers in TBI mode. 3349 */ 3350 if (bge_fake_autoneg == 0 && 3351 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 3352 uint32_t sgdig; 3353 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 3354 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 3355 sgdig |= BGE_SGDIGCFG_AUTO| 3356 BGE_SGDIGCFG_PAUSE_CAP| 3357 BGE_SGDIGCFG_ASYM_PAUSE; 3358 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 3359 sgdig|BGE_SGDIGCFG_SEND); 3360 DELAY(5); 3361 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 3362 } 3363 break; 3364 case IFM_1000_SX: 3365 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3366 BGE_CLRBIT(sc, BGE_MAC_MODE, 3367 BGE_MACMODE_HALF_DUPLEX); 3368 } else { 3369 BGE_SETBIT(sc, BGE_MAC_MODE, 3370 BGE_MACMODE_HALF_DUPLEX); 3371 } 3372 break; 3373 default: 3374 return(EINVAL); 3375 } 3376 return(0); 3377 } 3378 3379 sc->bge_link_evt++; 3380 mii = device_get_softc(sc->bge_miibus); 3381 if (mii->mii_instance) { 3382 struct mii_softc *miisc; 3383 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 3384 miisc = LIST_NEXT(miisc, mii_list)) 3385 mii_phy_reset(miisc); 3386 } 3387 mii_mediachg(mii); 3388 3389 return(0); 3390 } 3391 3392 /* 3393 * Report current media status. 
3394 */ 3395 static void 3396 bge_ifmedia_sts(ifp, ifmr) 3397 struct ifnet *ifp; 3398 struct ifmediareq *ifmr; 3399 { 3400 struct bge_softc *sc; 3401 struct mii_data *mii; 3402 3403 sc = ifp->if_softc; 3404 3405 if (sc->bge_tbi) { 3406 ifmr->ifm_status = IFM_AVALID; 3407 ifmr->ifm_active = IFM_ETHER; 3408 if (CSR_READ_4(sc, BGE_MAC_STS) & 3409 BGE_MACSTAT_TBI_PCS_SYNCHED) 3410 ifmr->ifm_status |= IFM_ACTIVE; 3411 ifmr->ifm_active |= IFM_1000_SX; 3412 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3413 ifmr->ifm_active |= IFM_HDX; 3414 else 3415 ifmr->ifm_active |= IFM_FDX; 3416 return; 3417 } 3418 3419 mii = device_get_softc(sc->bge_miibus); 3420 mii_pollstat(mii); 3421 ifmr->ifm_active = mii->mii_media_active; 3422 ifmr->ifm_status = mii->mii_media_status; 3423 3424 return; 3425 } 3426 3427 static int 3428 bge_ioctl(ifp, command, data) 3429 struct ifnet *ifp; 3430 u_long command; 3431 caddr_t data; 3432 { 3433 struct bge_softc *sc = ifp->if_softc; 3434 struct ifreq *ifr = (struct ifreq *) data; 3435 int mask, error = 0; 3436 struct mii_data *mii; 3437 3438 switch(command) { 3439 case SIOCSIFMTU: 3440 /* Disallow jumbo frames on 5705. */ 3441 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 || 3442 sc->bge_asicrev == BGE_ASICREV_BCM5750) && 3443 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU) 3444 error = EINVAL; 3445 else { 3446 ifp->if_mtu = ifr->ifr_mtu; 3447 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3448 bge_init(sc); 3449 } 3450 break; 3451 case SIOCSIFFLAGS: 3452 BGE_LOCK(sc); 3453 if (ifp->if_flags & IFF_UP) { 3454 /* 3455 * If only the state of the PROMISC flag changed, 3456 * then just use the 'set promisc mode' command 3457 * instead of reinitializing the entire NIC. Doing 3458 * a full re-init means reloading the firmware and 3459 * waiting for it to start up, which may take a 3460 * second or two. Similarly for ALLMULTI. 
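 *
 * The comparisons below are against bge_if_flags, a cached copy of
 * the flags from the previous SIOCSIFFLAGS call, which is how we
 * detect that only IFF_PROMISC or IFF_ALLMULTI changed.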
3461 */ 3462 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 3463 ifp->if_flags & IFF_PROMISC && 3464 !(sc->bge_if_flags & IFF_PROMISC)) { 3465 BGE_SETBIT(sc, BGE_RX_MODE, 3466 BGE_RXMODE_RX_PROMISC); 3467 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 3468 !(ifp->if_flags & IFF_PROMISC) && 3469 sc->bge_if_flags & IFF_PROMISC) { 3470 BGE_CLRBIT(sc, BGE_RX_MODE, 3471 BGE_RXMODE_RX_PROMISC); 3472 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 3473 (ifp->if_flags ^ sc->bge_if_flags) & IFF_ALLMULTI) { 3474 bge_setmulti(sc); 3475 } else 3476 bge_init_locked(sc); 3477 } else { 3478 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3479 bge_stop(sc); 3480 } 3481 } 3482 sc->bge_if_flags = ifp->if_flags; 3483 BGE_UNLOCK(sc); 3484 error = 0; 3485 break; 3486 case SIOCADDMULTI: 3487 case SIOCDELMULTI: 3488 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3489 BGE_LOCK(sc); 3490 bge_setmulti(sc); 3491 BGE_UNLOCK(sc); 3492 error = 0; 3493 } 3494 break; 3495 case SIOCSIFMEDIA: 3496 case SIOCGIFMEDIA: 3497 if (sc->bge_tbi) { 3498 error = ifmedia_ioctl(ifp, ifr, 3499 &sc->bge_ifmedia, command); 3500 } else { 3501 mii = device_get_softc(sc->bge_miibus); 3502 error = ifmedia_ioctl(ifp, ifr, 3503 &mii->mii_media, command); 3504 } 3505 break; 3506 case SIOCSIFCAP: 3507 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3508 #ifdef DEVICE_POLLING 3509 if (mask & IFCAP_POLLING) { 3510 if (ifr->ifr_reqcap & IFCAP_POLLING) { 3511 error = ether_poll_register(bge_poll, ifp); 3512 if (error) 3513 return(error); 3514 BGE_LOCK(sc); 3515 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 3516 BGE_PCIMISCCTL_MASK_PCI_INTR); 3517 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3518 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 3519 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 3520 ifp->if_capenable |= IFCAP_POLLING; 3521 BGE_UNLOCK(sc); 3522 } else { 3523 error = ether_poll_deregister(ifp); 3524 /* Enable interrupt even in error case */ 3525 BGE_LOCK(sc); 3526 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 3527 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 3528 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, 3529 BGE_PCIMISCCTL_MASK_PCI_INTR); 3530 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3531 ifp->if_capenable &= ~IFCAP_POLLING; 3532 BGE_UNLOCK(sc); 3533 } 3534 } 3535 #endif 3536 if (mask & IFCAP_HWCSUM) { 3537 ifp->if_capenable ^= IFCAP_HWCSUM; 3538 if (IFCAP_HWCSUM & ifp->if_capenable && 3539 IFCAP_HWCSUM & ifp->if_capabilities) 3540 ifp->if_hwassist = BGE_CSUM_FEATURES; 3541 else 3542 ifp->if_hwassist = 0; 3543 VLAN_CAPABILITIES(ifp); 3544 } 3545 break; 3546 default: 3547 error = ether_ioctl(ifp, command, data); 3548 break; 3549 } 3550 3551 return(error); 3552 } 3553 3554 static void 3555 bge_watchdog(ifp) 3556 struct ifnet *ifp; 3557 { 3558 struct bge_softc *sc; 3559 3560 sc = ifp->if_softc; 3561 3562 if_printf(ifp, "watchdog timeout -- resetting\n"); 3563 3564 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3565 bge_init(sc); 3566 3567 ifp->if_oerrors++; 3568 3569 return; 3570 } 3571 3572 /* 3573 * Stop the adapter and free any mbufs allocated to the 3574 * RX and TX lists. 
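 *
 * The shutdown order below roughly mirrors bringup in reverse:
 * receiver and transmitter blocks first, then the memory managers,
 * arbiter and host coalescing engine, then host interrupts, and
 * finally the firmware is told the stack is going away before the
 * rings are torn down.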
3575 */ 3576 static void 3577 bge_stop(sc) 3578 struct bge_softc *sc; 3579 { 3580 struct ifnet *ifp; 3581 struct ifmedia_entry *ifm; 3582 struct mii_data *mii = NULL; 3583 int mtmp, itmp; 3584 3585 BGE_LOCK_ASSERT(sc); 3586 3587 ifp = sc->bge_ifp; 3588 3589 if (!sc->bge_tbi) 3590 mii = device_get_softc(sc->bge_miibus); 3591 3592 callout_stop(&sc->bge_stat_ch); 3593 3594 /* 3595 * Disable all of the receiver blocks 3596 */ 3597 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3598 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3599 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3600 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3601 sc->bge_asicrev != BGE_ASICREV_BCM5750) 3602 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3603 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3604 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3605 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3606 3607 /* 3608 * Disable all of the transmit blocks 3609 */ 3610 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3611 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3612 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3613 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3614 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3615 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3616 sc->bge_asicrev != BGE_ASICREV_BCM5750) 3617 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3618 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3619 3620 /* 3621 * Shut down all of the memory managers and related 3622 * state machines. 3623 */ 3624 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3625 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3626 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3627 sc->bge_asicrev != BGE_ASICREV_BCM5750) 3628 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3629 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3630 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3631 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3632 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 3633 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3634 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3635 } 3636 3637 /* Disable host interrupts. */ 3638 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3639 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3640 3641 /* 3642 * Tell firmware we're shutting down. 3643 */ 3644 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3645 3646 /* Free the RX lists. */ 3647 bge_free_rx_ring_std(sc); 3648 3649 /* Free jumbo RX list. */ 3650 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3651 sc->bge_asicrev != BGE_ASICREV_BCM5750) 3652 bge_free_rx_ring_jumbo(sc); 3653 3654 /* Free TX buffers. */ 3655 bge_free_tx_ring(sc); 3656 3657 /* 3658 * Isolate/power down the PHY, but leave the media selection 3659 * unchanged so that things will be put back to normal when 3660 * we bring the interface back up. 3661 */ 3662 if (!sc->bge_tbi) { 3663 itmp = ifp->if_flags; 3664 ifp->if_flags |= IFF_UP; 3665 /* 3666 * If we are called from bge_detach(), mii is already NULL. 3667 */ 3668 if (mii != NULL) { 3669 ifm = mii->mii_media.ifm_cur; 3670 mtmp = ifm->ifm_media; 3671 ifm->ifm_media = IFM_ETHER|IFM_NONE; 3672 mii_mediachg(mii); 3673 ifm->ifm_media = mtmp; 3674 } 3675 ifp->if_flags = itmp; 3676 } 3677 3678 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3679 3680 /* 3681 * We can't just call bge_link_upd() cause chip is almost stopped so 3682 * bge_link_upd -> bge_tick_locked -> bge_stats_update sequence may 3683 * lead to hardware deadlock. 
So we just clearing MAC's link state 3684 * (PHY may still have link UP). 3685 */ 3686 if (bootverbose && sc->bge_link) 3687 if_printf(sc->bge_ifp, "link DOWN\n"); 3688 sc->bge_link = 0; 3689 3690 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3691 } 3692 3693 /* 3694 * Stop all chip I/O so that the kernel's probe routines don't 3695 * get confused by errant DMAs when rebooting. 3696 */ 3697 static void 3698 bge_shutdown(dev) 3699 device_t dev; 3700 { 3701 struct bge_softc *sc; 3702 3703 sc = device_get_softc(dev); 3704 3705 BGE_LOCK(sc); 3706 bge_stop(sc); 3707 bge_reset(sc); 3708 BGE_UNLOCK(sc); 3709 3710 return; 3711 } 3712 3713 static int 3714 bge_suspend(device_t dev) 3715 { 3716 struct bge_softc *sc; 3717 3718 sc = device_get_softc(dev); 3719 BGE_LOCK(sc); 3720 bge_stop(sc); 3721 BGE_UNLOCK(sc); 3722 3723 return (0); 3724 } 3725 3726 static int 3727 bge_resume(device_t dev) 3728 { 3729 struct bge_softc *sc; 3730 struct ifnet *ifp; 3731 3732 sc = device_get_softc(dev); 3733 BGE_LOCK(sc); 3734 ifp = sc->bge_ifp; 3735 if (ifp->if_flags & IFF_UP) { 3736 bge_init_locked(sc); 3737 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3738 bge_start_locked(ifp); 3739 } 3740 BGE_UNLOCK(sc); 3741 3742 return (0); 3743 } 3744 3745 static void 3746 bge_link_upd(sc) 3747 struct bge_softc *sc; 3748 { 3749 struct mii_data *mii; 3750 uint32_t link, status; 3751 3752 BGE_LOCK_ASSERT(sc); 3753 3754 /* Clear 'pending link event' flag */ 3755 sc->bge_link_evt = 0; 3756 3757 /* 3758 * Process link state changes. 3759 * Grrr. The link status word in the status block does 3760 * not work correctly on the BCM5700 rev AX and BX chips, 3761 * according to all available information. Hence, we have 3762 * to enable MII interrupts in order to properly obtain 3763 * async link changes. Unfortunately, this also means that 3764 * we have to read the MAC status register to detect link 3765 * changes, thereby adding an additional register access to 3766 * the interrupt handler. 3767 * 3768 * XXX: perhaps link state detection procedure used for 3769 * BGE_CHIPID_BCM5700_B1 can be used for others BCM5700 revisions. 
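 *
 * Below, link changes are detected one of three ways: via the MAC MI
 * interrupt on BCM5700 parts other than B1, via the PCS sync bit for
 * TBI (fiber) cards, or via the MI status register when auto-polling
 * is enabled; the status block's link-changed flag alone is not
 * trusted.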
3770 */ 3771 3772 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 3773 sc->bge_chipid != BGE_CHIPID_BCM5700_B1) { 3774 status = CSR_READ_4(sc, BGE_MAC_STS); 3775 if (status & BGE_MACSTAT_MI_INTERRUPT) { 3776 callout_stop(&sc->bge_stat_ch); 3777 bge_tick_locked(sc); 3778 3779 mii = device_get_softc(sc->bge_miibus); 3780 if (!sc->bge_link && 3781 mii->mii_media_status & IFM_ACTIVE && 3782 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3783 sc->bge_link++; 3784 if (bootverbose) 3785 if_printf(sc->bge_ifp, "link UP\n"); 3786 } else if (sc->bge_link && 3787 (!(mii->mii_media_status & IFM_ACTIVE) || 3788 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 3789 sc->bge_link = 0; 3790 if (bootverbose) 3791 if_printf(sc->bge_ifp, "link DOWN\n"); 3792 } 3793 3794 /* Clear the interrupt */ 3795 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3796 BGE_EVTENB_MI_INTERRUPT); 3797 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 3798 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 3799 BRGPHY_INTRS); 3800 } 3801 return; 3802 } 3803 3804 if (sc->bge_tbi) { 3805 status = CSR_READ_4(sc, BGE_MAC_STS); 3806 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 3807 if (!sc->bge_link) { 3808 sc->bge_link++; 3809 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 3810 BGE_CLRBIT(sc, BGE_MAC_MODE, 3811 BGE_MACMODE_TBI_SEND_CFGS); 3812 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3813 if (bootverbose) 3814 if_printf(sc->bge_ifp, "link UP\n"); 3815 if_link_state_change(sc->bge_ifp, LINK_STATE_UP); 3816 } 3817 } else if (sc->bge_link) { 3818 sc->bge_link = 0; 3819 if (bootverbose) 3820 if_printf(sc->bge_ifp, "link DOWN\n"); 3821 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); 3822 } 3823 /* Discard link events for MII/GMII cards if MI auto-polling disabled */ 3824 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) { 3825 /* 3826 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit 3827 * in status word always set. Workaround this bug by reading 3828 * PHY link status directly. 3829 */ 3830 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0; 3831 3832 if (link != sc->bge_link || 3833 sc->bge_asicrev == BGE_ASICREV_BCM5700) { 3834 callout_stop(&sc->bge_stat_ch); 3835 bge_tick_locked(sc); 3836 3837 mii = device_get_softc(sc->bge_miibus); 3838 if (!sc->bge_link && 3839 mii->mii_media_status & IFM_ACTIVE && 3840 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3841 sc->bge_link++; 3842 if (bootverbose) 3843 if_printf(sc->bge_ifp, "link UP\n"); 3844 } else if (sc->bge_link && 3845 (!(mii->mii_media_status & IFM_ACTIVE) || 3846 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 3847 sc->bge_link = 0; 3848 if (bootverbose) 3849 if_printf(sc->bge_ifp, "link DOWN\n"); 3850 } 3851 } 3852 } 3853 3854 /* Clear the attention */ 3855 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 3856 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 3857 BGE_MACSTAT_LINK_CHANGED); 3858 } 3859