/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
67 */ 68 69 #include <sys/param.h> 70 #include <sys/endian.h> 71 #include <sys/systm.h> 72 #include <sys/sockio.h> 73 #include <sys/mbuf.h> 74 #include <sys/malloc.h> 75 #include <sys/kernel.h> 76 #include <sys/module.h> 77 #include <sys/socket.h> 78 #include <sys/queue.h> 79 80 #include <net/if.h> 81 #include <net/if_arp.h> 82 #include <net/ethernet.h> 83 #include <net/if_dl.h> 84 #include <net/if_media.h> 85 86 #include <net/bpf.h> 87 88 #include <net/if_types.h> 89 #include <net/if_vlan_var.h> 90 91 #include <netinet/in_systm.h> 92 #include <netinet/in.h> 93 #include <netinet/ip.h> 94 95 #include <machine/clock.h> /* for DELAY */ 96 #include <machine/bus_memio.h> 97 #include <machine/bus.h> 98 #include <machine/resource.h> 99 #include <sys/bus.h> 100 #include <sys/rman.h> 101 102 #include <dev/mii/mii.h> 103 #include <dev/mii/miivar.h> 104 #include "miidevs.h" 105 #include <dev/mii/brgphyreg.h> 106 107 #include <dev/pci/pcireg.h> 108 #include <dev/pci/pcivar.h> 109 110 #include <dev/bge/if_bgereg.h> 111 112 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 113 114 MODULE_DEPEND(bge, pci, 1, 1, 1); 115 MODULE_DEPEND(bge, ether, 1, 1, 1); 116 MODULE_DEPEND(bge, miibus, 1, 1, 1); 117 118 /* "controller miibus0" required. See GENERIC if you get errors here. */ 119 #include "miibus_if.h" 120 121 /* 122 * Various supported device vendors/types and their names. Note: the 123 * spec seems to indicate that the hardware still has Alteon's vendor 124 * ID burned into it, though it will always be overriden by the vendor 125 * ID in the EEPROM. Just to be safe, we cover all possibilities. 126 */ 127 #define BGE_DEVDESC_MAX 64 /* Maximum device description length */ 128 129 static struct bge_type bge_devs[] = { 130 { ALT_VENDORID, ALT_DEVICEID_BCM5700, 131 "Broadcom BCM5700 Gigabit Ethernet" }, 132 { ALT_VENDORID, ALT_DEVICEID_BCM5701, 133 "Broadcom BCM5701 Gigabit Ethernet" }, 134 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700, 135 "Broadcom BCM5700 Gigabit Ethernet" }, 136 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701, 137 "Broadcom BCM5701 Gigabit Ethernet" }, 138 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702, 139 "Broadcom BCM5702 Gigabit Ethernet" }, 140 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X, 141 "Broadcom BCM5702X Gigabit Ethernet" }, 142 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703, 143 "Broadcom BCM5703 Gigabit Ethernet" }, 144 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X, 145 "Broadcom BCM5703X Gigabit Ethernet" }, 146 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C, 147 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 148 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S, 149 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705, 151 "Broadcom BCM5705 Gigabit Ethernet" }, 152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K, 153 "Broadcom BCM5705K Gigabit Ethernet" }, 154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M, 155 "Broadcom BCM5705M Gigabit Ethernet" }, 156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT, 157 "Broadcom BCM5705M Gigabit Ethernet" }, 158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721, 159 "Broadcom BCM5721 Gigabit Ethernet" }, 160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750, 161 "Broadcom BCM5750 Gigabit Ethernet" }, 162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M, 163 "Broadcom BCM5750M Gigabit Ethernet" }, 164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751, 165 "Broadcom BCM5751 Gigabit Ethernet" }, 166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M, 167 "Broadcom BCM5751M Gigabit Ethernet" }, 168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782, 169 "Broadcom BCM5782 Gigabit Ethernet" }, 170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788, 171 
"Broadcom BCM5788 Gigabit Ethernet" }, 172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901, 173 "Broadcom BCM5901 Fast Ethernet" }, 174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2, 175 "Broadcom BCM5901A2 Fast Ethernet" }, 176 { SK_VENDORID, SK_DEVICEID_ALTIMA, 177 "SysKonnect Gigabit Ethernet" }, 178 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000, 179 "Altima AC1000 Gigabit Ethernet" }, 180 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002, 181 "Altima AC1002 Gigabit Ethernet" }, 182 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100, 183 "Altima AC9100 Gigabit Ethernet" }, 184 { 0, 0, NULL } 185 }; 186 187 static int bge_probe (device_t); 188 static int bge_attach (device_t); 189 static int bge_detach (device_t); 190 static void bge_release_resources 191 (struct bge_softc *); 192 static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int); 193 static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int, 194 bus_size_t, int); 195 static int bge_dma_alloc (device_t); 196 static void bge_dma_free (struct bge_softc *); 197 198 static void bge_txeof (struct bge_softc *); 199 static void bge_rxeof (struct bge_softc *); 200 201 static void bge_tick_locked (struct bge_softc *); 202 static void bge_tick (void *); 203 static void bge_stats_update (struct bge_softc *); 204 static void bge_stats_update_regs 205 (struct bge_softc *); 206 static int bge_encap (struct bge_softc *, struct mbuf *, 207 u_int32_t *); 208 209 static void bge_intr (void *); 210 static void bge_start_locked (struct ifnet *); 211 static void bge_start (struct ifnet *); 212 static int bge_ioctl (struct ifnet *, u_long, caddr_t); 213 static void bge_init_locked (struct bge_softc *); 214 static void bge_init (void *); 215 static void bge_stop (struct bge_softc *); 216 static void bge_watchdog (struct ifnet *); 217 static void bge_shutdown (device_t); 218 static int bge_ifmedia_upd (struct ifnet *); 219 static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 220 221 static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *); 222 static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int); 223 224 static void bge_setmulti (struct bge_softc *); 225 226 static void bge_handle_events (struct bge_softc *); 227 static int bge_alloc_jumbo_mem (struct bge_softc *); 228 static void bge_free_jumbo_mem (struct bge_softc *); 229 static void *bge_jalloc (struct bge_softc *); 230 static void bge_jfree (void *, void *); 231 static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *); 232 static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *); 233 static int bge_init_rx_ring_std (struct bge_softc *); 234 static void bge_free_rx_ring_std (struct bge_softc *); 235 static int bge_init_rx_ring_jumbo (struct bge_softc *); 236 static void bge_free_rx_ring_jumbo (struct bge_softc *); 237 static void bge_free_tx_ring (struct bge_softc *); 238 static int bge_init_tx_ring (struct bge_softc *); 239 240 static int bge_chipinit (struct bge_softc *); 241 static int bge_blockinit (struct bge_softc *); 242 243 #ifdef notdef 244 static u_int8_t bge_vpd_readbyte(struct bge_softc *, int); 245 static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int); 246 static void bge_vpd_read (struct bge_softc *); 247 #endif 248 249 static u_int32_t bge_readmem_ind 250 (struct bge_softc *, int); 251 static void bge_writemem_ind (struct bge_softc *, int, int); 252 #ifdef notdef 253 static u_int32_t bge_readreg_ind 254 (struct bge_softc *, int); 255 #endif 256 static void bge_writereg_ind (struct bge_softc *, int, int); 257 258 static int 
bge_miibus_readreg (device_t, int, int); 259 static int bge_miibus_writereg (device_t, int, int, int); 260 static void bge_miibus_statchg (device_t); 261 262 static void bge_reset (struct bge_softc *); 263 264 static device_method_t bge_methods[] = { 265 /* Device interface */ 266 DEVMETHOD(device_probe, bge_probe), 267 DEVMETHOD(device_attach, bge_attach), 268 DEVMETHOD(device_detach, bge_detach), 269 DEVMETHOD(device_shutdown, bge_shutdown), 270 271 /* bus interface */ 272 DEVMETHOD(bus_print_child, bus_generic_print_child), 273 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 274 275 /* MII interface */ 276 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 277 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 278 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 279 280 { 0, 0 } 281 }; 282 283 static driver_t bge_driver = { 284 "bge", 285 bge_methods, 286 sizeof(struct bge_softc) 287 }; 288 289 static devclass_t bge_devclass; 290 291 DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 292 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 293 294 static u_int32_t 295 bge_readmem_ind(sc, off) 296 struct bge_softc *sc; 297 int off; 298 { 299 device_t dev; 300 301 dev = sc->bge_dev; 302 303 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 304 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); 305 } 306 307 static void 308 bge_writemem_ind(sc, off, val) 309 struct bge_softc *sc; 310 int off, val; 311 { 312 device_t dev; 313 314 dev = sc->bge_dev; 315 316 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 317 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 318 319 return; 320 } 321 322 #ifdef notdef 323 static u_int32_t 324 bge_readreg_ind(sc, off) 325 struct bge_softc *sc; 326 int off; 327 { 328 device_t dev; 329 330 dev = sc->bge_dev; 331 332 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 333 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 334 } 335 #endif 336 337 static void 338 bge_writereg_ind(sc, off, val) 339 struct bge_softc *sc; 340 int off, val; 341 { 342 device_t dev; 343 344 dev = sc->bge_dev; 345 346 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 347 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 348 349 return; 350 } 351 352 /* 353 * Map a single buffer address. 354 */ 355 356 static void 357 bge_dma_map_addr(arg, segs, nseg, error) 358 void *arg; 359 bus_dma_segment_t *segs; 360 int nseg; 361 int error; 362 { 363 struct bge_dmamap_arg *ctx; 364 365 if (error) 366 return; 367 368 ctx = arg; 369 370 if (nseg > ctx->bge_maxsegs) { 371 ctx->bge_maxsegs = 0; 372 return; 373 } 374 375 ctx->bge_busaddr = segs->ds_addr; 376 377 return; 378 } 379 380 /* 381 * Map an mbuf chain into an TX ring. 
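 * Each DMA segment produced by the load becomes one bge_tx_bd: the
 * segment's bus address is split into little-endian hi/lo halves, and
 * the segment length and the caller's flags are filled in. The ring
 * index wraps with BGE_INC(), and the final descriptor additionally
 * gets BGE_TXBDFLAG_END. On return, bge_maxsegs holds the number of
 * segments consumed (or 0 if the chain needed more segments than the
 * caller allowed) and bge_idx the index of the last descriptor used.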
382 */ 383 384 static void 385 bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error) 386 void *arg; 387 bus_dma_segment_t *segs; 388 int nseg; 389 bus_size_t mapsize; 390 int error; 391 { 392 struct bge_dmamap_arg *ctx; 393 struct bge_tx_bd *d = NULL; 394 int i = 0, idx; 395 396 if (error) 397 return; 398 399 ctx = arg; 400 401 /* Signal error to caller if there's too many segments */ 402 if (nseg > ctx->bge_maxsegs) { 403 ctx->bge_maxsegs = 0; 404 return; 405 } 406 407 idx = ctx->bge_idx; 408 while(1) { 409 d = &ctx->bge_ring[idx]; 410 d->bge_addr.bge_addr_lo = 411 htole32(BGE_ADDR_LO(segs[i].ds_addr)); 412 d->bge_addr.bge_addr_hi = 413 htole32(BGE_ADDR_HI(segs[i].ds_addr)); 414 d->bge_len = htole16(segs[i].ds_len); 415 d->bge_flags = htole16(ctx->bge_flags); 416 i++; 417 if (i == nseg) 418 break; 419 BGE_INC(idx, BGE_TX_RING_CNT); 420 } 421 422 d->bge_flags |= htole16(BGE_TXBDFLAG_END); 423 ctx->bge_maxsegs = nseg; 424 ctx->bge_idx = idx; 425 426 return; 427 } 428 429 430 #ifdef notdef 431 static u_int8_t 432 bge_vpd_readbyte(sc, addr) 433 struct bge_softc *sc; 434 int addr; 435 { 436 int i; 437 device_t dev; 438 u_int32_t val; 439 440 dev = sc->bge_dev; 441 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); 442 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 443 DELAY(10); 444 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) 445 break; 446 } 447 448 if (i == BGE_TIMEOUT) { 449 printf("bge%d: VPD read timed out\n", sc->bge_unit); 450 return(0); 451 } 452 453 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); 454 455 return((val >> ((addr % 4) * 8)) & 0xFF); 456 } 457 458 static void 459 bge_vpd_read_res(sc, res, addr) 460 struct bge_softc *sc; 461 struct vpd_res *res; 462 int addr; 463 { 464 int i; 465 u_int8_t *ptr; 466 467 ptr = (u_int8_t *)res; 468 for (i = 0; i < sizeof(struct vpd_res); i++) 469 ptr[i] = bge_vpd_readbyte(sc, i + addr); 470 471 return; 472 } 473 474 static void 475 bge_vpd_read(sc) 476 struct bge_softc *sc; 477 { 478 int pos = 0, i; 479 struct vpd_res res; 480 481 if (sc->bge_vpd_prodname != NULL) 482 free(sc->bge_vpd_prodname, M_DEVBUF); 483 if (sc->bge_vpd_readonly != NULL) 484 free(sc->bge_vpd_readonly, M_DEVBUF); 485 sc->bge_vpd_prodname = NULL; 486 sc->bge_vpd_readonly = NULL; 487 488 bge_vpd_read_res(sc, &res, pos); 489 490 if (res.vr_id != VPD_RES_ID) { 491 printf("bge%d: bad VPD resource id: expected %x got %x\n", 492 sc->bge_unit, VPD_RES_ID, res.vr_id); 493 return; 494 } 495 496 pos += sizeof(res); 497 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 498 for (i = 0; i < res.vr_len; i++) 499 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); 500 sc->bge_vpd_prodname[i] = '\0'; 501 pos += i; 502 503 bge_vpd_read_res(sc, &res, pos); 504 505 if (res.vr_id != VPD_RES_READ) { 506 printf("bge%d: bad VPD resource id: expected %x got %x\n", 507 sc->bge_unit, VPD_RES_READ, res.vr_id); 508 return; 509 } 510 511 pos += sizeof(res); 512 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 513 for (i = 0; i < res.vr_len + 1; i++) 514 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); 515 516 return; 517 } 518 #endif 519 520 /* 521 * Read a byte of data stored in the EEPROM at address 'addr.' The 522 * BCM570x supports both the traditional bitbang interface and an 523 * auto access interface for reading the EEPROM. We use the auto 524 * access method. 
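 * The auto access sequence used below: set BGE_MLC_AUTO_EEPROM in the
 * misc. local control register, reset the EEPROM and program the clock
 * period, write the byte address together with BGE_EE_READCMD to
 * BGE_EE_ADDR, poll for BGE_EEADDR_DONE, then pull the addressed byte
 * out of the 32-bit word returned in BGE_EE_DATA.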
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion. */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
		return(0);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

static int
bge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct bge_softc *sc;
	u_int32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
 */
	if (phy != 1)
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY read timed out\n", sc->bge_unit);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct bge_softc *sc;
	u_int32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY write timed out\n", sc->bge_unit);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
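 * The pool is a single physically contiguous block of BGE_JMEM bytes,
 * DMA-mapped once and carved into BGE_JSLOTS buffers of BGE_JLEN bytes
 * each. Free buffers sit on bge_jfree_listhead and in-use buffers on
 * bge_jinuse_listhead; bge_jalloc() and bge_jfree() simply move
 * bge_jpool_entry records between the two lists.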
729 */ 730 731 static int 732 bge_alloc_jumbo_mem(sc) 733 struct bge_softc *sc; 734 { 735 caddr_t ptr; 736 register int i, error; 737 struct bge_jpool_entry *entry; 738 739 /* Create tag for jumbo buffer block */ 740 741 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 742 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 743 NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL, 744 &sc->bge_cdata.bge_jumbo_tag); 745 746 if (error) { 747 printf("bge%d: could not allocate jumbo dma tag\n", 748 sc->bge_unit); 749 return (ENOMEM); 750 } 751 752 /* Allocate DMA'able memory for jumbo buffer block */ 753 754 error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag, 755 (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT, 756 &sc->bge_cdata.bge_jumbo_map); 757 758 if (error) 759 return (ENOMEM); 760 761 SLIST_INIT(&sc->bge_jfree_listhead); 762 SLIST_INIT(&sc->bge_jinuse_listhead); 763 764 /* 765 * Now divide it up into 9K pieces and save the addresses 766 * in an array. 767 */ 768 ptr = sc->bge_ldata.bge_jumbo_buf; 769 for (i = 0; i < BGE_JSLOTS; i++) { 770 sc->bge_cdata.bge_jslots[i] = ptr; 771 ptr += BGE_JLEN; 772 entry = malloc(sizeof(struct bge_jpool_entry), 773 M_DEVBUF, M_NOWAIT); 774 if (entry == NULL) { 775 bge_free_jumbo_mem(sc); 776 sc->bge_ldata.bge_jumbo_buf = NULL; 777 printf("bge%d: no memory for jumbo " 778 "buffer queue!\n", sc->bge_unit); 779 return(ENOBUFS); 780 } 781 entry->slot = i; 782 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 783 entry, jpool_entries); 784 } 785 786 return(0); 787 } 788 789 static void 790 bge_free_jumbo_mem(sc) 791 struct bge_softc *sc; 792 { 793 int i; 794 struct bge_jpool_entry *entry; 795 796 for (i = 0; i < BGE_JSLOTS; i++) { 797 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 798 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 799 free(entry, M_DEVBUF); 800 } 801 802 /* Destroy jumbo buffer block */ 803 804 if (sc->bge_ldata.bge_rx_jumbo_ring) 805 bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag, 806 sc->bge_ldata.bge_jumbo_buf, 807 sc->bge_cdata.bge_jumbo_map); 808 809 if (sc->bge_cdata.bge_rx_jumbo_ring_map) 810 bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag, 811 sc->bge_cdata.bge_jumbo_map); 812 813 if (sc->bge_cdata.bge_jumbo_tag) 814 bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag); 815 816 return; 817 } 818 819 /* 820 * Allocate a jumbo buffer. 821 */ 822 static void * 823 bge_jalloc(sc) 824 struct bge_softc *sc; 825 { 826 struct bge_jpool_entry *entry; 827 828 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 829 830 if (entry == NULL) { 831 printf("bge%d: no free jumbo buffers\n", sc->bge_unit); 832 return(NULL); 833 } 834 835 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 836 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 837 return(sc->bge_cdata.bge_jslots[entry->slot]); 838 } 839 840 /* 841 * Release a jumbo buffer. 842 */ 843 static void 844 bge_jfree(buf, args) 845 void *buf; 846 void *args; 847 { 848 struct bge_jpool_entry *entry; 849 struct bge_softc *sc; 850 int i; 851 852 /* Extract the softc struct pointer. 
*/ 853 sc = (struct bge_softc *)args; 854 855 if (sc == NULL) 856 panic("bge_jfree: can't find softc pointer!"); 857 858 /* calculate the slot this buffer belongs to */ 859 860 i = ((vm_offset_t)buf 861 - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN; 862 863 if ((i < 0) || (i >= BGE_JSLOTS)) 864 panic("bge_jfree: asked to free buffer that we don't manage!"); 865 866 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 867 if (entry == NULL) 868 panic("bge_jfree: buffer not in use!"); 869 entry->slot = i; 870 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 871 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 872 873 return; 874 } 875 876 877 /* 878 * Intialize a standard receive ring descriptor. 879 */ 880 static int 881 bge_newbuf_std(sc, i, m) 882 struct bge_softc *sc; 883 int i; 884 struct mbuf *m; 885 { 886 struct mbuf *m_new = NULL; 887 struct bge_rx_bd *r; 888 struct bge_dmamap_arg ctx; 889 int error; 890 891 if (m == NULL) { 892 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 893 if (m_new == NULL) { 894 return(ENOBUFS); 895 } 896 897 MCLGET(m_new, M_DONTWAIT); 898 if (!(m_new->m_flags & M_EXT)) { 899 m_freem(m_new); 900 return(ENOBUFS); 901 } 902 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 903 } else { 904 m_new = m; 905 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 906 m_new->m_data = m_new->m_ext.ext_buf; 907 } 908 909 if (!sc->bge_rx_alignment_bug) 910 m_adj(m_new, ETHER_ALIGN); 911 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 912 r = &sc->bge_ldata.bge_rx_std_ring[i]; 913 ctx.bge_maxsegs = 1; 914 ctx.sc = sc; 915 error = bus_dmamap_load(sc->bge_cdata.bge_mtag, 916 sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *), 917 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 918 if (error || ctx.bge_maxsegs == 0) { 919 if (m == NULL) 920 m_freem(m_new); 921 return(ENOMEM); 922 } 923 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr)); 924 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr)); 925 r->bge_flags = htole16(BGE_RXBDFLAG_END); 926 r->bge_len = htole16(m_new->m_len); 927 r->bge_idx = htole16(i); 928 929 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 930 sc->bge_cdata.bge_rx_std_dmamap[i], 931 BUS_DMASYNC_PREREAD); 932 933 return(0); 934 } 935 936 /* 937 * Initialize a jumbo receive ring descriptor. This allocates 938 * a jumbo buffer from the pool managed internally by the driver. 939 */ 940 static int 941 bge_newbuf_jumbo(sc, i, m) 942 struct bge_softc *sc; 943 int i; 944 struct mbuf *m; 945 { 946 struct mbuf *m_new = NULL; 947 struct bge_rx_bd *r; 948 struct bge_dmamap_arg ctx; 949 int error; 950 951 if (m == NULL) { 952 caddr_t *buf = NULL; 953 954 /* Allocate the mbuf. */ 955 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 956 if (m_new == NULL) { 957 return(ENOBUFS); 958 } 959 960 /* Allocate the jumbo buffer */ 961 buf = bge_jalloc(sc); 962 if (buf == NULL) { 963 m_freem(m_new); 964 printf("bge%d: jumbo allocation failed " 965 "-- packet dropped!\n", sc->bge_unit); 966 return(ENOBUFS); 967 } 968 969 /* Attach the buffer to the mbuf. */ 970 m_new->m_data = (void *) buf; 971 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 972 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree, 973 (struct bge_softc *)sc, 0, EXT_NET_DRV); 974 } else { 975 m_new = m; 976 m_new->m_data = m_new->m_ext.ext_buf; 977 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 978 } 979 980 if (!sc->bge_rx_alignment_bug) 981 m_adj(m_new, ETHER_ALIGN); 982 /* Set up the descriptor. 
*/ 983 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 984 r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; 985 ctx.bge_maxsegs = 1; 986 ctx.sc = sc; 987 error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo, 988 sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *), 989 m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 990 if (error || ctx.bge_maxsegs == 0) { 991 if (m == NULL) 992 m_freem(m_new); 993 return(ENOMEM); 994 } 995 r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr)); 996 r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr)); 997 r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING); 998 r->bge_len = htole16(m_new->m_len); 999 r->bge_idx = htole16(i); 1000 1001 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 1002 sc->bge_cdata.bge_rx_jumbo_dmamap[i], 1003 BUS_DMASYNC_PREREAD); 1004 1005 return(0); 1006 } 1007 1008 /* 1009 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 1010 * that's 1MB or memory, which is a lot. For now, we fill only the first 1011 * 256 ring entries and hope that our CPU is fast enough to keep up with 1012 * the NIC. 1013 */ 1014 static int 1015 bge_init_rx_ring_std(sc) 1016 struct bge_softc *sc; 1017 { 1018 int i; 1019 1020 for (i = 0; i < BGE_SSLOTS; i++) { 1021 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) 1022 return(ENOBUFS); 1023 }; 1024 1025 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1026 sc->bge_cdata.bge_rx_std_ring_map, 1027 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1028 1029 sc->bge_std = i - 1; 1030 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1031 1032 return(0); 1033 } 1034 1035 static void 1036 bge_free_rx_ring_std(sc) 1037 struct bge_softc *sc; 1038 { 1039 int i; 1040 1041 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1042 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 1043 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1044 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1045 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 1046 sc->bge_cdata.bge_rx_std_dmamap[i]); 1047 } 1048 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 1049 sizeof(struct bge_rx_bd)); 1050 } 1051 1052 return; 1053 } 1054 1055 static int 1056 bge_init_rx_ring_jumbo(sc) 1057 struct bge_softc *sc; 1058 { 1059 int i; 1060 struct bge_rcb *rcb; 1061 1062 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1063 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1064 return(ENOBUFS); 1065 }; 1066 1067 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1068 sc->bge_cdata.bge_rx_jumbo_ring_map, 1069 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1070 1071 sc->bge_jumbo = i - 1; 1072 1073 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 1074 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 1075 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1076 1077 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1078 1079 return(0); 1080 } 1081 1082 static void 1083 bge_free_rx_ring_jumbo(sc) 1084 struct bge_softc *sc; 1085 { 1086 int i; 1087 1088 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1089 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1090 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1091 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1092 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 1093 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1094 } 1095 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 1096 sizeof(struct bge_rx_bd)); 1097 } 1098 1099 return; 1100 } 1101 1102 static void 1103 bge_free_tx_ring(sc) 1104 struct bge_softc *sc; 1105 { 1106 int i; 1107 1108 if (sc->bge_ldata.bge_tx_ring == NULL) 1109 return; 1110 
1111 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1112 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1113 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1114 sc->bge_cdata.bge_tx_chain[i] = NULL; 1115 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 1116 sc->bge_cdata.bge_tx_dmamap[i]); 1117 } 1118 bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 1119 sizeof(struct bge_tx_bd)); 1120 } 1121 1122 return; 1123 } 1124 1125 static int 1126 bge_init_tx_ring(sc) 1127 struct bge_softc *sc; 1128 { 1129 sc->bge_txcnt = 0; 1130 sc->bge_tx_saved_considx = 0; 1131 1132 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1133 /* 5700 b2 errata */ 1134 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1135 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); 1136 1137 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1138 /* 5700 b2 errata */ 1139 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1140 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1141 1142 return(0); 1143 } 1144 1145 static void 1146 bge_setmulti(sc) 1147 struct bge_softc *sc; 1148 { 1149 struct ifnet *ifp; 1150 struct ifmultiaddr *ifma; 1151 u_int32_t hashes[4] = { 0, 0, 0, 0 }; 1152 int h, i; 1153 1154 BGE_LOCK_ASSERT(sc); 1155 1156 ifp = &sc->arpcom.ac_if; 1157 1158 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 1159 for (i = 0; i < 4; i++) 1160 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 1161 return; 1162 } 1163 1164 /* First, zot all the existing filters. */ 1165 for (i = 0; i < 4; i++) 1166 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 1167 1168 /* Now program new ones. */ 1169 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1170 if (ifma->ifma_addr->sa_family != AF_LINK) 1171 continue; 1172 h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 1173 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; 1174 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1175 } 1176 1177 for (i = 0; i < 4; i++) 1178 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1179 1180 return; 1181 } 1182 1183 /* 1184 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1185 * self-test results. 1186 */ 1187 static int 1188 bge_chipinit(sc) 1189 struct bge_softc *sc; 1190 { 1191 int i; 1192 u_int32_t dma_rw_ctl; 1193 1194 /* Set endianness before we access any non-PCI registers. */ 1195 #if BYTE_ORDER == BIG_ENDIAN 1196 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 1197 BGE_BIGENDIAN_INIT, 4); 1198 #else 1199 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 1200 BGE_LITTLEENDIAN_INIT, 4); 1201 #endif 1202 1203 /* 1204 * Check the 'ROM failed' bit on the RX CPU to see if 1205 * self-tests passed. 1206 */ 1207 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { 1208 printf("bge%d: RX CPU self-diagnostics failed!\n", 1209 sc->bge_unit); 1210 return(ENODEV); 1211 } 1212 1213 /* Clear the MAC control register */ 1214 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1215 1216 /* 1217 * Clear the MAC statistics block in the NIC's 1218 * internal memory. 1219 */ 1220 for (i = BGE_STATS_BLOCK; 1221 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1222 BGE_MEMWIN_WRITE(sc, i, 0); 1223 1224 for (i = BGE_STATUS_BLOCK; 1225 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1226 BGE_MEMWIN_WRITE(sc, i, 0); 1227 1228 /* Set up the PCI DMA control register. 
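 * The read/write watermark encoding in BGE_PCI_DMA_RW_CTL depends on
 * the bus the chip sits on: PCI Express, conventional PCI and PCI-X
 * each get their own values below, the BCM5704 uses a different PCI-X
 * encoding, and the 5703/5704 may also need the ONEDMA_AT_ONCE
 * workaround depending on the clock control bits.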
*/ 1229 if (sc->bge_pcie) { 1230 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1231 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1232 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1233 } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & 1234 BGE_PCISTATE_PCI_BUSMODE) { 1235 /* Conventional PCI bus */ 1236 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1237 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1238 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1239 (0x0F); 1240 } else { 1241 /* PCI-X bus */ 1242 /* 1243 * The 5704 uses a different encoding of read/write 1244 * watermarks. 1245 */ 1246 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1247 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1248 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1249 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1250 else 1251 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1252 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1253 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1254 (0x0F); 1255 1256 /* 1257 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1258 * for hardware bugs. 1259 */ 1260 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1261 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1262 u_int32_t tmp; 1263 1264 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 1265 if (tmp == 0x6 || tmp == 0x7) 1266 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1267 } 1268 } 1269 1270 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1271 sc->bge_asicrev == BGE_ASICREV_BCM5704 || 1272 sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1273 sc->bge_asicrev == BGE_ASICREV_BCM5750) 1274 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1275 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1276 1277 /* 1278 * Set up general mode register. 1279 */ 1280 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME| 1281 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| 1282 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1283 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); 1284 1285 /* 1286 * Disable memory write invalidate. Apparently it is not supported 1287 * properly by these devices. 1288 */ 1289 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); 1290 1291 #ifdef __brokenalpha__ 1292 /* 1293 * Must insure that we do not cross an 8K (bytes) boundary 1294 * for DMA reads. Our highest limit is 1K bytes. This is a 1295 * restriction on some ALPHA platforms with early revision 1296 * 21174 PCI chipsets, such as the AlphaPC 164lx 1297 */ 1298 PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, 1299 BGE_PCI_READ_BNDRY_1024BYTES, 4); 1300 #endif 1301 1302 /* Set the timer prescaler (always 66Mhz) */ 1303 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1304 1305 return(0); 1306 } 1307 1308 static int 1309 bge_blockinit(sc) 1310 struct bge_softc *sc; 1311 { 1312 struct bge_rcb *rcb; 1313 volatile struct bge_rcb *vrcb; 1314 int i; 1315 1316 /* 1317 * Initialize the memory window pointer register so that 1318 * we can access the first 32K of internal NIC RAM. This will 1319 * allow us to set up the TX send ring RCBs and the RX return 1320 * ring RCBs, plus other things which live in NIC memory. 1321 */ 1322 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1323 1324 /* Note: the BCM5704 has a smaller mbuf space than other chips. 
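 * The pool length written below is 0x10000 bytes on the 5704 versus
 * 0x18000 on the other chips that carve an mbuf pool out of internal
 * or external SSRAM; the 5705/5750 skip this setup entirely and only
 * get the watermark settings.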
*/ 1325 1326 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1327 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1328 /* Configure mbuf memory pool */ 1329 if (sc->bge_extram) { 1330 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1331 BGE_EXT_SSRAM); 1332 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1333 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1334 else 1335 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1336 } else { 1337 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1338 BGE_BUFFPOOL_1); 1339 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1340 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1341 else 1342 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1343 } 1344 1345 /* Configure DMA resource pool */ 1346 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1347 BGE_DMA_DESCRIPTORS); 1348 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1349 } 1350 1351 /* Configure mbuf pool watermarks */ 1352 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1353 sc->bge_asicrev == BGE_ASICREV_BCM5750) { 1354 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1355 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1356 } else { 1357 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1358 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1359 } 1360 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1361 1362 /* Configure DMA resource watermarks */ 1363 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1364 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1365 1366 /* Enable buffer manager */ 1367 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1368 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1369 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1370 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1371 1372 /* Poll for buffer manager start indication */ 1373 for (i = 0; i < BGE_TIMEOUT; i++) { 1374 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1375 break; 1376 DELAY(10); 1377 } 1378 1379 if (i == BGE_TIMEOUT) { 1380 printf("bge%d: buffer manager failed to start\n", 1381 sc->bge_unit); 1382 return(ENXIO); 1383 } 1384 } 1385 1386 /* Enable flow-through queues */ 1387 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1388 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1389 1390 /* Wait until queue initialization is complete */ 1391 for (i = 0; i < BGE_TIMEOUT; i++) { 1392 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1393 break; 1394 DELAY(10); 1395 } 1396 1397 if (i == BGE_TIMEOUT) { 1398 printf("bge%d: flow-through queue init failed\n", 1399 sc->bge_unit); 1400 return(ENXIO); 1401 } 1402 1403 /* Initialize the standard RX ring control block */ 1404 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1405 rcb->bge_hostaddr.bge_addr_lo = 1406 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1407 rcb->bge_hostaddr.bge_addr_hi = 1408 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1409 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1410 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 1411 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 1412 sc->bge_asicrev == BGE_ASICREV_BCM5750) 1413 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1414 else 1415 rcb->bge_maxlen_flags = 1416 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1417 if (sc->bge_extram) 1418 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; 1419 else 1420 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1421 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1422 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1423 1424 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1425 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, 
	    rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
		    BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
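 * Because the send ring RCBs live in the NIC's own address space, they
 * are written through the PCI memory window (sc->bge_vhandle +
 * BGE_MEMWIN_START + BGE_SEND_RING_RCB) rather than through host DMA.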
1481 */ 1482 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1483 BGE_SEND_RING_RCB); 1484 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1485 vrcb->bge_maxlen_flags = 1486 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 1487 vrcb->bge_nicaddr = 0; 1488 vrcb++; 1489 } 1490 1491 /* Configure TX RCB 0 (we use only the first ring) */ 1492 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1493 BGE_SEND_RING_RCB); 1494 vrcb->bge_hostaddr.bge_addr_lo = 1495 htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr)); 1496 vrcb->bge_hostaddr.bge_addr_hi = 1497 htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr)); 1498 vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT); 1499 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1500 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1501 vrcb->bge_maxlen_flags = 1502 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0); 1503 1504 /* Disable all unused RX return rings */ 1505 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1506 BGE_RX_RETURN_RING_RCB); 1507 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1508 vrcb->bge_hostaddr.bge_addr_hi = 0; 1509 vrcb->bge_hostaddr.bge_addr_lo = 0; 1510 vrcb->bge_maxlen_flags = 1511 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1512 BGE_RCB_FLAG_RING_DISABLED); 1513 vrcb->bge_nicaddr = 0; 1514 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1515 (i * (sizeof(u_int64_t))), 0); 1516 vrcb++; 1517 } 1518 1519 /* Initialize RX ring indexes */ 1520 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1521 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1522 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1523 1524 /* 1525 * Set up RX return ring 0 1526 * Note that the NIC address for RX return rings is 0x00000000. 1527 * The return rings live entirely within the host, so the 1528 * nicaddr field in the RCB isn't used. 1529 */ 1530 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + 1531 BGE_RX_RETURN_RING_RCB); 1532 vrcb->bge_hostaddr.bge_addr_lo = 1533 BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr); 1534 vrcb->bge_hostaddr.bge_addr_hi = 1535 BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr); 1536 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 1537 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE); 1538 vrcb->bge_nicaddr = 0x00000000; 1539 vrcb->bge_maxlen_flags = 1540 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0); 1541 1542 /* Set random backoff seed for TX */ 1543 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1544 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1545 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1546 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + 1547 BGE_TX_BACKOFF_SEED_MASK); 1548 1549 /* Set inter-packet gap */ 1550 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1551 1552 /* 1553 * Specify which ring to use for packets that don't match 1554 * any RX rules. 1555 */ 1556 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1557 1558 /* 1559 * Configure number of RX lists. One interrupt distribution 1560 * list, sixteen active lists, one bad frames class. 1561 */ 1562 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1563 1564 /* Inialize RX list placement stats mask. */ 1565 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1566 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1567 1568 /* Disable host coalescing until we get it set up */ 1569 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1570 1571 /* Poll to make sure it's shut down. 
*/ 1572 for (i = 0; i < BGE_TIMEOUT; i++) { 1573 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1574 break; 1575 DELAY(10); 1576 } 1577 1578 if (i == BGE_TIMEOUT) { 1579 printf("bge%d: host coalescing engine failed to idle\n", 1580 sc->bge_unit); 1581 return(ENXIO); 1582 } 1583 1584 /* Set up host coalescing defaults */ 1585 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1586 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1587 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 1588 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 1589 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1590 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1591 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 1592 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 1593 } 1594 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 1595 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 1596 1597 /* Set up address of statistics block */ 1598 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1599 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 1600 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1601 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 1602 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1603 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 1604 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1605 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1606 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1607 } 1608 1609 /* Set up address of status block */ 1610 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1611 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 1612 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1613 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1614 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 1615 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE); 1616 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; 1617 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; 1618 1619 /* Turn on host coalescing state machine */ 1620 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 1621 1622 /* Turn on RX BD completion state machine and enable attentions */ 1623 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1624 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1625 1626 /* Turn on RX list placement state machine */ 1627 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1628 1629 /* Turn on RX list selector state machine. */ 1630 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1631 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1632 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1633 1634 /* Turn on DMA, clear stats */ 1635 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| 1636 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| 1637 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| 1638 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| 1639 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 1640 1641 /* Set misc. 
local control, enable interrupts on attentions */ 1642 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1643 1644 #ifdef notdef 1645 /* Assert GPIO pins for PHY reset */ 1646 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1647 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1648 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1649 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1650 #endif 1651 1652 /* Turn on DMA completion state machine */ 1653 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1654 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1655 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1656 1657 /* Turn on write DMA state machine */ 1658 CSR_WRITE_4(sc, BGE_WDMA_MODE, 1659 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); 1660 1661 /* Turn on read DMA state machine */ 1662 CSR_WRITE_4(sc, BGE_RDMA_MODE, 1663 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); 1664 1665 /* Turn on RX data completion state machine */ 1666 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1667 1668 /* Turn on RX BD initiator state machine */ 1669 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1670 1671 /* Turn on RX data and RX BD initiator state machine */ 1672 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1673 1674 /* Turn on Mbuf cluster free state machine */ 1675 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1676 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1677 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1678 1679 /* Turn on send BD completion state machine */ 1680 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1681 1682 /* Turn on send data completion state machine */ 1683 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 1684 1685 /* Turn on send data initiator state machine */ 1686 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1687 1688 /* Turn on send BD initiator state machine */ 1689 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1690 1691 /* Turn on send BD selector state machine */ 1692 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1693 1694 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1695 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1696 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1697 1698 /* ack/clear link change events */ 1699 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1700 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1701 BGE_MACSTAT_LINK_CHANGED); 1702 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1703 1704 /* Enable PHY auto polling (for MII/GMII only) */ 1705 if (sc->bge_tbi) { 1706 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1707 } else { 1708 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); 1709 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) 1710 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1711 BGE_EVTENB_MI_INTERRUPT); 1712 } 1713 1714 /* Enable link state change attentions. */ 1715 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1716 1717 return(0); 1718 } 1719 1720 /* 1721 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1722 * against our list and return its name if we find a match. Note 1723 * that since the Broadcom controller contains VPD support, we 1724 * can get the device name string from the controller itself instead 1725 * of the compiled-in string. This is a little slow, but it guarantees 1726 * we'll always announce the right product name. 
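 * At the moment the VPD path is compiled out (see the notdef block in
 * bge_probe()), so the description is built from the device table entry
 * plus the ASIC revision read from the BGE_PCI_MISC_CTL config register.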
1727 */ 1728 static int 1729 bge_probe(dev) 1730 device_t dev; 1731 { 1732 struct bge_type *t; 1733 struct bge_softc *sc; 1734 char *descbuf; 1735 1736 t = bge_devs; 1737 1738 sc = device_get_softc(dev); 1739 bzero(sc, sizeof(struct bge_softc)); 1740 sc->bge_unit = device_get_unit(dev); 1741 sc->bge_dev = dev; 1742 1743 while(t->bge_name != NULL) { 1744 if ((pci_get_vendor(dev) == t->bge_vid) && 1745 (pci_get_device(dev) == t->bge_did)) { 1746 #ifdef notdef 1747 bge_vpd_read(sc); 1748 device_set_desc(dev, sc->bge_vpd_prodname); 1749 #endif 1750 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 1751 if (descbuf == NULL) 1752 return(ENOMEM); 1753 snprintf(descbuf, BGE_DEVDESC_MAX, 1754 "%s, ASIC rev. %#04x", t->bge_name, 1755 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16); 1756 device_set_desc_copy(dev, descbuf); 1757 if (pci_get_subvendor(dev) == DELL_VENDORID) 1758 sc->bge_no_3_led = 1; 1759 free(descbuf, M_TEMP); 1760 return(0); 1761 } 1762 t++; 1763 } 1764 1765 return(ENXIO); 1766 } 1767 1768 static void 1769 bge_dma_free(sc) 1770 struct bge_softc *sc; 1771 { 1772 int i; 1773 1774 1775 /* Destroy DMA maps for RX buffers */ 1776 1777 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1778 if (sc->bge_cdata.bge_rx_std_dmamap[i]) 1779 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1780 sc->bge_cdata.bge_rx_std_dmamap[i]); 1781 } 1782 1783 /* Destroy DMA maps for jumbo RX buffers */ 1784 1785 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1786 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 1787 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 1788 sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1789 } 1790 1791 /* Destroy DMA maps for TX buffers */ 1792 1793 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1794 if (sc->bge_cdata.bge_tx_dmamap[i]) 1795 bus_dmamap_destroy(sc->bge_cdata.bge_mtag, 1796 sc->bge_cdata.bge_tx_dmamap[i]); 1797 } 1798 1799 if (sc->bge_cdata.bge_mtag) 1800 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag); 1801 1802 1803 /* Destroy standard RX ring */ 1804 1805 if (sc->bge_ldata.bge_rx_std_ring) 1806 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 1807 sc->bge_ldata.bge_rx_std_ring, 1808 sc->bge_cdata.bge_rx_std_ring_map); 1809 1810 if (sc->bge_cdata.bge_rx_std_ring_map) { 1811 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 1812 sc->bge_cdata.bge_rx_std_ring_map); 1813 bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag, 1814 sc->bge_cdata.bge_rx_std_ring_map); 1815 } 1816 1817 if (sc->bge_cdata.bge_rx_std_ring_tag) 1818 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 1819 1820 /* Destroy jumbo RX ring */ 1821 1822 if (sc->bge_ldata.bge_rx_jumbo_ring) 1823 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1824 sc->bge_ldata.bge_rx_jumbo_ring, 1825 sc->bge_cdata.bge_rx_jumbo_ring_map); 1826 1827 if (sc->bge_cdata.bge_rx_jumbo_ring_map) { 1828 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1829 sc->bge_cdata.bge_rx_jumbo_ring_map); 1830 bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1831 sc->bge_cdata.bge_rx_jumbo_ring_map); 1832 } 1833 1834 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 1835 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 1836 1837 /* Destroy RX return ring */ 1838 1839 if (sc->bge_ldata.bge_rx_return_ring) 1840 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 1841 sc->bge_ldata.bge_rx_return_ring, 1842 sc->bge_cdata.bge_rx_return_ring_map); 1843 1844 if (sc->bge_cdata.bge_rx_return_ring_map) { 1845 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 1846 sc->bge_cdata.bge_rx_return_ring_map); 1847 
bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag, 1848 sc->bge_cdata.bge_rx_return_ring_map); 1849 } 1850 1851 if (sc->bge_cdata.bge_rx_return_ring_tag) 1852 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 1853 1854 /* Destroy TX ring */ 1855 1856 if (sc->bge_ldata.bge_tx_ring) 1857 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 1858 sc->bge_ldata.bge_tx_ring, 1859 sc->bge_cdata.bge_tx_ring_map); 1860 1861 if (sc->bge_cdata.bge_tx_ring_map) { 1862 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 1863 sc->bge_cdata.bge_tx_ring_map); 1864 bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag, 1865 sc->bge_cdata.bge_tx_ring_map); 1866 } 1867 1868 if (sc->bge_cdata.bge_tx_ring_tag) 1869 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 1870 1871 /* Destroy status block */ 1872 1873 if (sc->bge_ldata.bge_status_block) 1874 bus_dmamem_free(sc->bge_cdata.bge_status_tag, 1875 sc->bge_ldata.bge_status_block, 1876 sc->bge_cdata.bge_status_map); 1877 1878 if (sc->bge_cdata.bge_status_map) { 1879 bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 1880 sc->bge_cdata.bge_status_map); 1881 bus_dmamap_destroy(sc->bge_cdata.bge_status_tag, 1882 sc->bge_cdata.bge_status_map); 1883 } 1884 1885 if (sc->bge_cdata.bge_status_tag) 1886 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 1887 1888 /* Destroy statistics block */ 1889 1890 if (sc->bge_ldata.bge_stats) 1891 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 1892 sc->bge_ldata.bge_stats, 1893 sc->bge_cdata.bge_stats_map); 1894 1895 if (sc->bge_cdata.bge_stats_map) { 1896 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 1897 sc->bge_cdata.bge_stats_map); 1898 bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag, 1899 sc->bge_cdata.bge_stats_map); 1900 } 1901 1902 if (sc->bge_cdata.bge_stats_tag) 1903 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 1904 1905 /* Destroy the parent tag */ 1906 1907 if (sc->bge_cdata.bge_parent_tag) 1908 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 1909 1910 return; 1911 } 1912 1913 static int 1914 bge_dma_alloc(dev) 1915 device_t dev; 1916 { 1917 struct bge_softc *sc; 1918 int nseg, i, error; 1919 struct bge_dmamap_arg ctx; 1920 1921 sc = device_get_softc(dev); 1922 1923 /* 1924 * Allocate the parent bus DMA tag appropriate for PCI. 1925 */ 1926 #define BGE_NSEG_NEW 32 1927 error = bus_dma_tag_create(NULL, /* parent */ 1928 PAGE_SIZE, 0, /* alignment, boundary */ 1929 BUS_SPACE_MAXADDR, /* lowaddr */ 1930 BUS_SPACE_MAXADDR_32BIT,/* highaddr */ 1931 NULL, NULL, /* filter, filterarg */ 1932 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */ 1933 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1934 0, /* flags */ 1935 NULL, NULL, /* lockfunc, lockarg */ 1936 &sc->bge_cdata.bge_parent_tag); 1937 1938 /* 1939 * Create tag for RX mbufs. 
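 * This tag allows up to 32 segments of at most MCLBYTES each (nseg is
 * set just below) and is shared by the standard RX buffer maps and the
 * TX buffer maps created later in this function.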
1940 */ 1941 nseg = 32; 1942 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 1943 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1944 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, 1945 &sc->bge_cdata.bge_mtag); 1946 1947 if (error) { 1948 device_printf(dev, "could not allocate dma tag\n"); 1949 return (ENOMEM); 1950 } 1951 1952 /* Create DMA maps for RX buffers */ 1953 1954 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1955 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1956 &sc->bge_cdata.bge_rx_std_dmamap[i]); 1957 if (error) { 1958 device_printf(dev, "can't create DMA map for RX\n"); 1959 return(ENOMEM); 1960 } 1961 } 1962 1963 /* Create DMA maps for TX buffers */ 1964 1965 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1966 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1967 &sc->bge_cdata.bge_tx_dmamap[i]); 1968 if (error) { 1969 device_printf(dev, "can't create DMA map for TX\n"); 1970 return(ENOMEM); 1971 } 1972 } 1973 1974 /* Create tag for standard RX ring */ 1975 1976 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1977 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1978 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 1979 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 1980 1981 if (error) { 1982 device_printf(dev, "could not allocate dma tag\n"); 1983 return (ENOMEM); 1984 } 1985 1986 /* Allocate DMA'able memory for standard RX ring */ 1987 1988 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 1989 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 1990 &sc->bge_cdata.bge_rx_std_ring_map); 1991 if (error) 1992 return (ENOMEM); 1993 1994 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 1995 1996 /* Load the address of the standard RX ring */ 1997 1998 ctx.bge_maxsegs = 1; 1999 ctx.sc = sc; 2000 2001 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 2002 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, 2003 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2004 2005 if (error) 2006 return (ENOMEM); 2007 2008 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 2009 2010 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2011 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2012 2013 /* 2014 * Create tag for jumbo mbufs. 2015 * This is really a bit of a kludge. We allocate a special 2016 * jumbo buffer pool which (thanks to the way our DMA 2017 * memory allocation works) will consist of contiguous 2018 * pages. This means that even though a jumbo buffer might 2019 * be larger than a page size, we don't really need to 2020 * map it into more than one DMA segment. However, the 2021 * default mbuf tag will result in multi-segment mappings, 2022 * so we have to create a special jumbo mbuf tag that 2023 * lets us get away with mapping the jumbo buffers as 2024 * a single segment. I think eventually the driver should 2025 * be changed so that it uses ordinary mbufs and cluster 2026 * buffers, i.e. jumbo frames can span multiple DMA 2027 * descriptors. But that's a project for another day. 
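 * (The contiguous jumbo buffer pool itself is allocated separately by
 * bge_alloc_jumbo_mem() from bge_attach().)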
2028 */ 2029 2030 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2031 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2032 NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL, NULL, 2033 &sc->bge_cdata.bge_mtag_jumbo); 2034 2035 if (error) { 2036 device_printf(dev, "could not allocate dma tag\n"); 2037 return (ENOMEM); 2038 } 2039 2040 /* Create tag for jumbo RX ring */ 2041 2042 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2043 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2044 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 2045 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 2046 2047 if (error) { 2048 device_printf(dev, "could not allocate dma tag\n"); 2049 return (ENOMEM); 2050 } 2051 2052 /* Allocate DMA'able memory for jumbo RX ring */ 2053 2054 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2055 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT, 2056 &sc->bge_cdata.bge_rx_jumbo_ring_map); 2057 if (error) 2058 return (ENOMEM); 2059 2060 bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring, 2061 BGE_JUMBO_RX_RING_SZ); 2062 2063 /* Load the address of the jumbo RX ring */ 2064 2065 ctx.bge_maxsegs = 1; 2066 ctx.sc = sc; 2067 2068 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2069 sc->bge_cdata.bge_rx_jumbo_ring_map, 2070 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 2071 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2072 2073 if (error) 2074 return (ENOMEM); 2075 2076 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 2077 2078 /* Create DMA maps for jumbo RX buffers */ 2079 2080 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2081 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2082 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2083 if (error) { 2084 device_printf(dev, 2085 "can't create DMA map for RX\n"); 2086 return(ENOMEM); 2087 } 2088 } 2089 2090 } 2091 2092 /* Create tag for RX return ring */ 2093 2094 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2095 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2096 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 2097 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 2098 2099 if (error) { 2100 device_printf(dev, "could not allocate dma tag\n"); 2101 return (ENOMEM); 2102 } 2103 2104 /* Allocate DMA'able memory for RX return ring */ 2105 2106 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 2107 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 2108 &sc->bge_cdata.bge_rx_return_ring_map); 2109 if (error) 2110 return (ENOMEM); 2111 2112 bzero((char *)sc->bge_ldata.bge_rx_return_ring, 2113 BGE_RX_RTN_RING_SZ(sc)); 2114 2115 /* Load the address of the RX return ring */ 2116 2117 ctx.bge_maxsegs = 1; 2118 ctx.sc = sc; 2119 2120 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 2121 sc->bge_cdata.bge_rx_return_ring_map, 2122 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 2123 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2124 2125 if (error) 2126 return (ENOMEM); 2127 2128 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 2129 2130 /* Create tag for TX ring */ 2131 2132 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2133 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2134 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 2135 &sc->bge_cdata.bge_tx_ring_tag); 2136 2137 if (error) { 2138 device_printf(dev, "could not allocate dma tag\n"); 2139 return (ENOMEM); 2140 } 2141 2142 /* Allocate DMA'able memory for TX ring */ 2143 2144 error = 
bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 2145 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 2146 &sc->bge_cdata.bge_tx_ring_map); 2147 if (error) 2148 return (ENOMEM); 2149 2150 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 2151 2152 /* Load the address of the TX ring */ 2153 2154 ctx.bge_maxsegs = 1; 2155 ctx.sc = sc; 2156 2157 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 2158 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, 2159 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2160 2161 if (error) 2162 return (ENOMEM); 2163 2164 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 2165 2166 /* Create tag for status block */ 2167 2168 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2169 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2170 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0, 2171 NULL, NULL, &sc->bge_cdata.bge_status_tag); 2172 2173 if (error) { 2174 device_printf(dev, "could not allocate dma tag\n"); 2175 return (ENOMEM); 2176 } 2177 2178 /* Allocate DMA'able memory for status block */ 2179 2180 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 2181 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 2182 &sc->bge_cdata.bge_status_map); 2183 if (error) 2184 return (ENOMEM); 2185 2186 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 2187 2188 /* Load the address of the status block */ 2189 2190 ctx.sc = sc; 2191 ctx.bge_maxsegs = 1; 2192 2193 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2194 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 2195 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2196 2197 if (error) 2198 return (ENOMEM); 2199 2200 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2201 2202 /* Create tag for statistics block */ 2203 2204 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2205 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2206 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 2207 &sc->bge_cdata.bge_stats_tag); 2208 2209 if (error) { 2210 device_printf(dev, "could not allocate dma tag\n"); 2211 return (ENOMEM); 2212 } 2213 2214 /* Allocate DMA'able memory for statistics block */ 2215 2216 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, 2217 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, 2218 &sc->bge_cdata.bge_stats_map); 2219 if (error) 2220 return (ENOMEM); 2221 2222 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ); 2223 2224 /* Load the address of the statstics block */ 2225 2226 ctx.sc = sc; 2227 ctx.bge_maxsegs = 1; 2228 2229 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, 2230 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, 2231 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2232 2233 if (error) 2234 return (ENOMEM); 2235 2236 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; 2237 2238 return(0); 2239 } 2240 2241 static int 2242 bge_attach(dev) 2243 device_t dev; 2244 { 2245 struct ifnet *ifp; 2246 struct bge_softc *sc; 2247 u_int32_t hwcfg = 0; 2248 u_int32_t mac_addr = 0; 2249 int unit, error = 0, rid; 2250 2251 sc = device_get_softc(dev); 2252 unit = device_get_unit(dev); 2253 sc->bge_dev = dev; 2254 sc->bge_unit = unit; 2255 2256 /* 2257 * Map control/status registers. 
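 * The chip's registers are reached through a single memory mapped BAR
 * (BGE_PCI_BAR0), and bus mastering must be enabled before the chip can
 * DMA descriptors and frames to and from host memory.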
2258 */ 2259 pci_enable_busmaster(dev); 2260 2261 rid = BGE_PCI_BAR0; 2262 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2263 RF_ACTIVE|PCI_RF_DENSE); 2264 2265 if (sc->bge_res == NULL) { 2266 printf ("bge%d: couldn't map memory\n", unit); 2267 error = ENXIO; 2268 goto fail; 2269 } 2270 2271 sc->bge_btag = rman_get_bustag(sc->bge_res); 2272 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 2273 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res); 2274 2275 /* Allocate interrupt */ 2276 rid = 0; 2277 2278 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2279 RF_SHAREABLE | RF_ACTIVE); 2280 2281 if (sc->bge_irq == NULL) { 2282 printf("bge%d: couldn't map interrupt\n", unit); 2283 error = ENXIO; 2284 goto fail; 2285 } 2286 2287 sc->bge_unit = unit; 2288 2289 BGE_LOCK_INIT(sc, device_get_nameunit(dev)); 2290 2291 /* Save ASIC rev. */ 2292 2293 sc->bge_chipid = 2294 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 2295 BGE_PCIMISCCTL_ASICREV; 2296 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2297 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2298 2299 /* 2300 * XXX: Broadcom Linux driver. Not in specs or eratta. 2301 * PCI-Express? 2302 */ 2303 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 2304 u_int32_t v; 2305 2306 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4); 2307 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) { 2308 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); 2309 if ((v & 0xff) == BGE_PCIE_CAPID) 2310 sc->bge_pcie = 1; 2311 } 2312 } 2313 2314 /* Try to reset the chip. */ 2315 bge_reset(sc); 2316 2317 if (bge_chipinit(sc)) { 2318 printf("bge%d: chip initialization failed\n", sc->bge_unit); 2319 bge_release_resources(sc); 2320 error = ENXIO; 2321 goto fail; 2322 } 2323 2324 /* 2325 * Get station address from the EEPROM. 2326 */ 2327 mac_addr = bge_readmem_ind(sc, 0x0c14); 2328 if ((mac_addr >> 16) == 0x484b) { 2329 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8); 2330 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr; 2331 mac_addr = bge_readmem_ind(sc, 0x0c18); 2332 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24); 2333 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16); 2334 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8); 2335 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr; 2336 } else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 2337 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2338 printf("bge%d: failed to read station address\n", unit); 2339 bge_release_resources(sc); 2340 error = ENXIO; 2341 goto fail; 2342 } 2343 2344 /* 5705 limits RX return ring to 512 entries. */ 2345 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 2346 sc->bge_asicrev == BGE_ASICREV_BCM5750) 2347 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2348 else 2349 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2350 2351 if (bge_dma_alloc(dev)) { 2352 printf ("bge%d: failed to allocate DMA resources\n", 2353 sc->bge_unit); 2354 bge_release_resources(sc); 2355 error = ENXIO; 2356 goto fail; 2357 } 2358 2359 /* 2360 * Try to allocate memory for jumbo buffers. 2361 * The 5705 does not appear to support jumbo frames. 2362 */ 2363 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2364 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2365 if (bge_alloc_jumbo_mem(sc)) { 2366 printf("bge%d: jumbo buffer allocation " 2367 "failed\n", sc->bge_unit); 2368 bge_release_resources(sc); 2369 error = ENXIO; 2370 goto fail; 2371 } 2372 } 2373 2374 /* Set default tuneable values. 
*/ 2375 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2376 sc->bge_rx_coal_ticks = 150; 2377 sc->bge_tx_coal_ticks = 150; 2378 sc->bge_rx_max_coal_bds = 64; 2379 sc->bge_tx_max_coal_bds = 128; 2380 2381 /* Set up ifnet structure */ 2382 ifp = &sc->arpcom.ac_if; 2383 ifp->if_softc = sc; 2384 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2385 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2386 ifp->if_ioctl = bge_ioctl; 2387 ifp->if_start = bge_start; 2388 ifp->if_watchdog = bge_watchdog; 2389 ifp->if_init = bge_init; 2390 ifp->if_mtu = ETHERMTU; 2391 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 2392 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 2393 IFQ_SET_READY(&ifp->if_snd); 2394 ifp->if_hwassist = BGE_CSUM_FEATURES; 2395 /* NB: the code for RX csum offload is disabled for now */ 2396 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING | 2397 IFCAP_VLAN_MTU; 2398 ifp->if_capenable = ifp->if_capabilities; 2399 2400 /* 2401 * Figure out what sort of media we have by checking the 2402 * hardware config word in the first 32k of NIC internal memory, 2403 * or fall back to examining the EEPROM if necessary. 2404 * Note: on some BCM5700 cards, this value appears to be unset. 2405 * If that's the case, we have to rely on identifying the NIC 2406 * by its PCI subsystem ID, as we do below for the SysKonnect 2407 * SK-9D41. 2408 */ 2409 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 2410 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2411 else { 2412 bge_read_eeprom(sc, (caddr_t)&hwcfg, 2413 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2414 hwcfg = ntohl(hwcfg); 2415 } 2416 2417 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2418 sc->bge_tbi = 1; 2419 2420 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2421 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 2422 sc->bge_tbi = 1; 2423 2424 if (sc->bge_tbi) { 2425 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 2426 bge_ifmedia_upd, bge_ifmedia_sts); 2427 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2428 ifmedia_add(&sc->bge_ifmedia, 2429 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2430 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2431 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2432 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2433 } else { 2434 /* 2435 * Do transceiver setup. 2436 */ 2437 if (mii_phy_probe(dev, &sc->bge_miibus, 2438 bge_ifmedia_upd, bge_ifmedia_sts)) { 2439 printf("bge%d: MII without any PHY!\n", sc->bge_unit); 2440 bge_release_resources(sc); 2441 bge_free_jumbo_mem(sc); 2442 error = ENXIO; 2443 goto fail; 2444 } 2445 } 2446 2447 /* 2448 * When using the BCM5701 in PCI-X mode, data corruption has 2449 * been observed in the first few bytes of some received packets. 2450 * Aligning the packet buffer in memory eliminates the corruption. 2451 * Unfortunately, this misaligns the packet payloads. On platforms 2452 * which do not support unaligned accesses, we will realign the 2453 * payloads by copying the received packets. 2454 */ 2455 switch (sc->bge_chipid) { 2456 case BGE_CHIPID_BCM5701_A0: 2457 case BGE_CHIPID_BCM5701_B0: 2458 case BGE_CHIPID_BCM5701_B2: 2459 case BGE_CHIPID_BCM5701_B5: 2460 /* If in PCI-X mode, work around the alignment bug. 
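 * PCI-X operation is inferred from the bus mode/speed bits in the PCI
 * state register checked below; plain PCI does not need the workaround.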
*/ 2461 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & 2462 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == 2463 BGE_PCISTATE_PCI_BUSSPEED) 2464 sc->bge_rx_alignment_bug = 1; 2465 break; 2466 } 2467 2468 /* 2469 * Call MI attach routine. 2470 */ 2471 ether_ifattach(ifp, sc->arpcom.ac_enaddr); 2472 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE); 2473 2474 /* 2475 * Hookup IRQ last. 2476 */ 2477 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2478 bge_intr, sc, &sc->bge_intrhand); 2479 2480 if (error) { 2481 bge_release_resources(sc); 2482 printf("bge%d: couldn't set up irq\n", unit); 2483 } 2484 2485 fail: 2486 return(error); 2487 } 2488 2489 static int 2490 bge_detach(dev) 2491 device_t dev; 2492 { 2493 struct bge_softc *sc; 2494 struct ifnet *ifp; 2495 2496 sc = device_get_softc(dev); 2497 ifp = &sc->arpcom.ac_if; 2498 2499 BGE_LOCK(sc); 2500 bge_stop(sc); 2501 bge_reset(sc); 2502 BGE_UNLOCK(sc); 2503 2504 ether_ifdetach(ifp); 2505 2506 if (sc->bge_tbi) { 2507 ifmedia_removeall(&sc->bge_ifmedia); 2508 } else { 2509 bus_generic_detach(dev); 2510 device_delete_child(dev, sc->bge_miibus); 2511 } 2512 2513 bge_release_resources(sc); 2514 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2515 sc->bge_asicrev != BGE_ASICREV_BCM5750) 2516 bge_free_jumbo_mem(sc); 2517 2518 return(0); 2519 } 2520 2521 static void 2522 bge_release_resources(sc) 2523 struct bge_softc *sc; 2524 { 2525 device_t dev; 2526 2527 dev = sc->bge_dev; 2528 2529 if (sc->bge_vpd_prodname != NULL) 2530 free(sc->bge_vpd_prodname, M_DEVBUF); 2531 2532 if (sc->bge_vpd_readonly != NULL) 2533 free(sc->bge_vpd_readonly, M_DEVBUF); 2534 2535 if (sc->bge_intrhand != NULL) 2536 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2537 2538 if (sc->bge_irq != NULL) 2539 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); 2540 2541 if (sc->bge_res != NULL) 2542 bus_release_resource(dev, SYS_RES_MEMORY, 2543 BGE_PCI_BAR0, sc->bge_res); 2544 2545 bge_dma_free(sc); 2546 2547 if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 2548 BGE_LOCK_DESTROY(sc); 2549 2550 return; 2551 } 2552 2553 static void 2554 bge_reset(sc) 2555 struct bge_softc *sc; 2556 { 2557 device_t dev; 2558 u_int32_t cachesize, command, pcistate, reset; 2559 int i, val = 0; 2560 2561 dev = sc->bge_dev; 2562 2563 /* Save some important PCI state. */ 2564 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2565 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2566 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2567 2568 pci_write_config(dev, BGE_PCI_MISC_CTL, 2569 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2570 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2571 2572 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2573 2574 /* XXX: Broadcom Linux driver. */ 2575 if (sc->bge_pcie) { 2576 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */ 2577 CSR_WRITE_4(sc, 0x7e2c, 0x20); 2578 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2579 /* Prevent PCIE link training during global reset */ 2580 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2581 reset |= (1<<29); 2582 } 2583 } 2584 2585 /* Issue global reset */ 2586 bge_writereg_ind(sc, BGE_MISC_CFG, reset); 2587 2588 DELAY(1000); 2589 2590 /* XXX: Broadcom Linux driver. 
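 * Post-reset PCI Express fixups taken from the vendor driver: on the
 * 5750 A0 we wait for link training to finish and set an undocumented
 * config bit, then program the max payload size and clear error status.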
*/ 2591 if (sc->bge_pcie) { 2592 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2593 uint32_t v; 2594 2595 DELAY(500000); /* wait for link training to complete */ 2596 v = pci_read_config(dev, 0xc4, 4); 2597 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2598 } 2599 /* Set PCIE max payload size and clear error status. */ 2600 pci_write_config(dev, 0xd8, 0xf5000, 4); 2601 } 2602 2603 /* Reset some of the PCI state that got zapped by reset */ 2604 pci_write_config(dev, BGE_PCI_MISC_CTL, 2605 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2606 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2607 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2608 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2609 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2610 2611 /* Enable memory arbiter. */ 2612 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2613 sc->bge_asicrev != BGE_ASICREV_BCM5750) 2614 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2615 2616 /* 2617 * Prevent PXE restart: write a magic number to the 2618 * general communications memory at 0xB50. 2619 */ 2620 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2621 /* 2622 * Poll the value location we just wrote until 2623 * we see the 1's complement of the magic number. 2624 * This indicates that the firmware initialization 2625 * is complete. 2626 */ 2627 for (i = 0; i < BGE_TIMEOUT; i++) { 2628 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2629 if (val == ~BGE_MAGIC_NUMBER) 2630 break; 2631 DELAY(10); 2632 } 2633 2634 if (i == BGE_TIMEOUT) { 2635 printf("bge%d: firmware handshake timed out\n", sc->bge_unit); 2636 return; 2637 } 2638 2639 /* 2640 * XXX Wait for the value of the PCISTATE register to 2641 * return to its original pre-reset state. This is a 2642 * fairly good indicator of reset completion. If we don't 2643 * wait for the reset to fully complete, trying to read 2644 * from the device's non-PCI registers may yield garbage 2645 * results. 2646 */ 2647 for (i = 0; i < BGE_TIMEOUT; i++) { 2648 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2649 break; 2650 DELAY(10); 2651 } 2652 2653 /* Fix up byte swapping */ 2654 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME| 2655 BGE_MODECTL_BYTESWAP_DATA); 2656 2657 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2658 2659 /* 2660 * The 5704 in TBI mode apparently needs some special 2661 * adjustment to insure the SERDES drive level is set 2662 * to 1.2V. 2663 */ 2664 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) { 2665 uint32_t serdescfg; 2666 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2667 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2668 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2669 } 2670 2671 /* XXX: Broadcom Linux driver. */ 2672 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2673 uint32_t v; 2674 2675 v = CSR_READ_4(sc, 0x7c00); 2676 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 2677 } 2678 DELAY(10000); 2679 2680 return; 2681 } 2682 2683 /* 2684 * Frame reception handling. This is called if there's a frame 2685 * on the receive return list. 
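 * The chip posts completed RX buffer descriptors on the return ring and
 * advances the producer index in the status block; we drain entries until
 * our saved consumer index catches up with that producer index.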
2686 * 2687 * Note: we have to be able to handle two possibilities here: 2688 * 1) the frame is from the jumbo recieve ring 2689 * 2) the frame is from the standard receive ring 2690 */ 2691 2692 static void 2693 bge_rxeof(sc) 2694 struct bge_softc *sc; 2695 { 2696 struct ifnet *ifp; 2697 int stdcnt = 0, jumbocnt = 0; 2698 2699 BGE_LOCK_ASSERT(sc); 2700 2701 ifp = &sc->arpcom.ac_if; 2702 2703 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 2704 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE); 2705 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2706 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); 2707 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2708 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2709 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2710 sc->bge_cdata.bge_rx_jumbo_ring_map, 2711 BUS_DMASYNC_POSTREAD); 2712 } 2713 2714 while(sc->bge_rx_saved_considx != 2715 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { 2716 struct bge_rx_bd *cur_rx; 2717 u_int32_t rxidx; 2718 struct ether_header *eh; 2719 struct mbuf *m = NULL; 2720 u_int16_t vlan_tag = 0; 2721 int have_tag = 0; 2722 2723 cur_rx = 2724 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2725 2726 rxidx = cur_rx->bge_idx; 2727 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2728 2729 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2730 have_tag = 1; 2731 vlan_tag = cur_rx->bge_vlan_tag; 2732 } 2733 2734 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2735 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2736 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 2737 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], 2738 BUS_DMASYNC_POSTREAD); 2739 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 2740 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); 2741 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2742 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2743 jumbocnt++; 2744 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2745 ifp->if_ierrors++; 2746 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2747 continue; 2748 } 2749 if (bge_newbuf_jumbo(sc, 2750 sc->bge_jumbo, NULL) == ENOBUFS) { 2751 ifp->if_ierrors++; 2752 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2753 continue; 2754 } 2755 } else { 2756 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2757 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 2758 sc->bge_cdata.bge_rx_std_dmamap[rxidx], 2759 BUS_DMASYNC_POSTREAD); 2760 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2761 sc->bge_cdata.bge_rx_std_dmamap[rxidx]); 2762 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2763 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2764 stdcnt++; 2765 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2766 ifp->if_ierrors++; 2767 bge_newbuf_std(sc, sc->bge_std, m); 2768 continue; 2769 } 2770 if (bge_newbuf_std(sc, sc->bge_std, 2771 NULL) == ENOBUFS) { 2772 ifp->if_ierrors++; 2773 bge_newbuf_std(sc, sc->bge_std, m); 2774 continue; 2775 } 2776 } 2777 2778 ifp->if_ipackets++; 2779 #ifndef __i386__ 2780 /* 2781 * The i386 allows unaligned accesses, but for other 2782 * platforms we must make sure the payload is aligned. 
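 * The fixup below copies the frame forward by ETHER_ALIGN (2 bytes) so
 * that the IP header following the 14-byte Ethernet header ends up
 * 32-bit aligned.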
2783 */ 2784 if (sc->bge_rx_alignment_bug) { 2785 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2786 cur_rx->bge_len); 2787 m->m_data += ETHER_ALIGN; 2788 } 2789 #endif 2790 eh = mtod(m, struct ether_header *); 2791 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2792 m->m_pkthdr.rcvif = ifp; 2793 2794 #if 0 /* currently broken for some packets, possibly related to TCP options */ 2795 if (ifp->if_capenable & IFCAP_RXCSUM) { 2796 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2797 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 2798 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2799 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 2800 m->m_pkthdr.csum_data = 2801 cur_rx->bge_tcp_udp_csum; 2802 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 2803 } 2804 } 2805 #endif 2806 2807 /* 2808 * If we received a packet with a vlan tag, 2809 * attach that information to the packet. 2810 */ 2811 if (have_tag) 2812 VLAN_INPUT_TAG(ifp, m, vlan_tag, continue); 2813 2814 BGE_UNLOCK(sc); 2815 (*ifp->if_input)(ifp, m); 2816 BGE_LOCK(sc); 2817 } 2818 2819 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 2820 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE); 2821 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2822 sc->bge_cdata.bge_rx_std_ring_map, 2823 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE); 2824 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 2825 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 2826 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2827 sc->bge_cdata.bge_rx_jumbo_ring_map, 2828 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2829 } 2830 2831 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2832 if (stdcnt) 2833 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2834 if (jumbocnt) 2835 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2836 2837 return; 2838 } 2839 2840 static void 2841 bge_txeof(sc) 2842 struct bge_softc *sc; 2843 { 2844 struct bge_tx_bd *cur_tx = NULL; 2845 struct ifnet *ifp; 2846 2847 BGE_LOCK_ASSERT(sc); 2848 2849 ifp = &sc->arpcom.ac_if; 2850 2851 /* 2852 * Go through our tx ring and free mbufs for those 2853 * frames that have been sent. 2854 */ 2855 while (sc->bge_tx_saved_considx != 2856 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) { 2857 u_int32_t idx = 0; 2858 2859 idx = sc->bge_tx_saved_considx; 2860 cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 2861 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2862 ifp->if_opackets++; 2863 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 2864 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 2865 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2866 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2867 sc->bge_cdata.bge_tx_dmamap[idx]); 2868 } 2869 sc->bge_txcnt--; 2870 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2871 ifp->if_timer = 0; 2872 } 2873 2874 if (cur_tx != NULL) 2875 ifp->if_flags &= ~IFF_OACTIVE; 2876 2877 return; 2878 } 2879 2880 static void 2881 bge_intr(xsc) 2882 void *xsc; 2883 { 2884 struct bge_softc *sc; 2885 struct ifnet *ifp; 2886 u_int32_t statusword; 2887 u_int32_t status, mimode; 2888 2889 sc = xsc; 2890 ifp = &sc->arpcom.ac_if; 2891 2892 BGE_LOCK(sc); 2893 2894 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2895 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE); 2896 2897 statusword = 2898 atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status); 2899 2900 #ifdef notdef 2901 /* Avoid this for now -- checking this register is expensive. */ 2902 /* Make sure this is really our interrupt. 
*/ 2903 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) 2904 return; 2905 #endif 2906 /* Ack interrupt and stop others from occuring. */ 2907 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2908 2909 /* 2910 * Process link state changes. 2911 * Grrr. The link status word in the status block does 2912 * not work correctly on the BCM5700 rev AX and BX chips, 2913 * according to all available information. Hence, we have 2914 * to enable MII interrupts in order to properly obtain 2915 * async link changes. Unfortunately, this also means that 2916 * we have to read the MAC status register to detect link 2917 * changes, thereby adding an additional register access to 2918 * the interrupt handler. 2919 */ 2920 2921 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) { 2922 2923 status = CSR_READ_4(sc, BGE_MAC_STS); 2924 if (status & BGE_MACSTAT_MI_INTERRUPT) { 2925 sc->bge_link = 0; 2926 callout_stop(&sc->bge_stat_ch); 2927 bge_tick_locked(sc); 2928 /* Clear the interrupt */ 2929 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2930 BGE_EVTENB_MI_INTERRUPT); 2931 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 2932 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 2933 BRGPHY_INTRS); 2934 } 2935 } else { 2936 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) { 2937 /* 2938 * Sometimes PCS encoding errors are detected in 2939 * TBI mode (on fiber NICs), and for some reason 2940 * the chip will signal them as link changes. 2941 * If we get a link change event, but the 'PCS 2942 * encoding error' bit in the MAC status register 2943 * is set, don't bother doing a link check. 2944 * This avoids spurious "gigabit link up" messages 2945 * that sometimes appear on fiber NICs during 2946 * periods of heavy traffic. (There should be no 2947 * effect on copper NICs.) 2948 * 2949 * If we do have a copper NIC (bge_tbi == 0) then 2950 * check that the AUTOPOLL bit is set before 2951 * processing the event as a real link change. 2952 * Turning AUTOPOLL on and off in the MII read/write 2953 * functions will often trigger a link status 2954 * interrupt for no reason. 2955 */ 2956 status = CSR_READ_4(sc, BGE_MAC_STS); 2957 mimode = CSR_READ_4(sc, BGE_MI_MODE); 2958 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR| 2959 BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi && 2960 (mimode & BGE_MIMODE_AUTOPOLL))) { 2961 sc->bge_link = 0; 2962 callout_stop(&sc->bge_stat_ch); 2963 bge_tick_locked(sc); 2964 } 2965 /* Clear the interrupt */ 2966 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 2967 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 2968 BGE_MACSTAT_LINK_CHANGED); 2969 2970 /* Force flush the status block cached by PCI bridge */ 2971 CSR_READ_4(sc, BGE_MBX_IRQ0_LO); 2972 } 2973 } 2974 2975 if (ifp->if_flags & IFF_RUNNING) { 2976 /* Check RX return ring producer/consumer */ 2977 bge_rxeof(sc); 2978 2979 /* Check TX ring producer/consumer */ 2980 bge_txeof(sc); 2981 } 2982 2983 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2984 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE); 2985 2986 bge_handle_events(sc); 2987 2988 /* Re-enable interrupts. 
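 * Writing 0 to the IRQ mailbox undoes the mask written at the top of the
 * handler and allows the chip to generate further interrupts.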
*/ 2989 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2990 2991 if (ifp->if_flags & IFF_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2992 bge_start_locked(ifp); 2993 2994 BGE_UNLOCK(sc); 2995 2996 return; 2997 } 2998 2999 static void 3000 bge_tick_locked(sc) 3001 struct bge_softc *sc; 3002 { 3003 struct mii_data *mii = NULL; 3004 struct ifmedia *ifm = NULL; 3005 struct ifnet *ifp; 3006 3007 ifp = &sc->arpcom.ac_if; 3008 3009 BGE_LOCK_ASSERT(sc); 3010 3011 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || 3012 sc->bge_asicrev == BGE_ASICREV_BCM5750) 3013 bge_stats_update_regs(sc); 3014 else 3015 bge_stats_update(sc); 3016 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 3017 if (sc->bge_link) 3018 return; 3019 3020 if (sc->bge_tbi) { 3021 ifm = &sc->bge_ifmedia; 3022 if (CSR_READ_4(sc, BGE_MAC_STS) & 3023 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3024 sc->bge_link++; 3025 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 3026 BGE_CLRBIT(sc, BGE_MAC_MODE, 3027 BGE_MACMODE_TBI_SEND_CFGS); 3028 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 3029 if (bootverbose) 3030 printf("bge%d: gigabit link up\n", 3031 sc->bge_unit); 3032 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3033 bge_start_locked(ifp); 3034 } 3035 return; 3036 } 3037 3038 mii = device_get_softc(sc->bge_miibus); 3039 mii_tick(mii); 3040 3041 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && 3042 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3043 sc->bge_link++; 3044 if ((IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 3045 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) && 3046 bootverbose) 3047 printf("bge%d: gigabit link up\n", sc->bge_unit); 3048 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3049 bge_start_locked(ifp); 3050 } 3051 3052 return; 3053 } 3054 3055 static void 3056 bge_tick(xsc) 3057 void *xsc; 3058 { 3059 struct bge_softc *sc; 3060 3061 sc = xsc; 3062 3063 BGE_LOCK(sc); 3064 bge_tick_locked(sc); 3065 BGE_UNLOCK(sc); 3066 } 3067 3068 static void 3069 bge_stats_update_regs(sc) 3070 struct bge_softc *sc; 3071 { 3072 struct ifnet *ifp; 3073 struct bge_mac_stats_regs stats; 3074 u_int32_t *s; 3075 int i; 3076 3077 ifp = &sc->arpcom.ac_if; 3078 3079 s = (u_int32_t *)&stats; 3080 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3081 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3082 s++; 3083 } 3084 3085 ifp->if_collisions += 3086 (stats.dot3StatsSingleCollisionFrames + 3087 stats.dot3StatsMultipleCollisionFrames + 3088 stats.dot3StatsExcessiveCollisions + 3089 stats.dot3StatsLateCollisions) - 3090 ifp->if_collisions; 3091 3092 return; 3093 } 3094 3095 static void 3096 bge_stats_update(sc) 3097 struct bge_softc *sc; 3098 { 3099 struct ifnet *ifp; 3100 struct bge_stats *stats; 3101 3102 ifp = &sc->arpcom.ac_if; 3103 3104 stats = (struct bge_stats *)(sc->bge_vhandle + 3105 BGE_MEMWIN_START + BGE_STATS_BLOCK); 3106 3107 ifp->if_collisions += 3108 (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo + 3109 stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo + 3110 stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo + 3111 stats->txstats.dot3StatsLateCollisions.bge_addr_lo) - 3112 ifp->if_collisions; 3113 3114 #ifdef notdef 3115 ifp->if_collisions += 3116 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3117 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3118 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3119 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3120 ifp->if_collisions; 3121 #endif 3122 3123 return; 3124 } 3125 3126 /* 3127 * Encapsulate an mbuf chain in 
the tx ring by coupling the mbuf data 3128 * pointers to descriptors. 3129 */ 3130 static int 3131 bge_encap(sc, m_head, txidx) 3132 struct bge_softc *sc; 3133 struct mbuf *m_head; 3134 u_int32_t *txidx; 3135 { 3136 struct bge_tx_bd *f = NULL; 3137 u_int16_t csum_flags = 0; 3138 struct m_tag *mtag; 3139 struct bge_dmamap_arg ctx; 3140 bus_dmamap_t map; 3141 int error; 3142 3143 3144 if (m_head->m_pkthdr.csum_flags) { 3145 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3146 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3147 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 3148 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3149 if (m_head->m_flags & M_LASTFRAG) 3150 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 3151 else if (m_head->m_flags & M_FRAG) 3152 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 3153 } 3154 3155 mtag = VLAN_OUTPUT_TAG(&sc->arpcom.ac_if, m_head); 3156 3157 ctx.sc = sc; 3158 ctx.bge_idx = *txidx; 3159 ctx.bge_ring = sc->bge_ldata.bge_tx_ring; 3160 ctx.bge_flags = csum_flags; 3161 /* 3162 * Sanity check: avoid coming within 16 descriptors 3163 * of the end of the ring. 3164 */ 3165 ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16; 3166 3167 map = sc->bge_cdata.bge_tx_dmamap[*txidx]; 3168 error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map, 3169 m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT); 3170 3171 if (error || ctx.bge_maxsegs == 0 /*|| 3172 ctx.bge_idx == sc->bge_tx_saved_considx*/) 3173 return (ENOBUFS); 3174 3175 /* 3176 * Insure that the map for this transmission 3177 * is placed at the array index of the last descriptor 3178 * in this chain. 3179 */ 3180 sc->bge_cdata.bge_tx_dmamap[*txidx] = 3181 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx]; 3182 sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map; 3183 sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head; 3184 sc->bge_txcnt += ctx.bge_maxsegs; 3185 f = &sc->bge_ldata.bge_tx_ring[*txidx]; 3186 if (mtag != NULL) { 3187 f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG); 3188 f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag)); 3189 } else { 3190 f->bge_vlan_tag = 0; 3191 } 3192 3193 BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT); 3194 *txidx = ctx.bge_idx; 3195 3196 return(0); 3197 } 3198 3199 /* 3200 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3201 * to the mbuf data regions directly in the transmit descriptors. 3202 */ 3203 static void 3204 bge_start_locked(ifp) 3205 struct ifnet *ifp; 3206 { 3207 struct bge_softc *sc; 3208 struct mbuf *m_head = NULL; 3209 u_int32_t prodidx = 0; 3210 int count = 0; 3211 3212 sc = ifp->if_softc; 3213 3214 if (!sc->bge_link && IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3215 return; 3216 3217 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); 3218 3219 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3220 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 3221 if (m_head == NULL) 3222 break; 3223 3224 /* 3225 * XXX 3226 * The code inside the if() block is never reached since we 3227 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 3228 * requests to checksum TCP/UDP in a fragmented packet. 3229 * 3230 * XXX 3231 * safety overkill. If this is a fragmented packet chain 3232 * with delayed TCP/UDP checksums, then only encapsulate 3233 * it if we have enough descriptors to handle the entire 3234 * chain at once. 
3235 * (paranoia -- may not actually be needed) 3236 */ 3237 if (m_head->m_flags & M_FIRSTFRAG && 3238 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3239 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3240 m_head->m_pkthdr.csum_data + 16) { 3241 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3242 ifp->if_flags |= IFF_OACTIVE; 3243 break; 3244 } 3245 } 3246 3247 /* 3248 * Pack the data into the transmit ring. If we 3249 * don't have room, set the OACTIVE flag and wait 3250 * for the NIC to drain the ring. 3251 */ 3252 if (bge_encap(sc, m_head, &prodidx)) { 3253 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3254 ifp->if_flags |= IFF_OACTIVE; 3255 break; 3256 } 3257 ++count; 3258 3259 /* 3260 * If there's a BPF listener, bounce a copy of this frame 3261 * to him. 3262 */ 3263 BPF_MTAP(ifp, m_head); 3264 } 3265 3266 if (count == 0) { 3267 /* no packets were dequeued */ 3268 return; 3269 } 3270 3271 /* Transmit */ 3272 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3273 /* 5700 b2 errata */ 3274 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 3275 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3276 3277 /* 3278 * Set a timeout in case the chip goes out to lunch. 3279 */ 3280 ifp->if_timer = 5; 3281 3282 return; 3283 } 3284 3285 /* 3286 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3287 * to the mbuf data regions directly in the transmit descriptors. 3288 */ 3289 static void 3290 bge_start(ifp) 3291 struct ifnet *ifp; 3292 { 3293 struct bge_softc *sc; 3294 3295 sc = ifp->if_softc; 3296 BGE_LOCK(sc); 3297 bge_start_locked(ifp); 3298 BGE_UNLOCK(sc); 3299 } 3300 3301 static void 3302 bge_init_locked(sc) 3303 struct bge_softc *sc; 3304 { 3305 struct ifnet *ifp; 3306 u_int16_t *m; 3307 3308 BGE_LOCK_ASSERT(sc); 3309 3310 ifp = &sc->arpcom.ac_if; 3311 3312 if (ifp->if_flags & IFF_RUNNING) 3313 return; 3314 3315 /* Cancel pending I/O and flush buffers. */ 3316 bge_stop(sc); 3317 bge_reset(sc); 3318 bge_chipinit(sc); 3319 3320 /* 3321 * Init the various state machines, ring 3322 * control blocks and firmware. 3323 */ 3324 if (bge_blockinit(sc)) { 3325 printf("bge%d: initialization failure\n", sc->bge_unit); 3326 return; 3327 } 3328 3329 ifp = &sc->arpcom.ac_if; 3330 3331 /* Specify MTU. */ 3332 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3333 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 3334 3335 /* Load our MAC address. */ 3336 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0]; 3337 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3338 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3339 3340 /* Enable or disable promiscuous mode as needed. */ 3341 if (ifp->if_flags & IFF_PROMISC) { 3342 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3343 } else { 3344 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3345 } 3346 3347 /* Program multicast filter. */ 3348 bge_setmulti(sc); 3349 3350 /* Init RX ring. */ 3351 bge_init_rx_ring_std(sc); 3352 3353 /* 3354 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 3355 * memory to insure that the chip has in fact read the first 3356 * entry of the ring. 3357 */ 3358 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 3359 u_int32_t v, i; 3360 for (i = 0; i < 10; i++) { 3361 DELAY(20); 3362 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 3363 if (v == (MCLBYTES - ETHER_ALIGN)) 3364 break; 3365 } 3366 if (i == 10) 3367 printf ("bge%d: 5705 A0 chip failed to load RX ring\n", 3368 sc->bge_unit); 3369 } 3370 3371 /* Init jumbo RX ring. 
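 * This is only needed when the interface MTU is larger than a standard
 * Ethernet frame; smaller frames are handled entirely by the standard
 * ring initialized above.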
*/ 3372 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3373 bge_init_rx_ring_jumbo(sc); 3374 3375 /* Init our RX return ring index */ 3376 sc->bge_rx_saved_considx = 0; 3377 3378 /* Init TX ring. */ 3379 bge_init_tx_ring(sc); 3380 3381 /* Turn on transmitter */ 3382 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3383 3384 /* Turn on receiver */ 3385 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3386 3387 /* Tell firmware we're alive. */ 3388 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3389 3390 /* Enable host interrupts. */ 3391 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3392 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3393 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3394 3395 bge_ifmedia_upd(ifp); 3396 3397 ifp->if_flags |= IFF_RUNNING; 3398 ifp->if_flags &= ~IFF_OACTIVE; 3399 3400 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 3401 3402 return; 3403 } 3404 3405 static void 3406 bge_init(xsc) 3407 void *xsc; 3408 { 3409 struct bge_softc *sc = xsc; 3410 3411 BGE_LOCK(sc); 3412 bge_init_locked(sc); 3413 BGE_UNLOCK(sc); 3414 3415 return; 3416 } 3417 3418 /* 3419 * Set media options. 3420 */ 3421 static int 3422 bge_ifmedia_upd(ifp) 3423 struct ifnet *ifp; 3424 { 3425 struct bge_softc *sc; 3426 struct mii_data *mii; 3427 struct ifmedia *ifm; 3428 3429 sc = ifp->if_softc; 3430 ifm = &sc->bge_ifmedia; 3431 3432 /* If this is a 1000baseX NIC, enable the TBI port. */ 3433 if (sc->bge_tbi) { 3434 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3435 return(EINVAL); 3436 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3437 case IFM_AUTO: 3438 /* 3439 * The BCM5704 ASIC appears to have a special 3440 * mechanism for programming the autoneg 3441 * advertisement registers in TBI mode. 3442 */ 3443 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 3444 uint32_t sgdig; 3445 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 3446 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 3447 sgdig |= BGE_SGDIGCFG_AUTO| 3448 BGE_SGDIGCFG_PAUSE_CAP| 3449 BGE_SGDIGCFG_ASYM_PAUSE; 3450 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 3451 sgdig|BGE_SGDIGCFG_SEND); 3452 DELAY(5); 3453 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 3454 } 3455 break; 3456 case IFM_1000_SX: 3457 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3458 BGE_CLRBIT(sc, BGE_MAC_MODE, 3459 BGE_MACMODE_HALF_DUPLEX); 3460 } else { 3461 BGE_SETBIT(sc, BGE_MAC_MODE, 3462 BGE_MACMODE_HALF_DUPLEX); 3463 } 3464 break; 3465 default: 3466 return(EINVAL); 3467 } 3468 return(0); 3469 } 3470 3471 mii = device_get_softc(sc->bge_miibus); 3472 sc->bge_link = 0; 3473 if (mii->mii_instance) { 3474 struct mii_softc *miisc; 3475 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 3476 miisc = LIST_NEXT(miisc, mii_list)) 3477 mii_phy_reset(miisc); 3478 } 3479 mii_mediachg(mii); 3480 3481 return(0); 3482 } 3483 3484 /* 3485 * Report current media status. 
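 * For TBI (fiber) interfaces the link and duplex state are read straight
 * from the MAC status and mode registers; for copper interfaces we query
 * the PHY through the MII layer instead.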
3486 */ 3487 static void 3488 bge_ifmedia_sts(ifp, ifmr) 3489 struct ifnet *ifp; 3490 struct ifmediareq *ifmr; 3491 { 3492 struct bge_softc *sc; 3493 struct mii_data *mii; 3494 3495 sc = ifp->if_softc; 3496 3497 if (sc->bge_tbi) { 3498 ifmr->ifm_status = IFM_AVALID; 3499 ifmr->ifm_active = IFM_ETHER; 3500 if (CSR_READ_4(sc, BGE_MAC_STS) & 3501 BGE_MACSTAT_TBI_PCS_SYNCHED) 3502 ifmr->ifm_status |= IFM_ACTIVE; 3503 ifmr->ifm_active |= IFM_1000_SX; 3504 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3505 ifmr->ifm_active |= IFM_HDX; 3506 else 3507 ifmr->ifm_active |= IFM_FDX; 3508 return; 3509 } 3510 3511 mii = device_get_softc(sc->bge_miibus); 3512 mii_pollstat(mii); 3513 ifmr->ifm_active = mii->mii_media_active; 3514 ifmr->ifm_status = mii->mii_media_status; 3515 3516 return; 3517 } 3518 3519 static int 3520 bge_ioctl(ifp, command, data) 3521 struct ifnet *ifp; 3522 u_long command; 3523 caddr_t data; 3524 { 3525 struct bge_softc *sc = ifp->if_softc; 3526 struct ifreq *ifr = (struct ifreq *) data; 3527 int mask, error = 0; 3528 struct mii_data *mii; 3529 3530 switch(command) { 3531 case SIOCSIFMTU: 3532 /* Disallow jumbo frames on 5705. */ 3533 if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 || 3534 sc->bge_asicrev == BGE_ASICREV_BCM5750) && 3535 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU) 3536 error = EINVAL; 3537 else { 3538 ifp->if_mtu = ifr->ifr_mtu; 3539 ifp->if_flags &= ~IFF_RUNNING; 3540 bge_init(sc); 3541 } 3542 break; 3543 case SIOCSIFFLAGS: 3544 BGE_LOCK(sc); 3545 if (ifp->if_flags & IFF_UP) { 3546 /* 3547 * If only the state of the PROMISC flag changed, 3548 * then just use the 'set promisc mode' command 3549 * instead of reinitializing the entire NIC. Doing 3550 * a full re-init means reloading the firmware and 3551 * waiting for it to start up, which may take a 3552 * second or two. 
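 * Below, only the RX_PROMISC bit in the receive mode register is toggled
 * when just the promiscuous flag changed; otherwise (including the initial
 * transition to IFF_UP) we fall through to a full bge_init_locked().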
3553 */ 3554 if (ifp->if_flags & IFF_RUNNING && 3555 ifp->if_flags & IFF_PROMISC && 3556 !(sc->bge_if_flags & IFF_PROMISC)) { 3557 BGE_SETBIT(sc, BGE_RX_MODE, 3558 BGE_RXMODE_RX_PROMISC); 3559 } else if (ifp->if_flags & IFF_RUNNING && 3560 !(ifp->if_flags & IFF_PROMISC) && 3561 sc->bge_if_flags & IFF_PROMISC) { 3562 BGE_CLRBIT(sc, BGE_RX_MODE, 3563 BGE_RXMODE_RX_PROMISC); 3564 } else 3565 bge_init_locked(sc); 3566 } else { 3567 if (ifp->if_flags & IFF_RUNNING) { 3568 bge_stop(sc); 3569 } 3570 } 3571 sc->bge_if_flags = ifp->if_flags; 3572 BGE_UNLOCK(sc); 3573 error = 0; 3574 break; 3575 case SIOCADDMULTI: 3576 case SIOCDELMULTI: 3577 if (ifp->if_flags & IFF_RUNNING) { 3578 BGE_LOCK(sc); 3579 bge_setmulti(sc); 3580 BGE_UNLOCK(sc); 3581 error = 0; 3582 } 3583 break; 3584 case SIOCSIFMEDIA: 3585 case SIOCGIFMEDIA: 3586 if (sc->bge_tbi) { 3587 error = ifmedia_ioctl(ifp, ifr, 3588 &sc->bge_ifmedia, command); 3589 } else { 3590 mii = device_get_softc(sc->bge_miibus); 3591 error = ifmedia_ioctl(ifp, ifr, 3592 &mii->mii_media, command); 3593 } 3594 break; 3595 case SIOCSIFCAP: 3596 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3597 /* NB: the code for RX csum offload is disabled for now */ 3598 if (mask & IFCAP_TXCSUM) { 3599 ifp->if_capenable ^= IFCAP_TXCSUM; 3600 if (IFCAP_TXCSUM & ifp->if_capenable) 3601 ifp->if_hwassist = BGE_CSUM_FEATURES; 3602 else 3603 ifp->if_hwassist = 0; 3604 } 3605 error = 0; 3606 break; 3607 default: 3608 error = ether_ioctl(ifp, command, data); 3609 break; 3610 } 3611 3612 return(error); 3613 } 3614 3615 static void 3616 bge_watchdog(ifp) 3617 struct ifnet *ifp; 3618 { 3619 struct bge_softc *sc; 3620 3621 sc = ifp->if_softc; 3622 3623 printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit); 3624 3625 ifp->if_flags &= ~IFF_RUNNING; 3626 bge_init(sc); 3627 3628 ifp->if_oerrors++; 3629 3630 return; 3631 } 3632 3633 /* 3634 * Stop the adapter and free any mbufs allocated to the 3635 * RX and TX lists. 3636 */ 3637 static void 3638 bge_stop(sc) 3639 struct bge_softc *sc; 3640 { 3641 struct ifnet *ifp; 3642 struct ifmedia_entry *ifm; 3643 struct mii_data *mii = NULL; 3644 int mtmp, itmp; 3645 3646 BGE_LOCK_ASSERT(sc); 3647 3648 ifp = &sc->arpcom.ac_if; 3649 3650 if (!sc->bge_tbi) 3651 mii = device_get_softc(sc->bge_miibus); 3652 3653 callout_stop(&sc->bge_stat_ch); 3654 3655 /* 3656 * Disable all of the receiver blocks 3657 */ 3658 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3659 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3660 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3661 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3662 sc->bge_asicrev != BGE_ASICREV_BCM5750) 3663 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3664 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3665 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3666 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3667 3668 /* 3669 * Disable all of the transmit blocks 3670 */ 3671 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3672 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3673 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3674 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3675 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3676 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3677 sc->bge_asicrev != BGE_ASICREV_BCM5750) 3678 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3679 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3680 3681 /* 3682 * Shut down all of the memory managers and related 3683 * state machines. 
3684 */ 3685 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3686 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3687 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3688 sc->bge_asicrev != BGE_ASICREV_BCM5750) 3689 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3690 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3691 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3692 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3693 sc->bge_asicrev != BGE_ASICREV_BCM5750) { 3694 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3695 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3696 } 3697 3698 /* Disable host interrupts. */ 3699 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3700 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3701 3702 /* 3703 * Tell firmware we're shutting down. 3704 */ 3705 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3706 3707 /* Free the RX lists. */ 3708 bge_free_rx_ring_std(sc); 3709 3710 /* Free jumbo RX list. */ 3711 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 3712 sc->bge_asicrev != BGE_ASICREV_BCM5750) 3713 bge_free_rx_ring_jumbo(sc); 3714 3715 /* Free TX buffers. */ 3716 bge_free_tx_ring(sc); 3717 3718 /* 3719 * Isolate/power down the PHY, but leave the media selection 3720 * unchanged so that things will be put back to normal when 3721 * we bring the interface back up. 3722 */ 3723 if (!sc->bge_tbi) { 3724 itmp = ifp->if_flags; 3725 ifp->if_flags |= IFF_UP; 3726 ifm = mii->mii_media.ifm_cur; 3727 mtmp = ifm->ifm_media; 3728 ifm->ifm_media = IFM_ETHER|IFM_NONE; 3729 mii_mediachg(mii); 3730 ifm->ifm_media = mtmp; 3731 ifp->if_flags = itmp; 3732 } 3733 3734 sc->bge_link = 0; 3735 3736 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3737 3738 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3739 3740 return; 3741 } 3742 3743 /* 3744 * Stop all chip I/O so that the kernel's probe routines don't 3745 * get confused by errant DMAs when rebooting. 3746 */ 3747 static void 3748 bge_shutdown(dev) 3749 device_t dev; 3750 { 3751 struct bge_softc *sc; 3752 3753 sc = device_get_softc(dev); 3754 3755 BGE_LOCK(sc); 3756 bge_stop(sc); 3757 bge_reset(sc); 3758 BGE_UNLOCK(sc); 3759 3760 return; 3761 } 3762