/*-
 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */
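
/*
 * The driver attaches as sge(4).  A kernel configuration needs both
 * the MII bus and the driver itself:
 *
 *	device miibus
 *	device sge
 *
 * Alternatively, the module can typically be loaded at boot by adding
 * if_sge_load="YES" to /boot/loader.conf.
 */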

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/sge/if_sgereg.h>

MODULE_DEPEND(sge, pci, 1, 1, 1);
MODULE_DEPEND(sge, ether, 1, 1, 1);
MODULE_DEPEND(sge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct sge_type sge_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" },
	{ SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	sge_probe(device_t);
static int	sge_attach(device_t);
static int	sge_detach(device_t);
static int	sge_shutdown(device_t);
static int	sge_suspend(device_t);
static int	sge_resume(device_t);

static int	sge_miibus_readreg(device_t, int, int);
static int	sge_miibus_writereg(device_t, int, int, int);
static void	sge_miibus_statchg(device_t);

static int	sge_newbuf(struct sge_softc *, int);
static int	sge_encap(struct sge_softc *, struct mbuf **);
static __inline void
		sge_discard_rxbuf(struct sge_softc *, int);
static void	sge_rxeof(struct sge_softc *);
static void	sge_txeof(struct sge_softc *);
static void	sge_intr(void *);
static void	sge_tick(void *);
static void	sge_start(struct ifnet *);
static void	sge_start_locked(struct ifnet *);
static int	sge_ioctl(struct ifnet *, u_long, caddr_t);
static void	sge_init(void *);
static void	sge_init_locked(struct sge_softc *);
static void	sge_stop(struct sge_softc *);
static void	sge_watchdog(struct sge_softc *);
static int	sge_ifmedia_upd(struct ifnet *);
static void	sge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	sge_get_mac_addr_apc(struct sge_softc *, uint8_t *);
static int	sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *);
static uint16_t	sge_read_eeprom(struct sge_softc *, int);

static void	sge_rxfilter(struct sge_softc *);
static void	sge_setvlan(struct sge_softc *);
static void	sge_reset(struct sge_softc *);
static int	sge_list_rx_init(struct sge_softc *);
static int	sge_list_rx_free(struct sge_softc *);
static int	sge_list_tx_init(struct sge_softc *);
static int	sge_list_tx_free(struct sge_softc *);

static int	sge_dma_alloc(struct sge_softc *);
static void	sge_dma_free(struct sge_softc *);
static void	sge_dma_map_addr(void *, bus_dma_segment_t *, int, int);

static device_method_t sge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sge_probe),
	DEVMETHOD(device_attach,	sge_attach),
	DEVMETHOD(device_detach,	sge_detach),
	DEVMETHOD(device_suspend,	sge_suspend),
	DEVMETHOD(device_resume,	sge_resume),
	DEVMETHOD(device_shutdown,	sge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sge_driver = {
	"sge", sge_methods, sizeof(struct sge_softc)
};

static devclass_t sge_devclass;

DRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0);
DRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0);

/*
 * Register space access macros.
 */
#define	CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sge_res, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	bus_write_2(sc->sge_res, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	bus_write_1(sc->sge_res, reg, val)

#define	CSR_READ_4(sc, reg)	bus_read_4(sc->sge_res, reg)
#define	CSR_READ_2(sc, reg)	bus_read_2(sc->sge_res, reg)
#define	CSR_READ_1(sc, reg)	bus_read_1(sc->sge_res, reg)

/* Define to show Tx/Rx error status. */
#undef SGE_SHOW_ERRORS

#define	SGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static void
sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *p;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	p = arg;
	*p = segs->ds_addr;
}

/*
 * Read a word from the EEPROM.
 */
static uint16_t
sge_read_eeprom(struct sge_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big"));
	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev,
		    "EEPROM read timeout : 0x%08x\n", val);
		return (0xffff);
	}

	return ((val & EI_DATA) >> EI_DATA_SHIFT);
}

static int
sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = sge_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0) {
		device_printf(sc->sge_dev,
		    "invalid EEPROM signature : 0x%04x\n", val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	return (0);
}

/*
 * For SiS96x, APC CMOS RAM is used to store the Ethernet address.
 * APC CMOS RAM is accessed through the ISA bridge.
 */
static int
sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	devclass_t pci;
	device_t bus, dev = NULL;
	device_t *kids;
	struct apc_tbl {
		uint16_t vid;
		uint16_t did;
	} *tp, apc_tbls[] = {
		{ SIS_VENDORID, 0x0965 },
		{ SIS_VENDORID, 0x0966 },
		{ SIS_VENDORID, 0x0968 }
	};
	uint8_t reg;
	int busnum, i, j, numkids;

	pci = devclass_find("pci");
	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_BRIDGE &&
			    pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
				tp = apc_tbls;
				for (j = 0; j < nitems(apc_tbls); j++) {
					if (pci_get_vendor(dev) == tp->vid &&
					    pci_get_device(dev) == tp->did) {
						free(kids, M_TEMP);
						goto apc_found;
					}
					tp++;
				}
			}
		}
		free(kids, M_TEMP);
	}
	device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
	return (EINVAL);
apc_found:
	/* Enable ports 0x78 and 0x79 to access the APC registers. */
	reg = pci_read_config(dev, 0x48, 1);
	pci_write_config(dev, 0x48, reg & ~0x02, 1);
	DELAY(50);
	pci_read_config(dev, 0x48, 1);
	/* Read the stored Ethernet address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		outb(0x78, 0x09 + i);
		dest[i] = inb(0x79);
	}
	outb(0x78, 0x12);
	if ((inb(0x79) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	/* Restore access to the APC registers. */
	pci_write_config(dev, 0x48, reg, 1);

	return (0);
#else
	return (EINVAL);
#endif
}

static int
sge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
		return (0);
	}
	return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
}

static int
sge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
	    GMI_OP_WR | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT)
		device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
	return (0);
}

static void
sge_miibus_statchg(device_t dev)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t ctl, speed;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sge_miibus);
	ifp = sc->sge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	speed = 0;
	sc->sge_flags &= ~SGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
			if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
				sc->sge_flags |= SGE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
		return;
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000) {
		ctl |= 0x07000000;
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	} else {
		ctl |= 0x04000000;
		sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
	}
#ifdef notyet
	if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ctl |= SC_FDX;
		sc->sge_flags |= SGE_FLAG_FDX;
	} else
		sc->sge_flags &= ~SGE_FLAG_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}
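
/*
 * Program the receive filter.  Unicast/broadcast/promiscuous policy
 * comes from the interface flags; multicast addresses are hashed into
 * a 64-bit table split across the RxHashTable and RxHashTable2
 * registers, where bit 31 of the big-endian CRC of an address selects
 * the register and bits 30-26 select the bit within it.
 */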
static void
sge_rxfilter(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast);
	rxfilt |= AcceptMyPhys;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= AcceptBroadcast;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;
		/* Now program new ones. */
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		if_maddr_runlock(ifp);
	}
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

static void
sge_setvlan(struct sge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
		return;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		rxfilt |= RXMAC_STRIP_VLAN;
	else
		rxfilt &= ~RXMAC_STRIP_VLAN;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
}

static void
sge_reset(struct sge_softc *sc)
{

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sge_probe(device_t dev)
{
	struct sge_type *t;

	t = sge_devs;
	while (t->sge_name != NULL) {
		if ((pci_get_vendor(dev) == t->sge_vid) &&
		    (pci_get_device(dev) == t->sge_did)) {
			device_set_desc(dev, t->sge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and Ethernet/BPF attach.
 */
static int
sge_attach(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error = 0, rid;

	sc = device_get_softc(dev);
	sc->sge_dev = dev;

	mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources. */
	sc->sge_res_id = PCIR_BAR(0);
	sc->sge_res_type = SYS_RES_MEMORY;
	sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type,
	    &sc->sge_res_id, RF_ACTIVE);
	if (sc->sge_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sge_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}
	sc->sge_rev = pci_get_revid(dev);
	if (pci_get_device(dev) == SIS_DEVICEID_190)
		sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190;
	/* Reset the adapter. */
	sge_reset(sc);

	/* Get the MAC address from APC CMOS RAM or the EEPROM. */
	if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0)
		sge_get_mac_addr_apc(sc, eaddr);
	else
		sge_get_mac_addr_eeprom(sc, eaddr);

	if ((error = sge_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sge_ioctl;
	ifp->if_start = sge_start;
	ifp->if_init = sge_init;
	ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = SGE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd,
	    sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN setup. */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sge_intr, sc, &sc->sge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		sge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sge_detach(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->sge_ifp;
	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		SGE_LOCK(sc);
		sge_stop(sc);
		SGE_UNLOCK(sc);
		callout_drain(&sc->sge_stat_ch);
	}
	if (sc->sge_miibus)
		device_delete_child(dev, sc->sge_miibus);
	bus_generic_detach(dev);

	if (sc->sge_intrhand)
		bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand);
	if (sc->sge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq);
	if (sc->sge_res)
		bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id,
		    sc->sge_res);
	if (ifp)
		if_free(ifp);
	sge_dma_free(sc);
	mtx_destroy(&sc->sge_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
sge_shutdown(device_t dev)
{
	struct sge_softc *sc;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	sge_stop(sc);
	SGE_UNLOCK(sc);
	return (0);
}

static int
sge_suspend(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		sge_stop(sc);
	SGE_UNLOCK(sc);
	return (0);
}

static int
sge_resume(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;
	if ((ifp->if_flags & IFF_UP) != 0)
		sge_init_locked(sc);
	SGE_UNLOCK(sc);
	return (0);
}
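
/*
 * DMA resources form a two-level hierarchy: a parent tag restricted
 * to 32-bit bus addresses, with child tags (and maps) for the Tx and
 * Rx descriptor rings and for individual Tx and Rx mbufs.  A spare
 * Rx map lets sge_newbuf() load a replacement cluster before the
 * currently mapped buffer is surrendered.
 */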
static int
sge_dma_alloc(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int error, i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* RX descriptor ring */
	error = bus_dma_tag_create(cd->sge_tag,
	    SGE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SGE_RX_RING_SZ, 1,		/* maxsize, nsegments */
	    SGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_rx_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load DMA map for RX ring. */
	error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &cd->sge_rx_dmamap);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr,
	    &ld->sge_rx_paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}

	/* TX descriptor ring */
	error = bus_dma_tag_create(cd->sge_tag,
	    SGE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SGE_TX_RING_SZ, 1,		/* maxsize, nsegments */
	    SGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_tx_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load DMA map for TX ring. */
	error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &cd->sge_tx_dmamap);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr,
	    &ld->sge_tx_paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS,
	    SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Tx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Rx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		txd->tx_ndesc = 0;
		error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sge_dev,
			    "could not create Tx DMA map.\n");
			goto fail;
		}
	}
	/* Create spare DMA map for Rx buffer. */
	error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create spare Rx DMA map.\n");
		goto fail;
	}
	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
		    &rxd->rx_dmamap);
		if (error) {
			device_printf(sc->sge_dev,
			    "could not create Rx DMA map.\n");
			goto fail;
		}
	}
fail:
	return (error);
}
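
/*
 * Free everything allocated by sge_dma_alloc().  Every resource is
 * checked before it is released, so this is safe to call from the
 * attach failure path with only partially allocated state.
 */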
static void
sge_dma_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
	/* Rx ring. */
	if (cd->sge_rx_tag != NULL) {
		if (ld->sge_rx_paddr != 0)
			bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap);
		if (ld->sge_rx_ring != NULL)
			bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring,
			    cd->sge_rx_dmamap);
		ld->sge_rx_ring = NULL;
		ld->sge_rx_paddr = 0;
		bus_dma_tag_destroy(cd->sge_rx_tag);
		cd->sge_rx_tag = NULL;
	}
	/* Tx ring. */
	if (cd->sge_tx_tag != NULL) {
		if (ld->sge_tx_paddr != 0)
			bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap);
		if (ld->sge_tx_ring != NULL)
			bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring,
			    cd->sge_tx_dmamap);
		ld->sge_tx_ring = NULL;
		ld->sge_tx_paddr = 0;
		bus_dma_tag_destroy(cd->sge_tx_tag);
		cd->sge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (cd->sge_rxmbuf_tag != NULL) {
		for (i = 0; i < SGE_RX_RING_CNT; i++) {
			rxd = &cd->sge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_rxmbuf_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (cd->sge_rx_spare_map != NULL) {
			bus_dmamap_destroy(cd->sge_rxmbuf_tag,
			    cd->sge_rx_spare_map);
			cd->sge_rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(cd->sge_rxmbuf_tag);
		cd->sge_rxmbuf_tag = NULL;
	}
	/* Tx buffers. */
	if (cd->sge_txmbuf_tag != NULL) {
		for (i = 0; i < SGE_TX_RING_CNT; i++) {
			txd = &cd->sge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_txmbuf_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(cd->sge_txmbuf_tag);
		cd->sge_txmbuf_tag = NULL;
	}
	if (cd->sge_tag != NULL)
		bus_dma_tag_destroy(cd->sge_tag);
	cd->sge_tag = NULL;
}

/*
 * Initialize the TX descriptors.
 */
static int
sge_list_tx_init(struct sge_softc *sc)
{
	struct sge_list_data *ld;
	struct sge_chain_data *cd;

	SGE_LOCK_ASSERT(sc);
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;
	bzero(ld->sge_tx_ring, SGE_TX_RING_SZ);
	ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END);
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->sge_tx_prod = 0;
	cd->sge_tx_cons = 0;
	cd->sge_tx_cnt = 0;
	return (0);
}

static int
sge_list_tx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * has the RING_END flag set.
 */
static int
sge_list_rx_init(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	cd->sge_rx_cons = 0;
	bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		if (sge_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
sge_list_rx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(cd->sge_rxmbuf_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	return (0);
}
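
/*
 * Rx buffers are replenished with a swap strategy: sge_newbuf() loads
 * the replacement cluster into the spare DMA map before releasing the
 * old buffer, so a received frame is handed up the stack only when
 * its descriptor can be re-armed; otherwise the frame is dropped and
 * the old buffer is recycled in place by sge_discard_rxbuf().
 */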

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	SGE_LOCK_ASSERT(sc);

	cd = &sc->sge_cdata;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SGE_RX_BUF_ALIGN);
	error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
	    cd->sge_rx_spare_map, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	rxd = &cd->sge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = cd->sge_rx_spare_map;
	cd->sge_rx_spare_map = map;
	bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	desc = &sc->sge_ldata.sge_rx_ring[prod];
	desc->sge_sts_size = 0;
	desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
	desc->sge_flags = htole32(segs[0].ds_len);
	if (prod == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
	return (0);
}

static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
	struct sge_desc *desc;

	desc = &sc->sge_ldata.sge_rx_ring[index];
	desc->sge_sts_size = 0;
	desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
	if (index == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
sge_rxeof(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	struct sge_chain_data *cd;
	struct sge_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	int cons, prog;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	cd = &sc->sge_cdata;

	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_rx_cons;
	for (prog = 0; prog < SGE_RX_RING_CNT; prog++,
	    SGE_INC(cons, SGE_RX_RING_CNT)) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		cur_rx = &sc->sge_ldata.sge_rx_ring[cons];
		rxinfo = le32toh(cur_rx->sge_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = le32toh(cur_rx->sge_sts_size);
		if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 ||
		    SGE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
			    RX_ERR_BITS);
#endif
			sge_discard_rxbuf(sc, cons);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		m = cd->sge_rxdesc[cons].rx_m;
		if (sge_newbuf(sc, cons) != 0) {
			sge_discard_rxbuf(sc, cons);
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			continue;
		}
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((rxinfo & RDC_IP_CSUM) != 0 &&
			    (rxinfo & RDC_IP_CSUM_OK) != 0)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
			if (((rxinfo & RDC_TCP_CSUM) != 0 &&
			    (rxinfo & RDC_TCP_CSUM_OK) != 0) ||
			    ((rxinfo & RDC_UDP_CSUM) != 0 &&
			    (rxinfo & RDC_UDP_CSUM_OK) != 0)) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		/* Check for a VLAN tagged frame. */
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (rxstat & RDS_VLAN) != 0) {
			m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK;
			m->m_flags |= M_VLANTAG;
		}
		/*
		 * Account for the 10 bytes of auto padding that are
		 * used to align the IP header on a 32-bit boundary.
		 * Also note that the CRC bytes are automatically
		 * removed by the hardware.
		 */
		m->m_data += SGE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
		    SGE_RX_PAD_BYTES;
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		SGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SGE_LOCK(sc);
	}

	if (prog > 0) {
		bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		cd->sge_rx_cons = cons;
	}
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
sge_txeof(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct sge_list_data *ld;
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	uint32_t txstat;
	int cons, nsegs, prod;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;

	if (cd->sge_tx_cnt == 0)
		return;
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_tx_cons;
	prod = cd->sge_tx_prod;
	for (; cons != prod;) {
		txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;
		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated by the controller, so the
		 * TDC_OWN bit is valid only in the first descriptor;
		 * the driver must skip over all chained descriptors
		 * of the transmitted frame.
		 */
		if (SGE_TX_ERROR(txstat) != 0) {
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Tx error : 0x%b\n",
			    txstat, TX_ERR_BITS);
#endif
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		} else {
#ifdef notyet
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & 0xFFFF) - 1);
#endif
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		txd = &cd->sge_txdesc[cons];
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			ld->sge_tx_ring[cons].sge_cmdsts = 0;
			SGE_INC(cons, SGE_TX_RING_CNT);
		}
		/* Reclaim transmitted mbuf. */
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf\n", __func__));
		bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		cd->sge_tx_cnt -= txd->tx_ndesc;
		KASSERT(cd->sge_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	cd->sge_tx_cons = cons;
	if (cd->sge_tx_cnt == 0)
		sc->sge_timer = 0;
}
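
/*
 * Once-a-second housekeeping callout: drive the MII state machine,
 * reclaim completed Tx descriptors and run the transmit watchdog.
 */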
static void
sge_tick(void *arg)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = arg;
	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	mii = device_get_softc(sc->sge_miibus);
	mii_tick(mii);
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
		sge_miibus_statchg(sc->sge_dev);
		if ((sc->sge_flags & SGE_FLAG_LINK) != 0 &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sge_start_locked(ifp);
	}
	/*
	 * Reclaim transmitted frames here as we do not request a Tx
	 * completion interrupt for every queued frame, to reduce
	 * excessive interrupts.
	 */
	sge_txeof(sc);
	sge_watchdog(sc);
	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

static void
sge_intr(void *arg)
{
	struct sge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) {
		/* Not ours. */
		SGE_UNLOCK(sc);
		return;
	}
	/* Acknowledge interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);
	/*
	 * The controller seems to support some kind of interrupt
	 * moderation mechanism, but we still don't know how to enable
	 * it.  To reduce the number of interrupts generated under
	 * load, pending interrupts are checked in a loop.  This
	 * increases the number of register accesses and is not the
	 * correct way to handle interrupt moderation, but there seems
	 * to be no other way at this time.
	 */
	for (;;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			sge_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			sge_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SGE_INTRS) == 0)
			break;
		/* Acknowledge interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sge_start_locked(ifp);
	}
	SGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sge_encap(struct sge_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_txdesc *txd;
	bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
	uint32_t cflags, mss;
	int error, i, nsegs, prod, si;

	SGE_LOCK_ASSERT(sc);

	si = prod = sc->sge_cdata.sge_tx_prod;
	txd = &sc->sge_cdata.sge_txdesc[prod];
	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check for an existing VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		m = m_pullup(m, poff + (tcp->th_off << 2));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		/*
		 * Reset the IP checksum and recompute the TCP pseudo
		 * checksum as the NDIS specification requires.
		 */
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		ip->ip_sum = 0;
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(IPPROTO_TCP));
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	KASSERT(nsegs != 0, ("zero segment returned"));
	/* Check descriptor overrun. */
	if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		cflags |= TDC_LS;
		mss = (uint32_t)m->m_pkthdr.tso_segsz;
		mss <<= 16;
	} else {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= TDC_IP_CSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= TDC_TCP_CSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= TDC_UDP_CSUM;
	}
	for (i = 0; i < nsegs; i++) {
		desc = &sc->sge_ldata.sge_tx_ring[prod];
		if (i == 0) {
			desc->sge_sts_size = htole32(m->m_pkthdr.len | mss);
			desc->sge_cmdsts = 0;
		} else {
			desc->sge_sts_size = 0;
			desc->sge_cmdsts = htole32(TDC_OWN);
		}
		desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr));
		desc->sge_flags = htole32(txsegs[i].ds_len);
		if (prod == SGE_TX_RING_CNT - 1)
			desc->sge_flags |= htole32(RING_END);
		sc->sge_cdata.sge_tx_cnt++;
		SGE_INC(prod, SGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->sge_cdata.sge_tx_prod = prod;

	desc = &sc->sge_ldata.sge_tx_ring[si];
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= m->m_pkthdr.ether_vtag;
		desc->sge_sts_size |= htole32(TDS_INS_VLAN);
	}
	desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
#if 1
	if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
		desc->sge_cmdsts |= htole32(TDC_BST);
#else
	if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
		desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
		if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
			desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
	}
#endif
	/* Request an interrupt and give ownership to the controller. */
	desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
	txd->tx_m = m;
	txd->tx_ndesc = nsegs;
	return (0);
}

static void
sge_start(struct ifnet *ifp)
{
	struct sge_softc *sc;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	sge_start_locked(ifp);
	SGE_UNLOCK(sc);
}

static void
sge_start_locked(struct ifnet *ifp)
{
	struct sge_softc *sc;
	struct mbuf *m_head;
	int queued = 0;

	sc = ifp->if_softc;
	SGE_LOCK_ASSERT(sc);

	if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT -
		    SGE_MAXTXSEGS)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		if (sge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued > 0) {
		bus_dmamap_sync(sc->sge_cdata.sge_tx_tag,
		    sc->sge_cdata.sge_tx_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		sc->sge_timer = 5;
	}
}

static void
sge_init(void *arg)
{
	struct sge_softc *sc;

	sc = arg;
	SGE_LOCK(sc);
	sge_init_locked(sc);
	SGE_UNLOCK(sc);
}

static void
sge_init_locked(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint16_t rxfilt;
	int i;

	SGE_LOCK_ASSERT(sc);
	ifp = sc->sge_ifp;
	mii = device_get_softc(sc->sge_miibus);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sge_stop(sc);
	sge_reset(sc);

	/* Init circular RX list. */
	if (sge_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->sge_dev, "no memory for Rx buffers\n");
		sge_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	sge_list_tx_init(sc);
	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr));
	CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr));

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	/* Allow receiving VLAN frames. */
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SGE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	sge_rxfilter(sc);
	sge_setvlan(sc);

	/* Initialize default speed/duplex information. */
	if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0)
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	sc->sge_flags |= SGE_FLAG_FDX;
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0)
		CSR_WRITE_4(sc, StationControl, 0x04008001);
	else
		CSR_WRITE_4(sc, StationControl, 0x04000001);
	/*
	 * XXX Try to mitigate interrupts.
	 */
	CSR_WRITE_4(sc, IntrControl, 0x08880000);
#ifdef notyet
	if (sc->sge_intrcontrol != 0)
		CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol);
	if (sc->sge_intrtimer != 0)
		CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer);
#endif

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
	CSR_WRITE_4(sc, IntrMask, SGE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->sge_flags &= ~SGE_FLAG_LINK;
	mii_mediachg(mii);
	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

/*
 * Set media options.
 */
static int
sge_ifmedia_upd(struct ifnet *ifp)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	mii = device_get_softc(sc->sge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	SGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	mii = device_get_softc(sc->sge_miibus);
	if ((ifp->if_flags & IFF_UP) == 0) {
		SGE_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	SGE_UNLOCK(sc);
}

static int
sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask, reinit;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		SGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				sge_rxfilter(sc);
			else
				sge_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			sge_stop(sc);
		sc->sge_if_flags = ifp->if_flags;
		SGE_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		SGE_LOCK(sc);
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= SGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~SGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			/*
			 * For unknown reasons, toggling VLAN hardware
			 * tagging requires interface reinitialization.
			 */
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
				ifp->if_capenable &=
				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
			reinit = 1;
		}
		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			sge_init_locked(sc);
		}
		SGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			sge_rxfilter(sc);
		SGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
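
/*
 * Transmit watchdog, driven from sge_tick().  sge_timer is armed when
 * frames are queued in sge_start_locked() and cleared once the Tx
 * ring drains; if it counts down to zero the controller is assumed
 * to be wedged and the interface is reinitialized.
 */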
static void
sge_watchdog(struct sge_softc *sc)
{
	struct ifnet *ifp;

	SGE_LOCK_ASSERT(sc);
	if (sc->sge_timer == 0 || --sc->sge_timer > 0)
		return;

	ifp = sc->sge_ifp;
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
		if (1 || bootverbose)
			device_printf(sc->sge_dev,
			    "watchdog timeout (lost link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		sge_init_locked(sc);
		return;
	}
	device_printf(sc->sge_dev, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sge_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd))
		sge_start_locked(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sge_stop(struct sge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->sge_ifp;

	SGE_LOCK_ASSERT(sc);

	sc->sge_timer = 0;
	callout_stop(&sc->sge_stat_ch);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles are gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sge_flags &= ~SGE_FLAG_LINK;
	sge_list_rx_free(sc);
	sge_list_tx_free(sc);
}