/*-
 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/sge/if_sgereg.h>

MODULE_DEPEND(sge, pci, 1, 1, 1);
MODULE_DEPEND(sge, ether, 1, 1, 1);
MODULE_DEPEND(sge, miibus, 1, 1, 1);

/* "device miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct sge_type sge_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" },
	{ SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	sge_probe(device_t);
static int	sge_attach(device_t);
static int	sge_detach(device_t);
static int	sge_shutdown(device_t);
static int	sge_suspend(device_t);
static int	sge_resume(device_t);

static int	sge_miibus_readreg(device_t, int, int);
static int	sge_miibus_writereg(device_t, int, int, int);
static void	sge_miibus_statchg(device_t);

static int	sge_newbuf(struct sge_softc *, int);
static int	sge_encap(struct sge_softc *, struct mbuf **);
static __inline void
		sge_discard_rxbuf(struct sge_softc *, int);
static void	sge_rxeof(struct sge_softc *);
static void	sge_txeof(struct sge_softc *);
static void	sge_intr(void *);
static void	sge_tick(void *);
static void	sge_start(struct ifnet *);
static void	sge_start_locked(struct ifnet *);
static int	sge_ioctl(struct ifnet *, u_long, caddr_t);
static void	sge_init(void *);
static void	sge_init_locked(struct sge_softc *);
static void	sge_stop(struct sge_softc *);
static void	sge_watchdog(struct sge_softc *);
static int	sge_ifmedia_upd(struct ifnet *);
static void	sge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	sge_get_mac_addr_apc(struct sge_softc *, uint8_t *);
static int	sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *);
static uint16_t	sge_read_eeprom(struct sge_softc *, int);

static void	sge_rxfilter(struct sge_softc *);
static void	sge_setvlan(struct sge_softc *);
static void	sge_reset(struct sge_softc *);
static int	sge_list_rx_init(struct sge_softc *);
static int	sge_list_rx_free(struct sge_softc *);
static int	sge_list_tx_init(struct sge_softc *);
static int	sge_list_tx_free(struct sge_softc *);

static int	sge_dma_alloc(struct sge_softc *);
static void	sge_dma_free(struct sge_softc *);
static void	sge_dma_map_addr(void *, bus_dma_segment_t *, int, int);

static device_method_t sge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sge_probe),
	DEVMETHOD(device_attach,	sge_attach),
	DEVMETHOD(device_detach,	sge_detach),
	DEVMETHOD(device_suspend,	sge_suspend),
	DEVMETHOD(device_resume,	sge_resume),
	DEVMETHOD(device_shutdown,	sge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sge_driver = {
	"sge", sge_methods, sizeof(struct sge_softc)
};

static devclass_t sge_devclass;

DRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0);
DRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0);

/*
 * Register space access macros.
 */
#define	CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sge_res, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	bus_write_2(sc->sge_res, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	bus_write_1(sc->sge_res, reg, val)

#define	CSR_READ_4(sc, reg)		bus_read_4(sc->sge_res, reg)
#define	CSR_READ_2(sc, reg)		bus_read_2(sc->sge_res, reg)
#define	CSR_READ_1(sc, reg)		bus_read_1(sc->sge_res, reg)

/* Define to show Tx/Rx error status. */
#undef SGE_SHOW_ERRORS

#define	SGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

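/*
 * Bus_dma callback used when loading the descriptor rings: the rings
 * are required to map to a single segment, so this simply records the
 * segment's bus address in the caller-supplied bus_addr_t.
 */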
static void
sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *p;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	p = arg;
	*p = segs->ds_addr;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static uint16_t
sge_read_eeprom(struct sge_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big"));
	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
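	/*
	 * EEPROM access is a request/poll handshake: the write above
	 * posts a read request, the chip clears EI_REQ when the access
	 * has completed, and the result is then valid in the EI_DATA
	 * field of the same register.
	 */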
	DELAY(500);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev,
		    "EEPROM read timeout : 0x%08x\n", val);
		return (0xffff);
	}

	return ((val & EI_DATA) >> EI_DATA_SHIFT);
}

static int
sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = sge_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0) {
		device_printf(sc->sge_dev,
		    "invalid EEPROM signature : 0x%04x\n", val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	return (0);
}

/*
 * On SiS96x chipsets the Ethernet address is stored in APC CMOS RAM,
 * which is accessed through the ISA bridge.
 */
static int
sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	devclass_t pci;
	device_t bus, dev = NULL;
	device_t *kids;
	struct apc_tbl {
		uint16_t vid;
		uint16_t did;
	} *tp, apc_tbls[] = {
		{ SIS_VENDORID, 0x0965 },
		{ SIS_VENDORID, 0x0966 },
		{ SIS_VENDORID, 0x0968 }
	};
	uint8_t reg;
	int busnum, cnt, i, j, numkids;

	cnt = sizeof(apc_tbls) / sizeof(apc_tbls[0]);
	pci = devclass_find("pci");
	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_BRIDGE &&
			    pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
				tp = apc_tbls;
				for (j = 0; j < cnt; j++) {
					if (pci_get_vendor(dev) == tp->vid &&
					    pci_get_device(dev) == tp->did) {
						free(kids, M_TEMP);
						goto apc_found;
					}
					tp++;
				}
			}
		}
		free(kids, M_TEMP);
	}
	device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
	return (EINVAL);
apc_found:
	/* Enable port 0x78 and 0x79 to access APC registers. */
	reg = pci_read_config(dev, 0x48, 1);
	pci_write_config(dev, 0x48, reg & ~0x02, 1);
	DELAY(50);
	pci_read_config(dev, 0x48, 1);
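	/*
	 * The APC registers sit behind an index/data port pair: write
	 * the register index to port 0x78 and read the value from port
	 * 0x79.  The station address lives at indices 0x09-0x0e, and
	 * bit 7 of register 0x12 indicates an RGMII-attached PHY.
	 */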
	/* Read stored ethernet address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		outb(0x78, 0x09 + i);
		dest[i] = inb(0x79);
	}
	outb(0x78, 0x12);
	if ((inb(0x79) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	/* Restore access to APC registers. */
	pci_write_config(dev, 0x48, reg, 1);

	return (0);
#else
	return (EINVAL);
#endif
}

static int
sge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
		return (0);
	}
	return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
}

static int
sge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
	    GMI_OP_WR | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT)
		device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
	return (0);
}

static void
sge_miibus_statchg(device_t dev)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t ctl, speed;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sge_miibus);
	ifp = sc->sge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;
	speed = 0;
	sc->sge_flags &= ~SGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
			if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
				sc->sge_flags |= SGE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
		return;
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
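	/*
	 * Bits 24-27 of StationControl appear to select MAC/PHY
	 * interface timing.  The magic values below are inherited from
	 * the vendor's Linux and Solaris drivers; no public
	 * documentation for them is known.
	 */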
	if (speed == SC_SPEED_1000) {
		ctl |= 0x07000000;
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	} else {
		ctl |= 0x04000000;
		sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
	}
#ifdef notyet
	if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ctl |= SC_FDX;
		sc->sge_flags |= SGE_FLAG_FDX;
	} else
		sc->sge_flags &= ~SGE_FLAG_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

static void
sge_rxfilter(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast);
	rxfilt |= AcceptMyPhys;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= AcceptBroadcast;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;
		/* Now program new ones. */
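		/*
		 * The chip implements a 64-bit multicast hash filter.
		 * The most significant bit of the big-endian CRC of
		 * each address selects one of the two 32-bit hash
		 * registers, and the next five bits select the bit
		 * within it.
		 */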
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		if_maddr_runlock(ifp);
	}
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

static void
sge_setvlan(struct sge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
		return;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		rxfilt |= RXMAC_STRIP_VLAN;
	else
		rxfilt &= ~RXMAC_STRIP_VLAN;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
}

static void
sge_reset(struct sge_softc *sc)
{

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sge_probe(device_t dev)
{
	struct sge_type *t;

	t = sge_devs;
	while (t->sge_name != NULL) {
		if ((pci_get_vendor(dev) == t->sge_vid) &&
		    (pci_get_device(dev) == t->sge_did)) {
			device_set_desc(dev, t->sge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sge_attach(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error = 0, rid;

	sc = device_get_softc(dev);
	sc->sge_dev = dev;

	mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources. */
	sc->sge_res_id = PCIR_BAR(0);
	sc->sge_res_type = SYS_RES_MEMORY;
	sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type,
	    &sc->sge_res_id, RF_ACTIVE);
	if (sc->sge_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sge_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}
	sc->sge_rev = pci_get_revid(dev);
	if (pci_get_device(dev) == SIS_DEVICEID_190)
		sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190;
	/* Reset the adapter. */
	sge_reset(sc);

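	/*
	 * Bit 0 of PCI configuration register 0x73 apparently tells
	 * whether the station address is kept in the southbridge's APC
	 * CMOS RAM (SiS96x chipsets) rather than in an EEPROM attached
	 * to the NIC.
	 */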
	/* Get MAC address from APC CMOS RAM or the EEPROM. */
	if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0)
		sge_get_mac_addr_apc(sc, eaddr);
	else
		sge_get_mac_addr_eeprom(sc, eaddr);

	if ((error = sge_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sge_ioctl;
	ifp->if_start = sge_start;
	ifp->if_init = sge_init;
	ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = SGE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd,
	    sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN setup. */
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sge_intr, sc, &sc->sge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		sge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sge_detach(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->sge_ifp;
	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		SGE_LOCK(sc);
		sge_stop(sc);
		SGE_UNLOCK(sc);
		callout_drain(&sc->sge_stat_ch);
	}
	if (sc->sge_miibus)
		device_delete_child(dev, sc->sge_miibus);
	bus_generic_detach(dev);

	if (sc->sge_intrhand)
		bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand);
	if (sc->sge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq);
	if (sc->sge_res)
		bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id,
		    sc->sge_res);
	if (ifp)
		if_free(ifp);
	sge_dma_free(sc);
	mtx_destroy(&sc->sge_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
sge_shutdown(device_t dev)
{
	struct sge_softc *sc;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	sge_stop(sc);
	SGE_UNLOCK(sc);
	return (0);
}

static int
sge_suspend(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		sge_stop(sc);
	SGE_UNLOCK(sc);
	return (0);
}

static int
sge_resume(device_t dev)
{
	struct sge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;
	if ((ifp->if_flags & IFF_UP) != 0)
		sge_init_locked(sc);
	SGE_UNLOCK(sc);
	return (0);
}

static int
sge_dma_alloc(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int error, i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

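	/*
	 * All tags below descend from the parent tag above, which
	 * restricts every mapping to the low 4GB of the address space;
	 * the chip therefore only needs the low 32 bits of the ring
	 * and buffer bus addresses.
	 */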
	/* RX descriptor ring */
	error = bus_dma_tag_create(cd->sge_tag,
	    SGE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SGE_RX_RING_SZ, 1,		/* maxsize, nsegments */
	    SGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_rx_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load DMA map for RX ring. */
	error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &cd->sge_rx_dmamap);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr,
	    &ld->sge_rx_paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}

	/* TX descriptor ring */
	error = bus_dma_tag_create(cd->sge_tag,
	    SGE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SGE_TX_RING_SZ, 1,		/* maxsize, nsegments */
	    SGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_tx_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load DMA map for TX ring. */
	error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &cd->sge_tx_dmamap);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr,
	    &ld->sge_tx_paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS,
	    SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Tx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Rx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		txd->tx_ndesc = 0;
		error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sge_dev,
			    "could not create Tx DMA map.\n");
			goto fail;
		}
	}
	/* Create spare DMA map for Rx buffer. */
	error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create spare Rx DMA map.\n");
		goto fail;
	}
	/* Create DMA maps for Rx buffers. */
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
		    &rxd->rx_dmamap);
		if (error) {
			device_printf(sc->sge_dev,
			    "could not create Rx DMA map.\n");
			goto fail;
		}
	}
fail:
	return (error);
}

static void
sge_dma_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
	/* Rx ring. */
	if (cd->sge_rx_tag != NULL) {
		if (ld->sge_rx_paddr != 0)
			bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap);
		if (ld->sge_rx_ring != NULL)
			bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring,
			    cd->sge_rx_dmamap);
		ld->sge_rx_ring = NULL;
		ld->sge_rx_paddr = 0;
		bus_dma_tag_destroy(cd->sge_rx_tag);
		cd->sge_rx_tag = NULL;
	}
	/* Tx ring. */
	if (cd->sge_tx_tag != NULL) {
		if (ld->sge_tx_paddr != 0)
			bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap);
		if (ld->sge_tx_ring != NULL)
			bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring,
			    cd->sge_tx_dmamap);
		ld->sge_tx_ring = NULL;
		ld->sge_tx_paddr = 0;
		bus_dma_tag_destroy(cd->sge_tx_tag);
		cd->sge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (cd->sge_rxmbuf_tag != NULL) {
		for (i = 0; i < SGE_RX_RING_CNT; i++) {
			rxd = &cd->sge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_rxmbuf_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (cd->sge_rx_spare_map != NULL) {
			bus_dmamap_destroy(cd->sge_rxmbuf_tag,
			    cd->sge_rx_spare_map);
			cd->sge_rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(cd->sge_rxmbuf_tag);
		cd->sge_rxmbuf_tag = NULL;
	}
	/* Tx buffers. */
	if (cd->sge_txmbuf_tag != NULL) {
		for (i = 0; i < SGE_TX_RING_CNT; i++) {
			txd = &cd->sge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_txmbuf_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(cd->sge_txmbuf_tag);
		cd->sge_txmbuf_tag = NULL;
	}
	if (cd->sge_tag != NULL)
		bus_dma_tag_destroy(cd->sge_tag);
	cd->sge_tag = NULL;
}

/*
 * Initialize the TX descriptors.
 */
static int
sge_list_tx_init(struct sge_softc *sc)
{
	struct sge_list_data *ld;
	struct sge_chain_data *cd;

	SGE_LOCK_ASSERT(sc);
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;
	bzero(ld->sge_tx_ring, SGE_TX_RING_SZ);
	ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END);
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->sge_tx_prod = 0;
	cd->sge_tx_cons = 0;
	cd->sge_tx_cnt = 0;
	return (0);
}

static int
sge_list_tx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * has the RING_END flag set.
 */
static int
sge_list_rx_init(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	cd->sge_rx_cons = 0;
	bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		if (sge_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
sge_list_rx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(cd->sge_rxmbuf_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	return (0);
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	SGE_LOCK_ASSERT(sc);

	cd = &sc->sge_cdata;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SGE_RX_BUF_ALIGN);
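	/*
	 * Load the new mbuf with the spare map first; if the load
	 * fails, the ring slot still owns its old mbuf and mapping.
	 * Only on success are the slot's map and the spare map
	 * swapped.
	 */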
	error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
	    cd->sge_rx_spare_map, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	rxd = &cd->sge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = cd->sge_rx_spare_map;
	cd->sge_rx_spare_map = map;
	bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	desc = &sc->sge_ldata.sge_rx_ring[prod];
	desc->sge_sts_size = 0;
	desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
	desc->sge_flags = htole32(segs[0].ds_len);
	if (prod == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
	return (0);
}

static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
	struct sge_desc *desc;

	desc = &sc->sge_ldata.sge_rx_ring[index];
	desc->sge_sts_size = 0;
	desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
	if (index == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
sge_rxeof(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	struct sge_chain_data *cd;
	struct sge_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	int cons, prog;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	cd = &sc->sge_cdata;

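	/*
	 * Descriptors are handed back and forth with the chip through
	 * the RDC_OWN bit: the chip owns a descriptor until it clears
	 * the bit.  Sync the ring before examining it, and stop the
	 * scan at the first descriptor the chip still owns.
	 */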
	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_rx_cons;
	for (prog = 0; prog < SGE_RX_RING_CNT; prog++,
	    SGE_INC(cons, SGE_RX_RING_CNT)) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		cur_rx = &sc->sge_ldata.sge_rx_ring[cons];
		rxinfo = le32toh(cur_rx->sge_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = le32toh(cur_rx->sge_sts_size);
		if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 ||
		    SGE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
			    RX_ERR_BITS);
#endif
			sge_discard_rxbuf(sc, cons);
			ifp->if_ierrors++;
			continue;
		}
		m = cd->sge_rxdesc[cons].rx_m;
		if (sge_newbuf(sc, cons) != 0) {
			sge_discard_rxbuf(sc, cons);
			ifp->if_iqdrops++;
			continue;
		}
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((rxinfo & RDC_IP_CSUM) != 0 &&
			    (rxinfo & RDC_IP_CSUM_OK) != 0)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
			if (((rxinfo & RDC_TCP_CSUM) != 0 &&
			    (rxinfo & RDC_TCP_CSUM_OK) != 0) ||
			    ((rxinfo & RDC_UDP_CSUM) != 0 &&
			    (rxinfo & RDC_UDP_CSUM_OK) != 0)) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		/* Check for VLAN tagged frame. */
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (rxstat & RDS_VLAN) != 0) {
			m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK;
			m->m_flags |= M_VLANTAG;
		}
		/*
		 * Account for the 10 bytes of auto padding used to
		 * align the IP header on a 32-bit boundary.  Also note
		 * that the CRC bytes are automatically removed by the
		 * hardware.
		 */
		m->m_data += SGE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
		    SGE_RX_PAD_BYTES;
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;
		SGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SGE_LOCK(sc);
	}

	if (prog > 0) {
		bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		cd->sge_rx_cons = cons;
	}
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
sge_txeof(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct sge_list_data *ld;
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	uint32_t txstat;
	int cons, nsegs, prod;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;

	if (cd->sge_tx_cnt == 0)
		return;
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_tx_cons;
	prod = cd->sge_tx_prod;
	for (; cons != prod;) {
		txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;
		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated by the controller, so the
		 * driver must skip the entire chain of buffers for the
		 * transmitted frame.  In other words, the TDC_OWN bit
		 * is valid only in the first descriptor of a
		 * multi-descriptor transmission.
		 */
		if (SGE_TX_ERROR(txstat) != 0) {
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Tx error : 0x%b\n",
			    txstat, TX_ERR_BITS);
#endif
			ifp->if_oerrors++;
		} else {
#ifdef notyet
			ifp->if_collisions += (txstat & 0xFFFF) - 1;
#endif
			ifp->if_opackets++;
		}
		txd = &cd->sge_txdesc[cons];
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			ld->sge_tx_ring[cons].sge_cmdsts = 0;
			SGE_INC(cons, SGE_TX_RING_CNT);
		}
		/* Reclaim transmitted mbuf. */
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf\n", __func__));
		bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		cd->sge_tx_cnt -= txd->tx_ndesc;
		KASSERT(cd->sge_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	cd->sge_tx_cons = cons;
	if (cd->sge_tx_cnt == 0)
		sc->sge_timer = 0;
}

static void
sge_tick(void *arg)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = arg;
	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	mii = device_get_softc(sc->sge_miibus);
	mii_tick(mii);
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
		sge_miibus_statchg(sc->sge_dev);
		if ((sc->sge_flags & SGE_FLAG_LINK) != 0 &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sge_start_locked(ifp);
	}
	/*
	 * Reclaim transmitted frames here, as we do not request a Tx
	 * completion interrupt for every queued frame; this keeps the
	 * interrupt rate down.
	 */
	sge_txeof(sc);
	sge_watchdog(sc);
	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

static void
sge_intr(void *arg)
{
	struct sge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) {
		/* Not ours. */
		SGE_UNLOCK(sc);
		return;
	}
	/* Acknowledge interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);
	/*
	 * The controller seems to support some kind of interrupt
	 * moderation mechanism, but we still don't know how to enable
	 * it.  To reduce the number of interrupts generated under
	 * load, we check for pending interrupts in a loop.  This
	 * increases the number of register accesses and is not the
	 * correct way to handle interrupt moderation, but there seems
	 * to be no alternative at this time.
	 */
	for (;;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			sge_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			sge_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SGE_INTRS) == 0)
			break;
		/* Acknowledge interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sge_start_locked(ifp);
	}
	SGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sge_encap(struct sge_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_txdesc *txd;
	bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
	uint32_t cflags, mss;
	int error, i, nsegs, prod, si;

	SGE_LOCK_ASSERT(sc);

	si = prod = sc->sge_cdata.sge_tx_prod;
	txd = &sc->sge_cdata.sge_txdesc[prod];
	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check the existence of a VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		m = m_pullup(m, poff + (tcp->th_off << 2));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		/*
		 * Reset the IP checksum and recompute the TCP pseudo
		 * checksum that the NDIS specification requires.
		 */
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		ip->ip_sum = 0;
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(IPPROTO_TCP));
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	KASSERT(nsegs != 0, ("zero segment returned"));
	/* Check descriptor overrun. */
	if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		cflags |= TDC_LS;
		mss = (uint32_t)m->m_pkthdr.tso_segsz;
		mss <<= 16;
	} else {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= TDC_IP_CSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= TDC_TCP_CSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= TDC_UDP_CSUM;
	}
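	/*
	 * Build the chain with TDC_OWN set on every descriptor except
	 * the first.  The first descriptor's owner bit is set only
	 * after the whole chain has been written, so the chip never
	 * sees a partially built frame.
	 */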
	for (i = 0; i < nsegs; i++) {
		desc = &sc->sge_ldata.sge_tx_ring[prod];
		if (i == 0) {
			desc->sge_sts_size = htole32(m->m_pkthdr.len | mss);
			desc->sge_cmdsts = 0;
		} else {
			desc->sge_sts_size = 0;
			desc->sge_cmdsts = htole32(TDC_OWN);
		}
		desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr));
		desc->sge_flags = htole32(txsegs[i].ds_len);
		if (prod == SGE_TX_RING_CNT - 1)
			desc->sge_flags |= htole32(RING_END);
		sc->sge_cdata.sge_tx_cnt++;
		SGE_INC(prod, SGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->sge_cdata.sge_tx_prod = prod;

	desc = &sc->sge_ldata.sge_tx_ring[si];
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= m->m_pkthdr.ether_vtag;
		desc->sge_sts_size |= htole32(TDS_INS_VLAN);
	}
	desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
#if 1
	if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
		desc->sge_cmdsts |= htole32(TDC_BST);
#else
	if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
		desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
		if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
			desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
	}
#endif
	/* Request interrupt and give ownership to controller. */
	desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
	txd->tx_m = m;
	txd->tx_ndesc = nsegs;
	return (0);
}

static void
sge_start(struct ifnet *ifp)
{
	struct sge_softc *sc;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	sge_start_locked(ifp);
	SGE_UNLOCK(sc);
}

static void
sge_start_locked(struct ifnet *ifp)
{
	struct sge_softc *sc;
	struct mbuf *m_head;
	int queued = 0;

	sc = ifp->if_softc;
	SGE_LOCK_ASSERT(sc);

	if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

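	/*
	 * Keep SGE_MAXTXSEGS descriptors in reserve so that a
	 * maximally fragmented frame can always be encapsulated; if
	 * fewer remain, set IFF_DRV_OACTIVE and wait for sge_txeof()
	 * to reclaim descriptors.
	 */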
	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT -
		    SGE_MAXTXSEGS)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		if (sge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		/*
		 * If there's a BPF listener, bounce a copy of this
		 * frame to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued > 0) {
		bus_dmamap_sync(sc->sge_cdata.sge_tx_tag,
		    sc->sge_cdata.sge_tx_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		sc->sge_timer = 5;
	}
}

static void
sge_init(void *arg)
{
	struct sge_softc *sc;

	sc = arg;
	SGE_LOCK(sc);
	sge_init_locked(sc);
	SGE_UNLOCK(sc);
}

static void
sge_init_locked(struct sge_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint16_t rxfilt;
	int i;

	SGE_LOCK_ASSERT(sc);
	ifp = sc->sge_ifp;
	mii = device_get_softc(sc->sge_miibus);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sge_stop(sc);
	sge_reset(sc);

	/* Init circular RX list. */
	if (sge_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->sge_dev, "no memory for Rx buffers\n");
		sge_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	sge_list_tx_init(sc);
	/*
	 * Load the address of the RX and TX lists.
	 */
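	/*
	 * Only the low 32 bits are programmed; the parent DMA tag
	 * guarantees that both rings are allocated below 4GB.
	 */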
	CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr));
	CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr));

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	/* Allow receiving VLAN frames. */
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SGE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	sge_rxfilter(sc);
	sge_setvlan(sc);

	/* Initialize default speed/duplex information. */
	if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0)
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	sc->sge_flags |= SGE_FLAG_FDX;
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0)
		CSR_WRITE_4(sc, StationControl, 0x04008001);
	else
		CSR_WRITE_4(sc, StationControl, 0x04000001);
	/*
	 * XXX Try to mitigate interrupts.
	 */
	CSR_WRITE_4(sc, IntrControl, 0x08880000);
#ifdef notyet
	if (sc->sge_intrcontrol != 0)
		CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol);
	if (sc->sge_intrtimer != 0)
		CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer);
#endif

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
	CSR_WRITE_4(sc, IntrMask, SGE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->sge_flags &= ~SGE_FLAG_LINK;
	mii_mediachg(mii);
	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

/*
 * Set media options.
 */
static int
sge_ifmedia_upd(struct ifnet *ifp)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	mii = device_get_softc(sc->sge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	SGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	SGE_LOCK(sc);
	mii = device_get_softc(sc->sge_miibus);
	if ((ifp->if_flags & IFF_UP) == 0) {
		SGE_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	SGE_UNLOCK(sc);
}

static int
sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask, reinit;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		SGE_LOCK(sc);
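		/*
		 * When the interface is already running, a flag change
		 * confined to IFF_PROMISC or IFF_ALLMULTI only needs
		 * the Rx filter reprogrammed; any other change triggers
		 * a full reinitialization.
		 */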
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				sge_rxfilter(sc);
			else
				sge_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			sge_stop(sc);
		sc->sge_if_flags = ifp->if_flags;
		SGE_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		SGE_LOCK(sc);
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= SGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~SGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			/*
			 * For an unknown reason, toggling VLAN hardware
			 * tagging requires interface reinitialization.
			 */
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
				ifp->if_capenable &=
				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
			reinit = 1;
		}
		if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			sge_init_locked(sc);
		}
		SGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			sge_rxfilter(sc);
		SGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

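/*
 * The transmit watchdog is polled from sge_tick() once a second:
 * sge_start_locked() arms sge_timer when frames are queued and
 * sge_txeof() clears it when the ring drains, so reaching zero here
 * means a transmission has been stuck for about five seconds.
 */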
static void
sge_watchdog(struct sge_softc *sc)
{
	struct ifnet *ifp;

	SGE_LOCK_ASSERT(sc);
	if (sc->sge_timer == 0 || --sc->sge_timer > 0)
		return;

	ifp = sc->sge_ifp;
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
		if (1 || bootverbose)
			device_printf(sc->sge_dev,
			    "watchdog timeout (lost link)\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		sge_init_locked(sc);
		return;
	}
	device_printf(sc->sge_dev, "watchdog timeout\n");
	ifp->if_oerrors++;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sge_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd))
		sge_start_locked(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sge_stop(struct sge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->sge_ifp;

	SGE_LOCK_ASSERT(sc);

	sc->sge_timer = 0;
	callout_stop(&sc->sge_stat_ch);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sge_flags &= ~SGE_FLAG_LINK;
	sge_list_rx_free(sc);
	sge_list_tx_free(sc);
}