1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com> 5 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net> 6 * Copyright (c) 1997, 1998, 1999 7 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Bill Paul. 20 * 4. Neither the name of the author nor the names of any co-contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 27 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR 28 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 35 * OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 /* 42 * SiS 190/191 PCI Ethernet NIC driver. 43 * 44 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original 45 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by 46 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu 47 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for 48 * review and very useful comments. 49 * 50 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the 51 * Linux and Solaris drivers. 52 */ 53 54 #include <sys/param.h> 55 #include <sys/systm.h> 56 #include <sys/bus.h> 57 #include <sys/endian.h> 58 #include <sys/kernel.h> 59 #include <sys/lock.h> 60 #include <sys/malloc.h> 61 #include <sys/mbuf.h> 62 #include <sys/module.h> 63 #include <sys/mutex.h> 64 #include <sys/rman.h> 65 #include <sys/socket.h> 66 #include <sys/sockio.h> 67 68 #include <net/bpf.h> 69 #include <net/if.h> 70 #include <net/if_var.h> 71 #include <net/if_arp.h> 72 #include <net/ethernet.h> 73 #include <net/if_dl.h> 74 #include <net/if_media.h> 75 #include <net/if_types.h> 76 #include <net/if_vlan_var.h> 77 78 #include <netinet/in.h> 79 #include <netinet/in_systm.h> 80 #include <netinet/ip.h> 81 #include <netinet/tcp.h> 82 83 #include <machine/bus.h> 84 #include <machine/in_cksum.h> 85 86 #include <dev/mii/mii.h> 87 #include <dev/mii/miivar.h> 88 89 #include <dev/pci/pcireg.h> 90 #include <dev/pci/pcivar.h> 91 92 #include <dev/sge/if_sgereg.h> 93 94 MODULE_DEPEND(sge, pci, 1, 1, 1); 95 MODULE_DEPEND(sge, ether, 1, 1, 1); 96 MODULE_DEPEND(sge, miibus, 1, 1, 1); 97 98 /* "device miibus0" required. See GENERIC if you get errors here. */ 99 #include "miibus_if.h" 100 101 /* 102 * Various supported device vendors/types and their names. 103 */ 104 static struct sge_type sge_devs[] = { 105 { SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" }, 106 { SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" }, 107 { 0, 0, NULL } 108 }; 109 110 static int sge_probe(device_t); 111 static int sge_attach(device_t); 112 static int sge_detach(device_t); 113 static int sge_shutdown(device_t); 114 static int sge_suspend(device_t); 115 static int sge_resume(device_t); 116 117 static int sge_miibus_readreg(device_t, int, int); 118 static int sge_miibus_writereg(device_t, int, int, int); 119 static void sge_miibus_statchg(device_t); 120 121 static int sge_newbuf(struct sge_softc *, int); 122 static int sge_encap(struct sge_softc *, struct mbuf **); 123 static __inline void 124 sge_discard_rxbuf(struct sge_softc *, int); 125 static void sge_rxeof(struct sge_softc *); 126 static void sge_txeof(struct sge_softc *); 127 static void sge_intr(void *); 128 static void sge_tick(void *); 129 static void sge_start(struct ifnet *); 130 static void sge_start_locked(struct ifnet *); 131 static int sge_ioctl(struct ifnet *, u_long, caddr_t); 132 static void sge_init(void *); 133 static void sge_init_locked(struct sge_softc *); 134 static void sge_stop(struct sge_softc *); 135 static void sge_watchdog(struct sge_softc *); 136 static int sge_ifmedia_upd(struct ifnet *); 137 static void sge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 138 139 static int sge_get_mac_addr_apc(struct sge_softc *, uint8_t *); 140 static int sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *); 141 static uint16_t sge_read_eeprom(struct sge_softc *, int); 142 143 static void sge_rxfilter(struct sge_softc *); 144 static void sge_setvlan(struct sge_softc *); 145 static void sge_reset(struct sge_softc *); 146 static int sge_list_rx_init(struct sge_softc *); 147 static int sge_list_rx_free(struct sge_softc *); 148 static int sge_list_tx_init(struct sge_softc *); 149 static int sge_list_tx_free(struct sge_softc *); 150 151 static int sge_dma_alloc(struct sge_softc *); 152 static void sge_dma_free(struct sge_softc *); 153 static void sge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 154 155 static device_method_t sge_methods[] = { 156 /* Device interface */ 157 DEVMETHOD(device_probe, sge_probe), 158 DEVMETHOD(device_attach, sge_attach), 159 DEVMETHOD(device_detach, sge_detach), 160 DEVMETHOD(device_suspend, sge_suspend), 161 DEVMETHOD(device_resume, sge_resume), 162 DEVMETHOD(device_shutdown, sge_shutdown), 163 164 /* MII interface */ 165 DEVMETHOD(miibus_readreg, sge_miibus_readreg), 166 DEVMETHOD(miibus_writereg, sge_miibus_writereg), 167 DEVMETHOD(miibus_statchg, sge_miibus_statchg), 168 169 DEVMETHOD_END 170 }; 171 172 static driver_t sge_driver = { 173 "sge", sge_methods, sizeof(struct sge_softc) 174 }; 175 176 static devclass_t sge_devclass; 177 178 DRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0); 179 DRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0); 180 181 /* 182 * Register space access macros. 183 */ 184 #define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sge_res, reg, val) 185 #define CSR_WRITE_2(sc, reg, val) bus_write_2(sc->sge_res, reg, val) 186 #define CSR_WRITE_1(cs, reg, val) bus_write_1(sc->sge_res, reg, val) 187 188 #define CSR_READ_4(sc, reg) bus_read_4(sc->sge_res, reg) 189 #define CSR_READ_2(sc, reg) bus_read_2(sc->sge_res, reg) 190 #define CSR_READ_1(sc, reg) bus_read_1(sc->sge_res, reg) 191 192 /* Define to show Tx/Rx error status. */ 193 #undef SGE_SHOW_ERRORS 194 195 #define SGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 196 197 static void 198 sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 199 { 200 bus_addr_t *p; 201 202 if (error != 0) 203 return; 204 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 205 p = arg; 206 *p = segs->ds_addr; 207 } 208 209 /* 210 * Read a sequence of words from the EEPROM. 211 */ 212 static uint16_t 213 sge_read_eeprom(struct sge_softc *sc, int offset) 214 { 215 uint32_t val; 216 int i; 217 218 KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big")); 219 CSR_WRITE_4(sc, ROMInterface, 220 EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT)); 221 DELAY(500); 222 for (i = 0; i < SGE_TIMEOUT; i++) { 223 val = CSR_READ_4(sc, ROMInterface); 224 if ((val & EI_REQ) == 0) 225 break; 226 DELAY(100); 227 } 228 if (i == SGE_TIMEOUT) { 229 device_printf(sc->sge_dev, 230 "EEPROM read timeout : 0x%08x\n", val); 231 return (0xffff); 232 } 233 234 return ((val & EI_DATA) >> EI_DATA_SHIFT); 235 } 236 237 static int 238 sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest) 239 { 240 uint16_t val; 241 int i; 242 243 val = sge_read_eeprom(sc, EEPROMSignature); 244 if (val == 0xffff || val == 0) { 245 device_printf(sc->sge_dev, 246 "invalid EEPROM signature : 0x%04x\n", val); 247 return (EINVAL); 248 } 249 250 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 251 val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2); 252 dest[i + 0] = (uint8_t)val; 253 dest[i + 1] = (uint8_t)(val >> 8); 254 } 255 256 if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0) 257 sc->sge_flags |= SGE_FLAG_RGMII; 258 return (0); 259 } 260 261 /* 262 * For SiS96x, APC CMOS RAM is used to store ethernet address. 263 * APC CMOS RAM is accessed through ISA bridge. 264 */ 265 static int 266 sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest) 267 { 268 #if defined(__amd64__) || defined(__i386__) 269 devclass_t pci; 270 device_t bus, dev = NULL; 271 device_t *kids; 272 struct apc_tbl { 273 uint16_t vid; 274 uint16_t did; 275 } *tp, apc_tbls[] = { 276 { SIS_VENDORID, 0x0965 }, 277 { SIS_VENDORID, 0x0966 }, 278 { SIS_VENDORID, 0x0968 } 279 }; 280 uint8_t reg; 281 int busnum, i, j, numkids; 282 283 pci = devclass_find("pci"); 284 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) { 285 bus = devclass_get_device(pci, busnum); 286 if (!bus) 287 continue; 288 if (device_get_children(bus, &kids, &numkids) != 0) 289 continue; 290 for (i = 0; i < numkids; i++) { 291 dev = kids[i]; 292 if (pci_get_class(dev) == PCIC_BRIDGE && 293 pci_get_subclass(dev) == PCIS_BRIDGE_ISA) { 294 tp = apc_tbls; 295 for (j = 0; j < nitems(apc_tbls); j++) { 296 if (pci_get_vendor(dev) == tp->vid && 297 pci_get_device(dev) == tp->did) { 298 free(kids, M_TEMP); 299 goto apc_found; 300 } 301 tp++; 302 } 303 } 304 } 305 free(kids, M_TEMP); 306 } 307 device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n"); 308 return (EINVAL); 309 apc_found: 310 /* Enable port 0x78 and 0x79 to access APC registers. */ 311 reg = pci_read_config(dev, 0x48, 1); 312 pci_write_config(dev, 0x48, reg & ~0x02, 1); 313 DELAY(50); 314 pci_read_config(dev, 0x48, 1); 315 /* Read stored ethernet address. */ 316 for (i = 0; i < ETHER_ADDR_LEN; i++) { 317 outb(0x78, 0x09 + i); 318 dest[i] = inb(0x79); 319 } 320 outb(0x78, 0x12); 321 if ((inb(0x79) & 0x80) != 0) 322 sc->sge_flags |= SGE_FLAG_RGMII; 323 /* Restore access to APC registers. */ 324 pci_write_config(dev, 0x48, reg, 1); 325 326 return (0); 327 #else 328 return (EINVAL); 329 #endif 330 } 331 332 static int 333 sge_miibus_readreg(device_t dev, int phy, int reg) 334 { 335 struct sge_softc *sc; 336 uint32_t val; 337 int i; 338 339 sc = device_get_softc(dev); 340 CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) | 341 (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ); 342 DELAY(10); 343 for (i = 0; i < SGE_TIMEOUT; i++) { 344 val = CSR_READ_4(sc, GMIIControl); 345 if ((val & GMI_REQ) == 0) 346 break; 347 DELAY(10); 348 } 349 if (i == SGE_TIMEOUT) { 350 device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg); 351 return (0); 352 } 353 return ((val & GMI_DATA) >> GMI_DATA_SHIFT); 354 } 355 356 static int 357 sge_miibus_writereg(device_t dev, int phy, int reg, int data) 358 { 359 struct sge_softc *sc; 360 uint32_t val; 361 int i; 362 363 sc = device_get_softc(dev); 364 CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) | 365 (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) | 366 GMI_OP_WR | GMI_REQ); 367 DELAY(10); 368 for (i = 0; i < SGE_TIMEOUT; i++) { 369 val = CSR_READ_4(sc, GMIIControl); 370 if ((val & GMI_REQ) == 0) 371 break; 372 DELAY(10); 373 } 374 if (i == SGE_TIMEOUT) 375 device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg); 376 return (0); 377 } 378 379 static void 380 sge_miibus_statchg(device_t dev) 381 { 382 struct sge_softc *sc; 383 struct mii_data *mii; 384 struct ifnet *ifp; 385 uint32_t ctl, speed; 386 387 sc = device_get_softc(dev); 388 mii = device_get_softc(sc->sge_miibus); 389 ifp = sc->sge_ifp; 390 if (mii == NULL || ifp == NULL || 391 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 392 return; 393 speed = 0; 394 sc->sge_flags &= ~SGE_FLAG_LINK; 395 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 396 (IFM_ACTIVE | IFM_AVALID)) { 397 switch (IFM_SUBTYPE(mii->mii_media_active)) { 398 case IFM_10_T: 399 sc->sge_flags |= SGE_FLAG_LINK; 400 speed = SC_SPEED_10; 401 break; 402 case IFM_100_TX: 403 sc->sge_flags |= SGE_FLAG_LINK; 404 speed = SC_SPEED_100; 405 break; 406 case IFM_1000_T: 407 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) { 408 sc->sge_flags |= SGE_FLAG_LINK; 409 speed = SC_SPEED_1000; 410 } 411 break; 412 default: 413 break; 414 } 415 } 416 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) 417 return; 418 /* Reprogram MAC to resolved speed/duplex/flow-control parameters. */ 419 ctl = CSR_READ_4(sc, StationControl); 420 ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK); 421 if (speed == SC_SPEED_1000) { 422 ctl |= 0x07000000; 423 sc->sge_flags |= SGE_FLAG_SPEED_1000; 424 } else { 425 ctl |= 0x04000000; 426 sc->sge_flags &= ~SGE_FLAG_SPEED_1000; 427 } 428 #ifdef notyet 429 if ((sc->sge_flags & SGE_FLAG_GMII) != 0) 430 ctl |= 0x03000000; 431 #endif 432 ctl |= speed; 433 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 434 ctl |= SC_FDX; 435 sc->sge_flags |= SGE_FLAG_FDX; 436 } else 437 sc->sge_flags &= ~SGE_FLAG_FDX; 438 CSR_WRITE_4(sc, StationControl, ctl); 439 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) { 440 CSR_WRITE_4(sc, RGMIIDelay, 0x0441); 441 CSR_WRITE_4(sc, RGMIIDelay, 0x0440); 442 } 443 } 444 445 static void 446 sge_rxfilter(struct sge_softc *sc) 447 { 448 struct ifnet *ifp; 449 struct ifmultiaddr *ifma; 450 uint32_t crc, hashes[2]; 451 uint16_t rxfilt; 452 453 SGE_LOCK_ASSERT(sc); 454 455 ifp = sc->sge_ifp; 456 rxfilt = CSR_READ_2(sc, RxMacControl); 457 rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast); 458 rxfilt |= AcceptMyPhys; 459 if ((ifp->if_flags & IFF_BROADCAST) != 0) 460 rxfilt |= AcceptBroadcast; 461 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 462 if ((ifp->if_flags & IFF_PROMISC) != 0) 463 rxfilt |= AcceptAllPhys; 464 rxfilt |= AcceptMulticast; 465 hashes[0] = 0xFFFFFFFF; 466 hashes[1] = 0xFFFFFFFF; 467 } else { 468 rxfilt |= AcceptMulticast; 469 hashes[0] = hashes[1] = 0; 470 /* Now program new ones. */ 471 if_maddr_rlock(ifp); 472 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 473 if (ifma->ifma_addr->sa_family != AF_LINK) 474 continue; 475 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 476 ifma->ifma_addr), ETHER_ADDR_LEN); 477 hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 478 } 479 if_maddr_runlock(ifp); 480 } 481 CSR_WRITE_2(sc, RxMacControl, rxfilt); 482 CSR_WRITE_4(sc, RxHashTable, hashes[0]); 483 CSR_WRITE_4(sc, RxHashTable2, hashes[1]); 484 } 485 486 static void 487 sge_setvlan(struct sge_softc *sc) 488 { 489 struct ifnet *ifp; 490 uint16_t rxfilt; 491 492 SGE_LOCK_ASSERT(sc); 493 494 ifp = sc->sge_ifp; 495 if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0) 496 return; 497 rxfilt = CSR_READ_2(sc, RxMacControl); 498 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 499 rxfilt |= RXMAC_STRIP_VLAN; 500 else 501 rxfilt &= ~RXMAC_STRIP_VLAN; 502 CSR_WRITE_2(sc, RxMacControl, rxfilt); 503 } 504 505 static void 506 sge_reset(struct sge_softc *sc) 507 { 508 509 CSR_WRITE_4(sc, IntrMask, 0); 510 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 511 512 /* Soft reset. */ 513 CSR_WRITE_4(sc, IntrControl, 0x8000); 514 CSR_READ_4(sc, IntrControl); 515 DELAY(100); 516 CSR_WRITE_4(sc, IntrControl, 0); 517 /* Stop MAC. */ 518 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 519 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 520 521 CSR_WRITE_4(sc, IntrMask, 0); 522 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 523 524 CSR_WRITE_4(sc, GMIIControl, 0); 525 } 526 527 /* 528 * Probe for an SiS chip. Check the PCI vendor and device 529 * IDs against our list and return a device name if we find a match. 530 */ 531 static int 532 sge_probe(device_t dev) 533 { 534 struct sge_type *t; 535 536 t = sge_devs; 537 while (t->sge_name != NULL) { 538 if ((pci_get_vendor(dev) == t->sge_vid) && 539 (pci_get_device(dev) == t->sge_did)) { 540 device_set_desc(dev, t->sge_name); 541 return (BUS_PROBE_DEFAULT); 542 } 543 t++; 544 } 545 546 return (ENXIO); 547 } 548 549 /* 550 * Attach the interface. Allocate softc structures, do ifmedia 551 * setup and ethernet/BPF attach. 552 */ 553 static int 554 sge_attach(device_t dev) 555 { 556 struct sge_softc *sc; 557 struct ifnet *ifp; 558 uint8_t eaddr[ETHER_ADDR_LEN]; 559 int error = 0, rid; 560 561 sc = device_get_softc(dev); 562 sc->sge_dev = dev; 563 564 mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 565 MTX_DEF); 566 callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0); 567 568 /* 569 * Map control/status registers. 570 */ 571 pci_enable_busmaster(dev); 572 573 /* Allocate resources. */ 574 sc->sge_res_id = PCIR_BAR(0); 575 sc->sge_res_type = SYS_RES_MEMORY; 576 sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type, 577 &sc->sge_res_id, RF_ACTIVE); 578 if (sc->sge_res == NULL) { 579 device_printf(dev, "couldn't allocate resource\n"); 580 error = ENXIO; 581 goto fail; 582 } 583 584 rid = 0; 585 sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 586 RF_SHAREABLE | RF_ACTIVE); 587 if (sc->sge_irq == NULL) { 588 device_printf(dev, "couldn't allocate IRQ resources\n"); 589 error = ENXIO; 590 goto fail; 591 } 592 sc->sge_rev = pci_get_revid(dev); 593 if (pci_get_device(dev) == SIS_DEVICEID_190) 594 sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190; 595 /* Reset the adapter. */ 596 sge_reset(sc); 597 598 /* Get MAC address from the EEPROM. */ 599 if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0) 600 sge_get_mac_addr_apc(sc, eaddr); 601 else 602 sge_get_mac_addr_eeprom(sc, eaddr); 603 604 if ((error = sge_dma_alloc(sc)) != 0) 605 goto fail; 606 607 ifp = sc->sge_ifp = if_alloc(IFT_ETHER); 608 if (ifp == NULL) { 609 device_printf(dev, "cannot allocate ifnet structure.\n"); 610 error = ENOSPC; 611 goto fail; 612 } 613 ifp->if_softc = sc; 614 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 615 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 616 ifp->if_ioctl = sge_ioctl; 617 ifp->if_start = sge_start; 618 ifp->if_init = sge_init; 619 ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1; 620 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 621 IFQ_SET_READY(&ifp->if_snd); 622 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4; 623 ifp->if_hwassist = SGE_CSUM_FEATURES | CSUM_TSO; 624 ifp->if_capenable = ifp->if_capabilities; 625 /* 626 * Do MII setup. 627 */ 628 error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd, 629 sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 630 if (error != 0) { 631 device_printf(dev, "attaching PHYs failed\n"); 632 goto fail; 633 } 634 635 /* 636 * Call MI attach routine. 637 */ 638 ether_ifattach(ifp, eaddr); 639 640 /* VLAN setup. */ 641 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | 642 IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; 643 ifp->if_capenable = ifp->if_capabilities; 644 /* Tell the upper layer(s) we support long frames. */ 645 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 646 647 /* Hook interrupt last to avoid having to lock softc */ 648 error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE, 649 NULL, sge_intr, sc, &sc->sge_intrhand); 650 if (error) { 651 device_printf(dev, "couldn't set up irq\n"); 652 ether_ifdetach(ifp); 653 goto fail; 654 } 655 656 fail: 657 if (error) 658 sge_detach(dev); 659 660 return (error); 661 } 662 663 /* 664 * Shutdown hardware and free up resources. This can be called any 665 * time after the mutex has been initialized. It is called in both 666 * the error case in attach and the normal detach case so it needs 667 * to be careful about only freeing resources that have actually been 668 * allocated. 669 */ 670 static int 671 sge_detach(device_t dev) 672 { 673 struct sge_softc *sc; 674 struct ifnet *ifp; 675 676 sc = device_get_softc(dev); 677 ifp = sc->sge_ifp; 678 /* These should only be active if attach succeeded. */ 679 if (device_is_attached(dev)) { 680 ether_ifdetach(ifp); 681 SGE_LOCK(sc); 682 sge_stop(sc); 683 SGE_UNLOCK(sc); 684 callout_drain(&sc->sge_stat_ch); 685 } 686 if (sc->sge_miibus) 687 device_delete_child(dev, sc->sge_miibus); 688 bus_generic_detach(dev); 689 690 if (sc->sge_intrhand) 691 bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand); 692 if (sc->sge_irq) 693 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq); 694 if (sc->sge_res) 695 bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id, 696 sc->sge_res); 697 if (ifp) 698 if_free(ifp); 699 sge_dma_free(sc); 700 mtx_destroy(&sc->sge_mtx); 701 702 return (0); 703 } 704 705 /* 706 * Stop all chip I/O so that the kernel's probe routines don't 707 * get confused by errant DMAs when rebooting. 708 */ 709 static int 710 sge_shutdown(device_t dev) 711 { 712 struct sge_softc *sc; 713 714 sc = device_get_softc(dev); 715 SGE_LOCK(sc); 716 sge_stop(sc); 717 SGE_UNLOCK(sc); 718 return (0); 719 } 720 721 static int 722 sge_suspend(device_t dev) 723 { 724 struct sge_softc *sc; 725 struct ifnet *ifp; 726 727 sc = device_get_softc(dev); 728 SGE_LOCK(sc); 729 ifp = sc->sge_ifp; 730 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 731 sge_stop(sc); 732 SGE_UNLOCK(sc); 733 return (0); 734 } 735 736 static int 737 sge_resume(device_t dev) 738 { 739 struct sge_softc *sc; 740 struct ifnet *ifp; 741 742 sc = device_get_softc(dev); 743 SGE_LOCK(sc); 744 ifp = sc->sge_ifp; 745 if ((ifp->if_flags & IFF_UP) != 0) 746 sge_init_locked(sc); 747 SGE_UNLOCK(sc); 748 return (0); 749 } 750 751 static int 752 sge_dma_alloc(struct sge_softc *sc) 753 { 754 struct sge_chain_data *cd; 755 struct sge_list_data *ld; 756 struct sge_rxdesc *rxd; 757 struct sge_txdesc *txd; 758 int error, i; 759 760 cd = &sc->sge_cdata; 761 ld = &sc->sge_ldata; 762 error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev), 763 1, 0, /* alignment, boundary */ 764 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 765 BUS_SPACE_MAXADDR, /* highaddr */ 766 NULL, NULL, /* filter, filterarg */ 767 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 768 1, /* nsegments */ 769 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 770 0, /* flags */ 771 NULL, /* lockfunc */ 772 NULL, /* lockarg */ 773 &cd->sge_tag); 774 if (error != 0) { 775 device_printf(sc->sge_dev, 776 "could not create parent DMA tag.\n"); 777 goto fail; 778 } 779 780 /* RX descriptor ring */ 781 error = bus_dma_tag_create(cd->sge_tag, 782 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 783 BUS_SPACE_MAXADDR, /* lowaddr */ 784 BUS_SPACE_MAXADDR, /* highaddr */ 785 NULL, NULL, /* filter, filterarg */ 786 SGE_RX_RING_SZ, 1, /* maxsize,nsegments */ 787 SGE_RX_RING_SZ, /* maxsegsize */ 788 0, /* flags */ 789 NULL, /* lockfunc */ 790 NULL, /* lockarg */ 791 &cd->sge_rx_tag); 792 if (error != 0) { 793 device_printf(sc->sge_dev, 794 "could not create Rx ring DMA tag.\n"); 795 goto fail; 796 } 797 /* Allocate DMA'able memory and load DMA map for RX ring. */ 798 error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring, 799 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 800 &cd->sge_rx_dmamap); 801 if (error != 0) { 802 device_printf(sc->sge_dev, 803 "could not allocate DMA'able memory for Rx ring.\n"); 804 goto fail; 805 } 806 error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap, 807 ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr, 808 &ld->sge_rx_paddr, BUS_DMA_NOWAIT); 809 if (error != 0) { 810 device_printf(sc->sge_dev, 811 "could not load DMA'able memory for Rx ring.\n"); 812 } 813 814 /* TX descriptor ring */ 815 error = bus_dma_tag_create(cd->sge_tag, 816 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 817 BUS_SPACE_MAXADDR, /* lowaddr */ 818 BUS_SPACE_MAXADDR, /* highaddr */ 819 NULL, NULL, /* filter, filterarg */ 820 SGE_TX_RING_SZ, 1, /* maxsize,nsegments */ 821 SGE_TX_RING_SZ, /* maxsegsize */ 822 0, /* flags */ 823 NULL, /* lockfunc */ 824 NULL, /* lockarg */ 825 &cd->sge_tx_tag); 826 if (error != 0) { 827 device_printf(sc->sge_dev, 828 "could not create Rx ring DMA tag.\n"); 829 goto fail; 830 } 831 /* Allocate DMA'able memory and load DMA map for TX ring. */ 832 error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring, 833 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 834 &cd->sge_tx_dmamap); 835 if (error != 0) { 836 device_printf(sc->sge_dev, 837 "could not allocate DMA'able memory for Tx ring.\n"); 838 goto fail; 839 } 840 error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap, 841 ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr, 842 &ld->sge_tx_paddr, BUS_DMA_NOWAIT); 843 if (error != 0) { 844 device_printf(sc->sge_dev, 845 "could not load DMA'able memory for Rx ring.\n"); 846 goto fail; 847 } 848 849 /* Create DMA tag for Tx buffers. */ 850 error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR, 851 BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS, 852 SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag); 853 if (error != 0) { 854 device_printf(sc->sge_dev, 855 "could not create Tx mbuf DMA tag.\n"); 856 goto fail; 857 } 858 859 /* Create DMA tag for Rx buffers. */ 860 error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0, 861 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 862 MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag); 863 if (error != 0) { 864 device_printf(sc->sge_dev, 865 "could not create Rx mbuf DMA tag.\n"); 866 goto fail; 867 } 868 869 /* Create DMA maps for Tx buffers. */ 870 for (i = 0; i < SGE_TX_RING_CNT; i++) { 871 txd = &cd->sge_txdesc[i]; 872 txd->tx_m = NULL; 873 txd->tx_dmamap = NULL; 874 txd->tx_ndesc = 0; 875 error = bus_dmamap_create(cd->sge_txmbuf_tag, 0, 876 &txd->tx_dmamap); 877 if (error != 0) { 878 device_printf(sc->sge_dev, 879 "could not create Tx DMA map.\n"); 880 goto fail; 881 } 882 } 883 /* Create spare DMA map for Rx buffer. */ 884 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map); 885 if (error != 0) { 886 device_printf(sc->sge_dev, 887 "could not create spare Rx DMA map.\n"); 888 goto fail; 889 } 890 /* Create DMA maps for Rx buffers. */ 891 for (i = 0; i < SGE_RX_RING_CNT; i++) { 892 rxd = &cd->sge_rxdesc[i]; 893 rxd->rx_m = NULL; 894 rxd->rx_dmamap = NULL; 895 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, 896 &rxd->rx_dmamap); 897 if (error) { 898 device_printf(sc->sge_dev, 899 "could not create Rx DMA map.\n"); 900 goto fail; 901 } 902 } 903 fail: 904 return (error); 905 } 906 907 static void 908 sge_dma_free(struct sge_softc *sc) 909 { 910 struct sge_chain_data *cd; 911 struct sge_list_data *ld; 912 struct sge_rxdesc *rxd; 913 struct sge_txdesc *txd; 914 int i; 915 916 cd = &sc->sge_cdata; 917 ld = &sc->sge_ldata; 918 /* Rx ring. */ 919 if (cd->sge_rx_tag != NULL) { 920 if (ld->sge_rx_paddr != 0) 921 bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap); 922 if (ld->sge_rx_ring != NULL) 923 bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring, 924 cd->sge_rx_dmamap); 925 ld->sge_rx_ring = NULL; 926 ld->sge_rx_paddr = 0; 927 bus_dma_tag_destroy(cd->sge_rx_tag); 928 cd->sge_rx_tag = NULL; 929 } 930 /* Tx ring. */ 931 if (cd->sge_tx_tag != NULL) { 932 if (ld->sge_tx_paddr != 0) 933 bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap); 934 if (ld->sge_tx_ring != NULL) 935 bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring, 936 cd->sge_tx_dmamap); 937 ld->sge_tx_ring = NULL; 938 ld->sge_tx_paddr = 0; 939 bus_dma_tag_destroy(cd->sge_tx_tag); 940 cd->sge_tx_tag = NULL; 941 } 942 /* Rx buffers. */ 943 if (cd->sge_rxmbuf_tag != NULL) { 944 for (i = 0; i < SGE_RX_RING_CNT; i++) { 945 rxd = &cd->sge_rxdesc[i]; 946 if (rxd->rx_dmamap != NULL) { 947 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 948 rxd->rx_dmamap); 949 rxd->rx_dmamap = NULL; 950 } 951 } 952 if (cd->sge_rx_spare_map != NULL) { 953 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 954 cd->sge_rx_spare_map); 955 cd->sge_rx_spare_map = NULL; 956 } 957 bus_dma_tag_destroy(cd->sge_rxmbuf_tag); 958 cd->sge_rxmbuf_tag = NULL; 959 } 960 /* Tx buffers. */ 961 if (cd->sge_txmbuf_tag != NULL) { 962 for (i = 0; i < SGE_TX_RING_CNT; i++) { 963 txd = &cd->sge_txdesc[i]; 964 if (txd->tx_dmamap != NULL) { 965 bus_dmamap_destroy(cd->sge_txmbuf_tag, 966 txd->tx_dmamap); 967 txd->tx_dmamap = NULL; 968 } 969 } 970 bus_dma_tag_destroy(cd->sge_txmbuf_tag); 971 cd->sge_txmbuf_tag = NULL; 972 } 973 if (cd->sge_tag != NULL) 974 bus_dma_tag_destroy(cd->sge_tag); 975 cd->sge_tag = NULL; 976 } 977 978 /* 979 * Initialize the TX descriptors. 980 */ 981 static int 982 sge_list_tx_init(struct sge_softc *sc) 983 { 984 struct sge_list_data *ld; 985 struct sge_chain_data *cd; 986 987 SGE_LOCK_ASSERT(sc); 988 ld = &sc->sge_ldata; 989 cd = &sc->sge_cdata; 990 bzero(ld->sge_tx_ring, SGE_TX_RING_SZ); 991 ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END); 992 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 993 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 994 cd->sge_tx_prod = 0; 995 cd->sge_tx_cons = 0; 996 cd->sge_tx_cnt = 0; 997 return (0); 998 } 999 1000 static int 1001 sge_list_tx_free(struct sge_softc *sc) 1002 { 1003 struct sge_chain_data *cd; 1004 struct sge_txdesc *txd; 1005 int i; 1006 1007 SGE_LOCK_ASSERT(sc); 1008 cd = &sc->sge_cdata; 1009 for (i = 0; i < SGE_TX_RING_CNT; i++) { 1010 txd = &cd->sge_txdesc[i]; 1011 if (txd->tx_m != NULL) { 1012 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1013 BUS_DMASYNC_POSTWRITE); 1014 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1015 m_freem(txd->tx_m); 1016 txd->tx_m = NULL; 1017 txd->tx_ndesc = 0; 1018 } 1019 } 1020 1021 return (0); 1022 } 1023 1024 /* 1025 * Initialize the RX descriptors and allocate mbufs for them. Note that 1026 * we arrange the descriptors in a closed ring, so that the last descriptor 1027 * has RING_END flag set. 1028 */ 1029 static int 1030 sge_list_rx_init(struct sge_softc *sc) 1031 { 1032 struct sge_chain_data *cd; 1033 int i; 1034 1035 SGE_LOCK_ASSERT(sc); 1036 cd = &sc->sge_cdata; 1037 cd->sge_rx_cons = 0; 1038 bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ); 1039 for (i = 0; i < SGE_RX_RING_CNT; i++) { 1040 if (sge_newbuf(sc, i) != 0) 1041 return (ENOBUFS); 1042 } 1043 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1044 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1045 return (0); 1046 } 1047 1048 static int 1049 sge_list_rx_free(struct sge_softc *sc) 1050 { 1051 struct sge_chain_data *cd; 1052 struct sge_rxdesc *rxd; 1053 int i; 1054 1055 SGE_LOCK_ASSERT(sc); 1056 cd = &sc->sge_cdata; 1057 for (i = 0; i < SGE_RX_RING_CNT; i++) { 1058 rxd = &cd->sge_rxdesc[i]; 1059 if (rxd->rx_m != NULL) { 1060 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1061 BUS_DMASYNC_POSTREAD); 1062 bus_dmamap_unload(cd->sge_rxmbuf_tag, 1063 rxd->rx_dmamap); 1064 m_freem(rxd->rx_m); 1065 rxd->rx_m = NULL; 1066 } 1067 } 1068 return (0); 1069 } 1070 1071 /* 1072 * Initialize an RX descriptor and attach an MBUF cluster. 1073 */ 1074 static int 1075 sge_newbuf(struct sge_softc *sc, int prod) 1076 { 1077 struct mbuf *m; 1078 struct sge_desc *desc; 1079 struct sge_chain_data *cd; 1080 struct sge_rxdesc *rxd; 1081 bus_dma_segment_t segs[1]; 1082 bus_dmamap_t map; 1083 int error, nsegs; 1084 1085 SGE_LOCK_ASSERT(sc); 1086 1087 cd = &sc->sge_cdata; 1088 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1089 if (m == NULL) 1090 return (ENOBUFS); 1091 m->m_len = m->m_pkthdr.len = MCLBYTES; 1092 m_adj(m, SGE_RX_BUF_ALIGN); 1093 error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag, 1094 cd->sge_rx_spare_map, m, segs, &nsegs, 0); 1095 if (error != 0) { 1096 m_freem(m); 1097 return (error); 1098 } 1099 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1100 rxd = &cd->sge_rxdesc[prod]; 1101 if (rxd->rx_m != NULL) { 1102 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1103 BUS_DMASYNC_POSTREAD); 1104 bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap); 1105 } 1106 map = rxd->rx_dmamap; 1107 rxd->rx_dmamap = cd->sge_rx_spare_map; 1108 cd->sge_rx_spare_map = map; 1109 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1110 BUS_DMASYNC_PREREAD); 1111 rxd->rx_m = m; 1112 1113 desc = &sc->sge_ldata.sge_rx_ring[prod]; 1114 desc->sge_sts_size = 0; 1115 desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr)); 1116 desc->sge_flags = htole32(segs[0].ds_len); 1117 if (prod == SGE_RX_RING_CNT - 1) 1118 desc->sge_flags |= htole32(RING_END); 1119 desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR); 1120 return (0); 1121 } 1122 1123 static __inline void 1124 sge_discard_rxbuf(struct sge_softc *sc, int index) 1125 { 1126 struct sge_desc *desc; 1127 1128 desc = &sc->sge_ldata.sge_rx_ring[index]; 1129 desc->sge_sts_size = 0; 1130 desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN); 1131 if (index == SGE_RX_RING_CNT - 1) 1132 desc->sge_flags |= htole32(RING_END); 1133 desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR); 1134 } 1135 1136 /* 1137 * A frame has been uploaded: pass the resulting mbuf chain up to 1138 * the higher level protocols. 1139 */ 1140 static void 1141 sge_rxeof(struct sge_softc *sc) 1142 { 1143 struct ifnet *ifp; 1144 struct mbuf *m; 1145 struct sge_chain_data *cd; 1146 struct sge_desc *cur_rx; 1147 uint32_t rxinfo, rxstat; 1148 int cons, prog; 1149 1150 SGE_LOCK_ASSERT(sc); 1151 1152 ifp = sc->sge_ifp; 1153 cd = &sc->sge_cdata; 1154 1155 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1156 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1157 cons = cd->sge_rx_cons; 1158 for (prog = 0; prog < SGE_RX_RING_CNT; prog++, 1159 SGE_INC(cons, SGE_RX_RING_CNT)) { 1160 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1161 break; 1162 cur_rx = &sc->sge_ldata.sge_rx_ring[cons]; 1163 rxinfo = le32toh(cur_rx->sge_cmdsts); 1164 if ((rxinfo & RDC_OWN) != 0) 1165 break; 1166 rxstat = le32toh(cur_rx->sge_sts_size); 1167 if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 || 1168 SGE_RX_NSEGS(rxstat) != 1) { 1169 /* XXX We don't support multi-segment frames yet. */ 1170 #ifdef SGE_SHOW_ERRORS 1171 device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat, 1172 RX_ERR_BITS); 1173 #endif 1174 sge_discard_rxbuf(sc, cons); 1175 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1176 continue; 1177 } 1178 m = cd->sge_rxdesc[cons].rx_m; 1179 if (sge_newbuf(sc, cons) != 0) { 1180 sge_discard_rxbuf(sc, cons); 1181 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1182 continue; 1183 } 1184 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1185 if ((rxinfo & RDC_IP_CSUM) != 0 && 1186 (rxinfo & RDC_IP_CSUM_OK) != 0) 1187 m->m_pkthdr.csum_flags |= 1188 CSUM_IP_CHECKED | CSUM_IP_VALID; 1189 if (((rxinfo & RDC_TCP_CSUM) != 0 && 1190 (rxinfo & RDC_TCP_CSUM_OK) != 0) || 1191 ((rxinfo & RDC_UDP_CSUM) != 0 && 1192 (rxinfo & RDC_UDP_CSUM_OK) != 0)) { 1193 m->m_pkthdr.csum_flags |= 1194 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1195 m->m_pkthdr.csum_data = 0xffff; 1196 } 1197 } 1198 /* Check for VLAN tagged frame. */ 1199 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 1200 (rxstat & RDS_VLAN) != 0) { 1201 m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK; 1202 m->m_flags |= M_VLANTAG; 1203 } 1204 /* 1205 * Account for 10bytes auto padding which is used 1206 * to align IP header on 32bit boundary. Also note, 1207 * CRC bytes is automatically removed by the 1208 * hardware. 1209 */ 1210 m->m_data += SGE_RX_PAD_BYTES; 1211 m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) - 1212 SGE_RX_PAD_BYTES; 1213 m->m_pkthdr.rcvif = ifp; 1214 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1215 SGE_UNLOCK(sc); 1216 (*ifp->if_input)(ifp, m); 1217 SGE_LOCK(sc); 1218 } 1219 1220 if (prog > 0) { 1221 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1222 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1223 cd->sge_rx_cons = cons; 1224 } 1225 } 1226 1227 /* 1228 * A frame was downloaded to the chip. It's safe for us to clean up 1229 * the list buffers. 1230 */ 1231 static void 1232 sge_txeof(struct sge_softc *sc) 1233 { 1234 struct ifnet *ifp; 1235 struct sge_list_data *ld; 1236 struct sge_chain_data *cd; 1237 struct sge_txdesc *txd; 1238 uint32_t txstat; 1239 int cons, nsegs, prod; 1240 1241 SGE_LOCK_ASSERT(sc); 1242 1243 ifp = sc->sge_ifp; 1244 ld = &sc->sge_ldata; 1245 cd = &sc->sge_cdata; 1246 1247 if (cd->sge_tx_cnt == 0) 1248 return; 1249 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 1250 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1251 cons = cd->sge_tx_cons; 1252 prod = cd->sge_tx_prod; 1253 for (; cons != prod;) { 1254 txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts); 1255 if ((txstat & TDC_OWN) != 0) 1256 break; 1257 /* 1258 * Only the first descriptor of multi-descriptor transmission 1259 * is updated by controller. Driver should skip entire 1260 * chained buffers for the transmitted frame. In other words 1261 * TDC_OWN bit is valid only at the first descriptor of a 1262 * multi-descriptor transmission. 1263 */ 1264 if (SGE_TX_ERROR(txstat) != 0) { 1265 #ifdef SGE_SHOW_ERRORS 1266 device_printf(sc->sge_dev, "Tx error : 0x%b\n", 1267 txstat, TX_ERR_BITS); 1268 #endif 1269 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1270 } else { 1271 #ifdef notyet 1272 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0xFFFF) - 1); 1273 #endif 1274 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1275 } 1276 txd = &cd->sge_txdesc[cons]; 1277 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 1278 ld->sge_tx_ring[cons].sge_cmdsts = 0; 1279 SGE_INC(cons, SGE_TX_RING_CNT); 1280 } 1281 /* Reclaim transmitted mbuf. */ 1282 KASSERT(txd->tx_m != NULL, 1283 ("%s: freeing NULL mbuf\n", __func__)); 1284 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1285 BUS_DMASYNC_POSTWRITE); 1286 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1287 m_freem(txd->tx_m); 1288 txd->tx_m = NULL; 1289 cd->sge_tx_cnt -= txd->tx_ndesc; 1290 KASSERT(cd->sge_tx_cnt >= 0, 1291 ("%s: Active Tx desc counter was garbled\n", __func__)); 1292 txd->tx_ndesc = 0; 1293 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1294 } 1295 cd->sge_tx_cons = cons; 1296 if (cd->sge_tx_cnt == 0) 1297 sc->sge_timer = 0; 1298 } 1299 1300 static void 1301 sge_tick(void *arg) 1302 { 1303 struct sge_softc *sc; 1304 struct mii_data *mii; 1305 struct ifnet *ifp; 1306 1307 sc = arg; 1308 SGE_LOCK_ASSERT(sc); 1309 1310 ifp = sc->sge_ifp; 1311 mii = device_get_softc(sc->sge_miibus); 1312 mii_tick(mii); 1313 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1314 sge_miibus_statchg(sc->sge_dev); 1315 if ((sc->sge_flags & SGE_FLAG_LINK) != 0 && 1316 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1317 sge_start_locked(ifp); 1318 } 1319 /* 1320 * Reclaim transmitted frames here as we do not request 1321 * Tx completion interrupt for every queued frames to 1322 * reduce excessive interrupts. 1323 */ 1324 sge_txeof(sc); 1325 sge_watchdog(sc); 1326 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1327 } 1328 1329 static void 1330 sge_intr(void *arg) 1331 { 1332 struct sge_softc *sc; 1333 struct ifnet *ifp; 1334 uint32_t status; 1335 1336 sc = arg; 1337 SGE_LOCK(sc); 1338 ifp = sc->sge_ifp; 1339 1340 status = CSR_READ_4(sc, IntrStatus); 1341 if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) { 1342 /* Not ours. */ 1343 SGE_UNLOCK(sc); 1344 return; 1345 } 1346 /* Acknowledge interrupts. */ 1347 CSR_WRITE_4(sc, IntrStatus, status); 1348 /* Disable further interrupts. */ 1349 CSR_WRITE_4(sc, IntrMask, 0); 1350 /* 1351 * It seems the controller supports some kind of interrupt 1352 * moderation mechanism but we still don't know how to 1353 * enable that. To reduce number of generated interrupts 1354 * under load we check pending interrupts in a loop. This 1355 * will increase number of register access and is not correct 1356 * way to handle interrupt moderation but there seems to be 1357 * no other way at this time. 1358 */ 1359 for (;;) { 1360 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1361 break; 1362 if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) { 1363 sge_rxeof(sc); 1364 /* Wakeup Rx MAC. */ 1365 if ((status & INTR_RX_IDLE) != 0) 1366 CSR_WRITE_4(sc, RX_CTL, 1367 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1368 } 1369 if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0) 1370 sge_txeof(sc); 1371 status = CSR_READ_4(sc, IntrStatus); 1372 if ((status & SGE_INTRS) == 0) 1373 break; 1374 /* Acknowledge interrupts. */ 1375 CSR_WRITE_4(sc, IntrStatus, status); 1376 } 1377 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1378 /* Re-enable interrupts */ 1379 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1380 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1381 sge_start_locked(ifp); 1382 } 1383 SGE_UNLOCK(sc); 1384 } 1385 1386 /* 1387 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1388 * pointers to the fragment pointers. 1389 */ 1390 static int 1391 sge_encap(struct sge_softc *sc, struct mbuf **m_head) 1392 { 1393 struct mbuf *m; 1394 struct sge_desc *desc; 1395 struct sge_txdesc *txd; 1396 bus_dma_segment_t txsegs[SGE_MAXTXSEGS]; 1397 uint32_t cflags, mss; 1398 int error, i, nsegs, prod, si; 1399 1400 SGE_LOCK_ASSERT(sc); 1401 1402 si = prod = sc->sge_cdata.sge_tx_prod; 1403 txd = &sc->sge_cdata.sge_txdesc[prod]; 1404 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1405 struct ether_header *eh; 1406 struct ip *ip; 1407 struct tcphdr *tcp; 1408 uint32_t ip_off, poff; 1409 1410 if (M_WRITABLE(*m_head) == 0) { 1411 /* Get a writable copy. */ 1412 m = m_dup(*m_head, M_NOWAIT); 1413 m_freem(*m_head); 1414 if (m == NULL) { 1415 *m_head = NULL; 1416 return (ENOBUFS); 1417 } 1418 *m_head = m; 1419 } 1420 ip_off = sizeof(struct ether_header); 1421 m = m_pullup(*m_head, ip_off); 1422 if (m == NULL) { 1423 *m_head = NULL; 1424 return (ENOBUFS); 1425 } 1426 eh = mtod(m, struct ether_header *); 1427 /* Check the existence of VLAN tag. */ 1428 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1429 ip_off = sizeof(struct ether_vlan_header); 1430 m = m_pullup(m, ip_off); 1431 if (m == NULL) { 1432 *m_head = NULL; 1433 return (ENOBUFS); 1434 } 1435 } 1436 m = m_pullup(m, ip_off + sizeof(struct ip)); 1437 if (m == NULL) { 1438 *m_head = NULL; 1439 return (ENOBUFS); 1440 } 1441 ip = (struct ip *)(mtod(m, char *) + ip_off); 1442 poff = ip_off + (ip->ip_hl << 2); 1443 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1444 if (m == NULL) { 1445 *m_head = NULL; 1446 return (ENOBUFS); 1447 } 1448 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1449 m = m_pullup(m, poff + (tcp->th_off << 2)); 1450 if (m == NULL) { 1451 *m_head = NULL; 1452 return (ENOBUFS); 1453 } 1454 /* 1455 * Reset IP checksum and recompute TCP pseudo 1456 * checksum that NDIS specification requires. 1457 */ 1458 ip = (struct ip *)(mtod(m, char *) + ip_off); 1459 ip->ip_sum = 0; 1460 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1461 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1462 htons(IPPROTO_TCP)); 1463 *m_head = m; 1464 } 1465 1466 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1467 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1468 if (error == EFBIG) { 1469 m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS); 1470 if (m == NULL) { 1471 m_freem(*m_head); 1472 *m_head = NULL; 1473 return (ENOBUFS); 1474 } 1475 *m_head = m; 1476 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1477 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1478 if (error != 0) { 1479 m_freem(*m_head); 1480 *m_head = NULL; 1481 return (error); 1482 } 1483 } else if (error != 0) 1484 return (error); 1485 1486 KASSERT(nsegs != 0, ("zero segment returned")); 1487 /* Check descriptor overrun. */ 1488 if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) { 1489 bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap); 1490 return (ENOBUFS); 1491 } 1492 bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap, 1493 BUS_DMASYNC_PREWRITE); 1494 1495 m = *m_head; 1496 cflags = 0; 1497 mss = 0; 1498 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1499 cflags |= TDC_LS; 1500 mss = (uint32_t)m->m_pkthdr.tso_segsz; 1501 mss <<= 16; 1502 } else { 1503 if (m->m_pkthdr.csum_flags & CSUM_IP) 1504 cflags |= TDC_IP_CSUM; 1505 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1506 cflags |= TDC_TCP_CSUM; 1507 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1508 cflags |= TDC_UDP_CSUM; 1509 } 1510 for (i = 0; i < nsegs; i++) { 1511 desc = &sc->sge_ldata.sge_tx_ring[prod]; 1512 if (i == 0) { 1513 desc->sge_sts_size = htole32(m->m_pkthdr.len | mss); 1514 desc->sge_cmdsts = 0; 1515 } else { 1516 desc->sge_sts_size = 0; 1517 desc->sge_cmdsts = htole32(TDC_OWN); 1518 } 1519 desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr)); 1520 desc->sge_flags = htole32(txsegs[i].ds_len); 1521 if (prod == SGE_TX_RING_CNT - 1) 1522 desc->sge_flags |= htole32(RING_END); 1523 sc->sge_cdata.sge_tx_cnt++; 1524 SGE_INC(prod, SGE_TX_RING_CNT); 1525 } 1526 /* Update producer index. */ 1527 sc->sge_cdata.sge_tx_prod = prod; 1528 1529 desc = &sc->sge_ldata.sge_tx_ring[si]; 1530 /* Configure VLAN. */ 1531 if((m->m_flags & M_VLANTAG) != 0) { 1532 cflags |= m->m_pkthdr.ether_vtag; 1533 desc->sge_sts_size |= htole32(TDS_INS_VLAN); 1534 } 1535 desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags); 1536 #if 1 1537 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1538 desc->sge_cmdsts |= htole32(TDC_BST); 1539 #else 1540 if ((sc->sge_flags & SGE_FLAG_FDX) == 0) { 1541 desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF); 1542 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1543 desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST); 1544 } 1545 #endif 1546 /* Request interrupt and give ownership to controller. */ 1547 desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR); 1548 txd->tx_m = m; 1549 txd->tx_ndesc = nsegs; 1550 return (0); 1551 } 1552 1553 static void 1554 sge_start(struct ifnet *ifp) 1555 { 1556 struct sge_softc *sc; 1557 1558 sc = ifp->if_softc; 1559 SGE_LOCK(sc); 1560 sge_start_locked(ifp); 1561 SGE_UNLOCK(sc); 1562 } 1563 1564 static void 1565 sge_start_locked(struct ifnet *ifp) 1566 { 1567 struct sge_softc *sc; 1568 struct mbuf *m_head; 1569 int queued = 0; 1570 1571 sc = ifp->if_softc; 1572 SGE_LOCK_ASSERT(sc); 1573 1574 if ((sc->sge_flags & SGE_FLAG_LINK) == 0 || 1575 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1576 IFF_DRV_RUNNING) 1577 return; 1578 1579 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 1580 if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT - 1581 SGE_MAXTXSEGS)) { 1582 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1583 break; 1584 } 1585 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1586 if (m_head == NULL) 1587 break; 1588 if (sge_encap(sc, &m_head)) { 1589 if (m_head == NULL) 1590 break; 1591 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1592 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1593 break; 1594 } 1595 queued++; 1596 /* 1597 * If there's a BPF listener, bounce a copy of this frame 1598 * to him. 1599 */ 1600 BPF_MTAP(ifp, m_head); 1601 } 1602 1603 if (queued > 0) { 1604 bus_dmamap_sync(sc->sge_cdata.sge_tx_tag, 1605 sc->sge_cdata.sge_tx_dmamap, 1606 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1607 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL); 1608 sc->sge_timer = 5; 1609 } 1610 } 1611 1612 static void 1613 sge_init(void *arg) 1614 { 1615 struct sge_softc *sc; 1616 1617 sc = arg; 1618 SGE_LOCK(sc); 1619 sge_init_locked(sc); 1620 SGE_UNLOCK(sc); 1621 } 1622 1623 static void 1624 sge_init_locked(struct sge_softc *sc) 1625 { 1626 struct ifnet *ifp; 1627 struct mii_data *mii; 1628 uint16_t rxfilt; 1629 int i; 1630 1631 SGE_LOCK_ASSERT(sc); 1632 ifp = sc->sge_ifp; 1633 mii = device_get_softc(sc->sge_miibus); 1634 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1635 return; 1636 /* 1637 * Cancel pending I/O and free all RX/TX buffers. 1638 */ 1639 sge_stop(sc); 1640 sge_reset(sc); 1641 1642 /* Init circular RX list. */ 1643 if (sge_list_rx_init(sc) == ENOBUFS) { 1644 device_printf(sc->sge_dev, "no memory for Rx buffers\n"); 1645 sge_stop(sc); 1646 return; 1647 } 1648 /* Init TX descriptors. */ 1649 sge_list_tx_init(sc); 1650 /* 1651 * Load the address of the RX and TX lists. 1652 */ 1653 CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr)); 1654 CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr)); 1655 1656 CSR_WRITE_4(sc, TxMacControl, 0x60); 1657 CSR_WRITE_4(sc, RxWakeOnLan, 0); 1658 CSR_WRITE_4(sc, RxWakeOnLanData, 0); 1659 /* Allow receiving VLAN frames. */ 1660 CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + 1661 SGE_RX_PAD_BYTES); 1662 1663 for (i = 0; i < ETHER_ADDR_LEN; i++) 1664 CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]); 1665 /* Configure RX MAC. */ 1666 rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB; 1667 CSR_WRITE_2(sc, RxMacControl, rxfilt); 1668 sge_rxfilter(sc); 1669 sge_setvlan(sc); 1670 1671 /* Initialize default speed/duplex information. */ 1672 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) 1673 sc->sge_flags |= SGE_FLAG_SPEED_1000; 1674 sc->sge_flags |= SGE_FLAG_FDX; 1675 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) 1676 CSR_WRITE_4(sc, StationControl, 0x04008001); 1677 else 1678 CSR_WRITE_4(sc, StationControl, 0x04000001); 1679 /* 1680 * XXX Try to mitigate interrupts. 1681 */ 1682 CSR_WRITE_4(sc, IntrControl, 0x08880000); 1683 #ifdef notyet 1684 if (sc->sge_intrcontrol != 0) 1685 CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol); 1686 if (sc->sge_intrtimer != 0) 1687 CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer); 1688 #endif 1689 1690 /* 1691 * Clear and enable interrupts. 1692 */ 1693 CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF); 1694 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1695 1696 /* Enable receiver and transmitter. */ 1697 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB); 1698 CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1699 1700 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1701 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1702 1703 sc->sge_flags &= ~SGE_FLAG_LINK; 1704 mii_mediachg(mii); 1705 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1706 } 1707 1708 /* 1709 * Set media options. 1710 */ 1711 static int 1712 sge_ifmedia_upd(struct ifnet *ifp) 1713 { 1714 struct sge_softc *sc; 1715 struct mii_data *mii; 1716 struct mii_softc *miisc; 1717 int error; 1718 1719 sc = ifp->if_softc; 1720 SGE_LOCK(sc); 1721 mii = device_get_softc(sc->sge_miibus); 1722 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1723 PHY_RESET(miisc); 1724 error = mii_mediachg(mii); 1725 SGE_UNLOCK(sc); 1726 1727 return (error); 1728 } 1729 1730 /* 1731 * Report current media status. 1732 */ 1733 static void 1734 sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1735 { 1736 struct sge_softc *sc; 1737 struct mii_data *mii; 1738 1739 sc = ifp->if_softc; 1740 SGE_LOCK(sc); 1741 mii = device_get_softc(sc->sge_miibus); 1742 if ((ifp->if_flags & IFF_UP) == 0) { 1743 SGE_UNLOCK(sc); 1744 return; 1745 } 1746 mii_pollstat(mii); 1747 ifmr->ifm_active = mii->mii_media_active; 1748 ifmr->ifm_status = mii->mii_media_status; 1749 SGE_UNLOCK(sc); 1750 } 1751 1752 static int 1753 sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1754 { 1755 struct sge_softc *sc; 1756 struct ifreq *ifr; 1757 struct mii_data *mii; 1758 int error = 0, mask, reinit; 1759 1760 sc = ifp->if_softc; 1761 ifr = (struct ifreq *)data; 1762 1763 switch(command) { 1764 case SIOCSIFFLAGS: 1765 SGE_LOCK(sc); 1766 if ((ifp->if_flags & IFF_UP) != 0) { 1767 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 1768 ((ifp->if_flags ^ sc->sge_if_flags) & 1769 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1770 sge_rxfilter(sc); 1771 else 1772 sge_init_locked(sc); 1773 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1774 sge_stop(sc); 1775 sc->sge_if_flags = ifp->if_flags; 1776 SGE_UNLOCK(sc); 1777 break; 1778 case SIOCSIFCAP: 1779 SGE_LOCK(sc); 1780 reinit = 0; 1781 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1782 if ((mask & IFCAP_TXCSUM) != 0 && 1783 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 1784 ifp->if_capenable ^= IFCAP_TXCSUM; 1785 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1786 ifp->if_hwassist |= SGE_CSUM_FEATURES; 1787 else 1788 ifp->if_hwassist &= ~SGE_CSUM_FEATURES; 1789 } 1790 if ((mask & IFCAP_RXCSUM) != 0 && 1791 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 1792 ifp->if_capenable ^= IFCAP_RXCSUM; 1793 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1794 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 1795 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1796 if ((mask & IFCAP_TSO4) != 0 && 1797 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 1798 ifp->if_capenable ^= IFCAP_TSO4; 1799 if ((ifp->if_capenable & IFCAP_TSO4) != 0) 1800 ifp->if_hwassist |= CSUM_TSO; 1801 else 1802 ifp->if_hwassist &= ~CSUM_TSO; 1803 } 1804 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1805 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 1806 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1807 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1808 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 1809 /* 1810 * Due to unknown reason, toggling VLAN hardware 1811 * tagging require interface reinitialization. 1812 */ 1813 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1814 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1815 ifp->if_capenable &= 1816 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 1817 reinit = 1; 1818 } 1819 if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1820 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1821 sge_init_locked(sc); 1822 } 1823 SGE_UNLOCK(sc); 1824 VLAN_CAPABILITIES(ifp); 1825 break; 1826 case SIOCADDMULTI: 1827 case SIOCDELMULTI: 1828 SGE_LOCK(sc); 1829 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1830 sge_rxfilter(sc); 1831 SGE_UNLOCK(sc); 1832 break; 1833 case SIOCGIFMEDIA: 1834 case SIOCSIFMEDIA: 1835 mii = device_get_softc(sc->sge_miibus); 1836 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1837 break; 1838 default: 1839 error = ether_ioctl(ifp, command, data); 1840 break; 1841 } 1842 1843 return (error); 1844 } 1845 1846 static void 1847 sge_watchdog(struct sge_softc *sc) 1848 { 1849 struct ifnet *ifp; 1850 1851 SGE_LOCK_ASSERT(sc); 1852 if (sc->sge_timer == 0 || --sc->sge_timer > 0) 1853 return; 1854 1855 ifp = sc->sge_ifp; 1856 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1857 if (1 || bootverbose) 1858 device_printf(sc->sge_dev, 1859 "watchdog timeout (lost link)\n"); 1860 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1861 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1862 sge_init_locked(sc); 1863 return; 1864 } 1865 device_printf(sc->sge_dev, "watchdog timeout\n"); 1866 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1867 1868 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1869 sge_init_locked(sc); 1870 if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd)) 1871 sge_start_locked(ifp); 1872 } 1873 1874 /* 1875 * Stop the adapter and free any mbufs allocated to the 1876 * RX and TX lists. 1877 */ 1878 static void 1879 sge_stop(struct sge_softc *sc) 1880 { 1881 struct ifnet *ifp; 1882 1883 ifp = sc->sge_ifp; 1884 1885 SGE_LOCK_ASSERT(sc); 1886 1887 sc->sge_timer = 0; 1888 callout_stop(&sc->sge_stat_ch); 1889 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1890 1891 CSR_WRITE_4(sc, IntrMask, 0); 1892 CSR_READ_4(sc, IntrMask); 1893 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1894 /* Stop TX/RX MAC. */ 1895 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 1896 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 1897 /* XXX Can we assume active DMA cycles gone? */ 1898 DELAY(2000); 1899 CSR_WRITE_4(sc, IntrMask, 0); 1900 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1901 1902 sc->sge_flags &= ~SGE_FLAG_LINK; 1903 sge_list_rx_free(sc); 1904 sge_list_tx_free(sc); 1905 } 1906