1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com> 5 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net> 6 * Copyright (c) 1997, 1998, 1999 7 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Bill Paul. 20 * 4. Neither the name of the author nor the names of any co-contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 27 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR 28 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 35 * OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 /* 42 * SiS 190/191 PCI Ethernet NIC driver. 43 * 44 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original 45 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by 46 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu 47 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for 48 * review and very useful comments. 49 * 50 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the 51 * Linux and Solaris drivers. 52 */ 53 54 #include <sys/param.h> 55 #include <sys/systm.h> 56 #include <sys/bus.h> 57 #include <sys/endian.h> 58 #include <sys/kernel.h> 59 #include <sys/lock.h> 60 #include <sys/malloc.h> 61 #include <sys/mbuf.h> 62 #include <sys/module.h> 63 #include <sys/mutex.h> 64 #include <sys/rman.h> 65 #include <sys/socket.h> 66 #include <sys/sockio.h> 67 68 #include <net/bpf.h> 69 #include <net/if.h> 70 #include <net/if_var.h> 71 #include <net/if_arp.h> 72 #include <net/ethernet.h> 73 #include <net/if_dl.h> 74 #include <net/if_media.h> 75 #include <net/if_types.h> 76 #include <net/if_vlan_var.h> 77 78 #include <netinet/in.h> 79 #include <netinet/in_systm.h> 80 #include <netinet/ip.h> 81 #include <netinet/tcp.h> 82 83 #include <machine/bus.h> 84 #include <machine/in_cksum.h> 85 86 #include <dev/mii/mii.h> 87 #include <dev/mii/miivar.h> 88 89 #include <dev/pci/pcireg.h> 90 #include <dev/pci/pcivar.h> 91 92 #include <dev/sge/if_sgereg.h> 93 94 MODULE_DEPEND(sge, pci, 1, 1, 1); 95 MODULE_DEPEND(sge, ether, 1, 1, 1); 96 MODULE_DEPEND(sge, miibus, 1, 1, 1); 97 98 /* "device miibus0" required. See GENERIC if you get errors here. */ 99 #include "miibus_if.h" 100 101 /* 102 * Various supported device vendors/types and their names. 103 */ 104 static struct sge_type sge_devs[] = { 105 { SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" }, 106 { SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" }, 107 { 0, 0, NULL } 108 }; 109 110 static int sge_probe(device_t); 111 static int sge_attach(device_t); 112 static int sge_detach(device_t); 113 static int sge_shutdown(device_t); 114 static int sge_suspend(device_t); 115 static int sge_resume(device_t); 116 117 static int sge_miibus_readreg(device_t, int, int); 118 static int sge_miibus_writereg(device_t, int, int, int); 119 static void sge_miibus_statchg(device_t); 120 121 static int sge_newbuf(struct sge_softc *, int); 122 static int sge_encap(struct sge_softc *, struct mbuf **); 123 static __inline void 124 sge_discard_rxbuf(struct sge_softc *, int); 125 static void sge_rxeof(struct sge_softc *); 126 static void sge_txeof(struct sge_softc *); 127 static void sge_intr(void *); 128 static void sge_tick(void *); 129 static void sge_start(struct ifnet *); 130 static void sge_start_locked(struct ifnet *); 131 static int sge_ioctl(struct ifnet *, u_long, caddr_t); 132 static void sge_init(void *); 133 static void sge_init_locked(struct sge_softc *); 134 static void sge_stop(struct sge_softc *); 135 static void sge_watchdog(struct sge_softc *); 136 static int sge_ifmedia_upd(struct ifnet *); 137 static void sge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 138 139 static int sge_get_mac_addr_apc(struct sge_softc *, uint8_t *); 140 static int sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *); 141 static uint16_t sge_read_eeprom(struct sge_softc *, int); 142 143 static void sge_rxfilter(struct sge_softc *); 144 static void sge_setvlan(struct sge_softc *); 145 static void sge_reset(struct sge_softc *); 146 static int sge_list_rx_init(struct sge_softc *); 147 static int sge_list_rx_free(struct sge_softc *); 148 static int sge_list_tx_init(struct sge_softc *); 149 static int sge_list_tx_free(struct sge_softc *); 150 151 static int sge_dma_alloc(struct sge_softc *); 152 static void sge_dma_free(struct sge_softc *); 153 static void sge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 154 155 static device_method_t sge_methods[] = { 156 /* Device interface */ 157 DEVMETHOD(device_probe, sge_probe), 158 DEVMETHOD(device_attach, sge_attach), 159 DEVMETHOD(device_detach, sge_detach), 160 DEVMETHOD(device_suspend, sge_suspend), 161 DEVMETHOD(device_resume, sge_resume), 162 DEVMETHOD(device_shutdown, sge_shutdown), 163 164 /* MII interface */ 165 DEVMETHOD(miibus_readreg, sge_miibus_readreg), 166 DEVMETHOD(miibus_writereg, sge_miibus_writereg), 167 DEVMETHOD(miibus_statchg, sge_miibus_statchg), 168 169 DEVMETHOD_END 170 }; 171 172 static driver_t sge_driver = { 173 "sge", sge_methods, sizeof(struct sge_softc) 174 }; 175 176 static devclass_t sge_devclass; 177 178 DRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0); 179 DRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0); 180 181 /* 182 * Register space access macros. 183 */ 184 #define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sge_res, reg, val) 185 #define CSR_WRITE_2(sc, reg, val) bus_write_2(sc->sge_res, reg, val) 186 #define CSR_WRITE_1(cs, reg, val) bus_write_1(sc->sge_res, reg, val) 187 188 #define CSR_READ_4(sc, reg) bus_read_4(sc->sge_res, reg) 189 #define CSR_READ_2(sc, reg) bus_read_2(sc->sge_res, reg) 190 #define CSR_READ_1(sc, reg) bus_read_1(sc->sge_res, reg) 191 192 /* Define to show Tx/Rx error status. */ 193 #undef SGE_SHOW_ERRORS 194 195 #define SGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 196 197 static void 198 sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 199 { 200 bus_addr_t *p; 201 202 if (error != 0) 203 return; 204 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 205 p = arg; 206 *p = segs->ds_addr; 207 } 208 209 /* 210 * Read a sequence of words from the EEPROM. 211 */ 212 static uint16_t 213 sge_read_eeprom(struct sge_softc *sc, int offset) 214 { 215 uint32_t val; 216 int i; 217 218 KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big")); 219 CSR_WRITE_4(sc, ROMInterface, 220 EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT)); 221 DELAY(500); 222 for (i = 0; i < SGE_TIMEOUT; i++) { 223 val = CSR_READ_4(sc, ROMInterface); 224 if ((val & EI_REQ) == 0) 225 break; 226 DELAY(100); 227 } 228 if (i == SGE_TIMEOUT) { 229 device_printf(sc->sge_dev, 230 "EEPROM read timeout : 0x%08x\n", val); 231 return (0xffff); 232 } 233 234 return ((val & EI_DATA) >> EI_DATA_SHIFT); 235 } 236 237 static int 238 sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest) 239 { 240 uint16_t val; 241 int i; 242 243 val = sge_read_eeprom(sc, EEPROMSignature); 244 if (val == 0xffff || val == 0) { 245 device_printf(sc->sge_dev, 246 "invalid EEPROM signature : 0x%04x\n", val); 247 return (EINVAL); 248 } 249 250 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 251 val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2); 252 dest[i + 0] = (uint8_t)val; 253 dest[i + 1] = (uint8_t)(val >> 8); 254 } 255 256 if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0) 257 sc->sge_flags |= SGE_FLAG_RGMII; 258 return (0); 259 } 260 261 /* 262 * For SiS96x, APC CMOS RAM is used to store ethernet address. 263 * APC CMOS RAM is accessed through ISA bridge. 264 */ 265 static int 266 sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest) 267 { 268 #if defined(__amd64__) || defined(__i386__) 269 devclass_t pci; 270 device_t bus, dev = NULL; 271 device_t *kids; 272 struct apc_tbl { 273 uint16_t vid; 274 uint16_t did; 275 } *tp, apc_tbls[] = { 276 { SIS_VENDORID, 0x0965 }, 277 { SIS_VENDORID, 0x0966 }, 278 { SIS_VENDORID, 0x0968 } 279 }; 280 uint8_t reg; 281 int busnum, i, j, numkids; 282 283 pci = devclass_find("pci"); 284 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) { 285 bus = devclass_get_device(pci, busnum); 286 if (!bus) 287 continue; 288 if (device_get_children(bus, &kids, &numkids) != 0) 289 continue; 290 for (i = 0; i < numkids; i++) { 291 dev = kids[i]; 292 if (pci_get_class(dev) == PCIC_BRIDGE && 293 pci_get_subclass(dev) == PCIS_BRIDGE_ISA) { 294 tp = apc_tbls; 295 for (j = 0; j < nitems(apc_tbls); j++) { 296 if (pci_get_vendor(dev) == tp->vid && 297 pci_get_device(dev) == tp->did) { 298 free(kids, M_TEMP); 299 goto apc_found; 300 } 301 tp++; 302 } 303 } 304 } 305 free(kids, M_TEMP); 306 } 307 device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n"); 308 return (EINVAL); 309 apc_found: 310 /* Enable port 0x78 and 0x79 to access APC registers. */ 311 reg = pci_read_config(dev, 0x48, 1); 312 pci_write_config(dev, 0x48, reg & ~0x02, 1); 313 DELAY(50); 314 pci_read_config(dev, 0x48, 1); 315 /* Read stored ethernet address. */ 316 for (i = 0; i < ETHER_ADDR_LEN; i++) { 317 outb(0x78, 0x09 + i); 318 dest[i] = inb(0x79); 319 } 320 outb(0x78, 0x12); 321 if ((inb(0x79) & 0x80) != 0) 322 sc->sge_flags |= SGE_FLAG_RGMII; 323 /* Restore access to APC registers. */ 324 pci_write_config(dev, 0x48, reg, 1); 325 326 return (0); 327 #else 328 return (EINVAL); 329 #endif 330 } 331 332 static int 333 sge_miibus_readreg(device_t dev, int phy, int reg) 334 { 335 struct sge_softc *sc; 336 uint32_t val; 337 int i; 338 339 sc = device_get_softc(dev); 340 CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) | 341 (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ); 342 DELAY(10); 343 for (i = 0; i < SGE_TIMEOUT; i++) { 344 val = CSR_READ_4(sc, GMIIControl); 345 if ((val & GMI_REQ) == 0) 346 break; 347 DELAY(10); 348 } 349 if (i == SGE_TIMEOUT) { 350 device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg); 351 return (0); 352 } 353 return ((val & GMI_DATA) >> GMI_DATA_SHIFT); 354 } 355 356 static int 357 sge_miibus_writereg(device_t dev, int phy, int reg, int data) 358 { 359 struct sge_softc *sc; 360 uint32_t val; 361 int i; 362 363 sc = device_get_softc(dev); 364 CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) | 365 (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) | 366 GMI_OP_WR | GMI_REQ); 367 DELAY(10); 368 for (i = 0; i < SGE_TIMEOUT; i++) { 369 val = CSR_READ_4(sc, GMIIControl); 370 if ((val & GMI_REQ) == 0) 371 break; 372 DELAY(10); 373 } 374 if (i == SGE_TIMEOUT) 375 device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg); 376 return (0); 377 } 378 379 static void 380 sge_miibus_statchg(device_t dev) 381 { 382 struct sge_softc *sc; 383 struct mii_data *mii; 384 struct ifnet *ifp; 385 uint32_t ctl, speed; 386 387 sc = device_get_softc(dev); 388 mii = device_get_softc(sc->sge_miibus); 389 ifp = sc->sge_ifp; 390 if (mii == NULL || ifp == NULL || 391 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 392 return; 393 speed = 0; 394 sc->sge_flags &= ~SGE_FLAG_LINK; 395 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 396 (IFM_ACTIVE | IFM_AVALID)) { 397 switch (IFM_SUBTYPE(mii->mii_media_active)) { 398 case IFM_10_T: 399 sc->sge_flags |= SGE_FLAG_LINK; 400 speed = SC_SPEED_10; 401 break; 402 case IFM_100_TX: 403 sc->sge_flags |= SGE_FLAG_LINK; 404 speed = SC_SPEED_100; 405 break; 406 case IFM_1000_T: 407 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) { 408 sc->sge_flags |= SGE_FLAG_LINK; 409 speed = SC_SPEED_1000; 410 } 411 break; 412 default: 413 break; 414 } 415 } 416 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) 417 return; 418 /* Reprogram MAC to resolved speed/duplex/flow-control parameters. */ 419 ctl = CSR_READ_4(sc, StationControl); 420 ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK); 421 if (speed == SC_SPEED_1000) { 422 ctl |= 0x07000000; 423 sc->sge_flags |= SGE_FLAG_SPEED_1000; 424 } else { 425 ctl |= 0x04000000; 426 sc->sge_flags &= ~SGE_FLAG_SPEED_1000; 427 } 428 #ifdef notyet 429 if ((sc->sge_flags & SGE_FLAG_GMII) != 0) 430 ctl |= 0x03000000; 431 #endif 432 ctl |= speed; 433 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 434 ctl |= SC_FDX; 435 sc->sge_flags |= SGE_FLAG_FDX; 436 } else 437 sc->sge_flags &= ~SGE_FLAG_FDX; 438 CSR_WRITE_4(sc, StationControl, ctl); 439 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) { 440 CSR_WRITE_4(sc, RGMIIDelay, 0x0441); 441 CSR_WRITE_4(sc, RGMIIDelay, 0x0440); 442 } 443 } 444 445 static u_int 446 sge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int count) 447 { 448 uint32_t crc, *hashes = arg; 449 450 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN); 451 hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 452 453 return (1); 454 } 455 456 static void 457 sge_rxfilter(struct sge_softc *sc) 458 { 459 struct ifnet *ifp; 460 uint32_t hashes[2]; 461 uint16_t rxfilt; 462 463 SGE_LOCK_ASSERT(sc); 464 465 ifp = sc->sge_ifp; 466 rxfilt = CSR_READ_2(sc, RxMacControl); 467 rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast); 468 rxfilt |= AcceptMyPhys; 469 if ((ifp->if_flags & IFF_BROADCAST) != 0) 470 rxfilt |= AcceptBroadcast; 471 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 472 if ((ifp->if_flags & IFF_PROMISC) != 0) 473 rxfilt |= AcceptAllPhys; 474 rxfilt |= AcceptMulticast; 475 hashes[0] = 0xFFFFFFFF; 476 hashes[1] = 0xFFFFFFFF; 477 } else { 478 rxfilt |= AcceptMulticast; 479 hashes[0] = hashes[1] = 0; 480 /* Now program new ones. */ 481 if_foreach_llmaddr(ifp, sge_hash_maddr, hashes); 482 } 483 CSR_WRITE_2(sc, RxMacControl, rxfilt); 484 CSR_WRITE_4(sc, RxHashTable, hashes[0]); 485 CSR_WRITE_4(sc, RxHashTable2, hashes[1]); 486 } 487 488 static void 489 sge_setvlan(struct sge_softc *sc) 490 { 491 struct ifnet *ifp; 492 uint16_t rxfilt; 493 494 SGE_LOCK_ASSERT(sc); 495 496 ifp = sc->sge_ifp; 497 if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0) 498 return; 499 rxfilt = CSR_READ_2(sc, RxMacControl); 500 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 501 rxfilt |= RXMAC_STRIP_VLAN; 502 else 503 rxfilt &= ~RXMAC_STRIP_VLAN; 504 CSR_WRITE_2(sc, RxMacControl, rxfilt); 505 } 506 507 static void 508 sge_reset(struct sge_softc *sc) 509 { 510 511 CSR_WRITE_4(sc, IntrMask, 0); 512 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 513 514 /* Soft reset. */ 515 CSR_WRITE_4(sc, IntrControl, 0x8000); 516 CSR_READ_4(sc, IntrControl); 517 DELAY(100); 518 CSR_WRITE_4(sc, IntrControl, 0); 519 /* Stop MAC. */ 520 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 521 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 522 523 CSR_WRITE_4(sc, IntrMask, 0); 524 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 525 526 CSR_WRITE_4(sc, GMIIControl, 0); 527 } 528 529 /* 530 * Probe for an SiS chip. Check the PCI vendor and device 531 * IDs against our list and return a device name if we find a match. 532 */ 533 static int 534 sge_probe(device_t dev) 535 { 536 struct sge_type *t; 537 538 t = sge_devs; 539 while (t->sge_name != NULL) { 540 if ((pci_get_vendor(dev) == t->sge_vid) && 541 (pci_get_device(dev) == t->sge_did)) { 542 device_set_desc(dev, t->sge_name); 543 return (BUS_PROBE_DEFAULT); 544 } 545 t++; 546 } 547 548 return (ENXIO); 549 } 550 551 /* 552 * Attach the interface. Allocate softc structures, do ifmedia 553 * setup and ethernet/BPF attach. 554 */ 555 static int 556 sge_attach(device_t dev) 557 { 558 struct sge_softc *sc; 559 struct ifnet *ifp; 560 uint8_t eaddr[ETHER_ADDR_LEN]; 561 int error = 0, rid; 562 563 sc = device_get_softc(dev); 564 sc->sge_dev = dev; 565 566 mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 567 MTX_DEF); 568 callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0); 569 570 /* 571 * Map control/status registers. 572 */ 573 pci_enable_busmaster(dev); 574 575 /* Allocate resources. */ 576 sc->sge_res_id = PCIR_BAR(0); 577 sc->sge_res_type = SYS_RES_MEMORY; 578 sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type, 579 &sc->sge_res_id, RF_ACTIVE); 580 if (sc->sge_res == NULL) { 581 device_printf(dev, "couldn't allocate resource\n"); 582 error = ENXIO; 583 goto fail; 584 } 585 586 rid = 0; 587 sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 588 RF_SHAREABLE | RF_ACTIVE); 589 if (sc->sge_irq == NULL) { 590 device_printf(dev, "couldn't allocate IRQ resources\n"); 591 error = ENXIO; 592 goto fail; 593 } 594 sc->sge_rev = pci_get_revid(dev); 595 if (pci_get_device(dev) == SIS_DEVICEID_190) 596 sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190; 597 /* Reset the adapter. */ 598 sge_reset(sc); 599 600 /* Get MAC address from the EEPROM. */ 601 if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0) 602 sge_get_mac_addr_apc(sc, eaddr); 603 else 604 sge_get_mac_addr_eeprom(sc, eaddr); 605 606 if ((error = sge_dma_alloc(sc)) != 0) 607 goto fail; 608 609 ifp = sc->sge_ifp = if_alloc(IFT_ETHER); 610 if (ifp == NULL) { 611 device_printf(dev, "cannot allocate ifnet structure.\n"); 612 error = ENOSPC; 613 goto fail; 614 } 615 ifp->if_softc = sc; 616 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 617 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 618 ifp->if_ioctl = sge_ioctl; 619 ifp->if_start = sge_start; 620 ifp->if_init = sge_init; 621 ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1; 622 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 623 IFQ_SET_READY(&ifp->if_snd); 624 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4; 625 ifp->if_hwassist = SGE_CSUM_FEATURES | CSUM_TSO; 626 ifp->if_capenable = ifp->if_capabilities; 627 /* 628 * Do MII setup. 629 */ 630 error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd, 631 sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 632 if (error != 0) { 633 device_printf(dev, "attaching PHYs failed\n"); 634 goto fail; 635 } 636 637 /* 638 * Call MI attach routine. 639 */ 640 ether_ifattach(ifp, eaddr); 641 642 /* VLAN setup. */ 643 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | 644 IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; 645 ifp->if_capenable = ifp->if_capabilities; 646 /* Tell the upper layer(s) we support long frames. */ 647 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 648 649 /* Hook interrupt last to avoid having to lock softc */ 650 error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE, 651 NULL, sge_intr, sc, &sc->sge_intrhand); 652 if (error) { 653 device_printf(dev, "couldn't set up irq\n"); 654 ether_ifdetach(ifp); 655 goto fail; 656 } 657 658 fail: 659 if (error) 660 sge_detach(dev); 661 662 return (error); 663 } 664 665 /* 666 * Shutdown hardware and free up resources. This can be called any 667 * time after the mutex has been initialized. It is called in both 668 * the error case in attach and the normal detach case so it needs 669 * to be careful about only freeing resources that have actually been 670 * allocated. 671 */ 672 static int 673 sge_detach(device_t dev) 674 { 675 struct sge_softc *sc; 676 struct ifnet *ifp; 677 678 sc = device_get_softc(dev); 679 ifp = sc->sge_ifp; 680 /* These should only be active if attach succeeded. */ 681 if (device_is_attached(dev)) { 682 ether_ifdetach(ifp); 683 SGE_LOCK(sc); 684 sge_stop(sc); 685 SGE_UNLOCK(sc); 686 callout_drain(&sc->sge_stat_ch); 687 } 688 if (sc->sge_miibus) 689 device_delete_child(dev, sc->sge_miibus); 690 bus_generic_detach(dev); 691 692 if (sc->sge_intrhand) 693 bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand); 694 if (sc->sge_irq) 695 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq); 696 if (sc->sge_res) 697 bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id, 698 sc->sge_res); 699 if (ifp) 700 if_free(ifp); 701 sge_dma_free(sc); 702 mtx_destroy(&sc->sge_mtx); 703 704 return (0); 705 } 706 707 /* 708 * Stop all chip I/O so that the kernel's probe routines don't 709 * get confused by errant DMAs when rebooting. 710 */ 711 static int 712 sge_shutdown(device_t dev) 713 { 714 struct sge_softc *sc; 715 716 sc = device_get_softc(dev); 717 SGE_LOCK(sc); 718 sge_stop(sc); 719 SGE_UNLOCK(sc); 720 return (0); 721 } 722 723 static int 724 sge_suspend(device_t dev) 725 { 726 struct sge_softc *sc; 727 struct ifnet *ifp; 728 729 sc = device_get_softc(dev); 730 SGE_LOCK(sc); 731 ifp = sc->sge_ifp; 732 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 733 sge_stop(sc); 734 SGE_UNLOCK(sc); 735 return (0); 736 } 737 738 static int 739 sge_resume(device_t dev) 740 { 741 struct sge_softc *sc; 742 struct ifnet *ifp; 743 744 sc = device_get_softc(dev); 745 SGE_LOCK(sc); 746 ifp = sc->sge_ifp; 747 if ((ifp->if_flags & IFF_UP) != 0) 748 sge_init_locked(sc); 749 SGE_UNLOCK(sc); 750 return (0); 751 } 752 753 static int 754 sge_dma_alloc(struct sge_softc *sc) 755 { 756 struct sge_chain_data *cd; 757 struct sge_list_data *ld; 758 struct sge_rxdesc *rxd; 759 struct sge_txdesc *txd; 760 int error, i; 761 762 cd = &sc->sge_cdata; 763 ld = &sc->sge_ldata; 764 error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev), 765 1, 0, /* alignment, boundary */ 766 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 767 BUS_SPACE_MAXADDR, /* highaddr */ 768 NULL, NULL, /* filter, filterarg */ 769 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 770 1, /* nsegments */ 771 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 772 0, /* flags */ 773 NULL, /* lockfunc */ 774 NULL, /* lockarg */ 775 &cd->sge_tag); 776 if (error != 0) { 777 device_printf(sc->sge_dev, 778 "could not create parent DMA tag.\n"); 779 goto fail; 780 } 781 782 /* RX descriptor ring */ 783 error = bus_dma_tag_create(cd->sge_tag, 784 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 785 BUS_SPACE_MAXADDR, /* lowaddr */ 786 BUS_SPACE_MAXADDR, /* highaddr */ 787 NULL, NULL, /* filter, filterarg */ 788 SGE_RX_RING_SZ, 1, /* maxsize,nsegments */ 789 SGE_RX_RING_SZ, /* maxsegsize */ 790 0, /* flags */ 791 NULL, /* lockfunc */ 792 NULL, /* lockarg */ 793 &cd->sge_rx_tag); 794 if (error != 0) { 795 device_printf(sc->sge_dev, 796 "could not create Rx ring DMA tag.\n"); 797 goto fail; 798 } 799 /* Allocate DMA'able memory and load DMA map for RX ring. */ 800 error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring, 801 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 802 &cd->sge_rx_dmamap); 803 if (error != 0) { 804 device_printf(sc->sge_dev, 805 "could not allocate DMA'able memory for Rx ring.\n"); 806 goto fail; 807 } 808 error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap, 809 ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr, 810 &ld->sge_rx_paddr, BUS_DMA_NOWAIT); 811 if (error != 0) { 812 device_printf(sc->sge_dev, 813 "could not load DMA'able memory for Rx ring.\n"); 814 } 815 816 /* TX descriptor ring */ 817 error = bus_dma_tag_create(cd->sge_tag, 818 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 819 BUS_SPACE_MAXADDR, /* lowaddr */ 820 BUS_SPACE_MAXADDR, /* highaddr */ 821 NULL, NULL, /* filter, filterarg */ 822 SGE_TX_RING_SZ, 1, /* maxsize,nsegments */ 823 SGE_TX_RING_SZ, /* maxsegsize */ 824 0, /* flags */ 825 NULL, /* lockfunc */ 826 NULL, /* lockarg */ 827 &cd->sge_tx_tag); 828 if (error != 0) { 829 device_printf(sc->sge_dev, 830 "could not create Rx ring DMA tag.\n"); 831 goto fail; 832 } 833 /* Allocate DMA'able memory and load DMA map for TX ring. */ 834 error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring, 835 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 836 &cd->sge_tx_dmamap); 837 if (error != 0) { 838 device_printf(sc->sge_dev, 839 "could not allocate DMA'able memory for Tx ring.\n"); 840 goto fail; 841 } 842 error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap, 843 ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr, 844 &ld->sge_tx_paddr, BUS_DMA_NOWAIT); 845 if (error != 0) { 846 device_printf(sc->sge_dev, 847 "could not load DMA'able memory for Rx ring.\n"); 848 goto fail; 849 } 850 851 /* Create DMA tag for Tx buffers. */ 852 error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR, 853 BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS, 854 SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag); 855 if (error != 0) { 856 device_printf(sc->sge_dev, 857 "could not create Tx mbuf DMA tag.\n"); 858 goto fail; 859 } 860 861 /* Create DMA tag for Rx buffers. */ 862 error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0, 863 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 864 MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag); 865 if (error != 0) { 866 device_printf(sc->sge_dev, 867 "could not create Rx mbuf DMA tag.\n"); 868 goto fail; 869 } 870 871 /* Create DMA maps for Tx buffers. */ 872 for (i = 0; i < SGE_TX_RING_CNT; i++) { 873 txd = &cd->sge_txdesc[i]; 874 txd->tx_m = NULL; 875 txd->tx_dmamap = NULL; 876 txd->tx_ndesc = 0; 877 error = bus_dmamap_create(cd->sge_txmbuf_tag, 0, 878 &txd->tx_dmamap); 879 if (error != 0) { 880 device_printf(sc->sge_dev, 881 "could not create Tx DMA map.\n"); 882 goto fail; 883 } 884 } 885 /* Create spare DMA map for Rx buffer. */ 886 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map); 887 if (error != 0) { 888 device_printf(sc->sge_dev, 889 "could not create spare Rx DMA map.\n"); 890 goto fail; 891 } 892 /* Create DMA maps for Rx buffers. */ 893 for (i = 0; i < SGE_RX_RING_CNT; i++) { 894 rxd = &cd->sge_rxdesc[i]; 895 rxd->rx_m = NULL; 896 rxd->rx_dmamap = NULL; 897 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, 898 &rxd->rx_dmamap); 899 if (error) { 900 device_printf(sc->sge_dev, 901 "could not create Rx DMA map.\n"); 902 goto fail; 903 } 904 } 905 fail: 906 return (error); 907 } 908 909 static void 910 sge_dma_free(struct sge_softc *sc) 911 { 912 struct sge_chain_data *cd; 913 struct sge_list_data *ld; 914 struct sge_rxdesc *rxd; 915 struct sge_txdesc *txd; 916 int i; 917 918 cd = &sc->sge_cdata; 919 ld = &sc->sge_ldata; 920 /* Rx ring. */ 921 if (cd->sge_rx_tag != NULL) { 922 if (ld->sge_rx_paddr != 0) 923 bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap); 924 if (ld->sge_rx_ring != NULL) 925 bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring, 926 cd->sge_rx_dmamap); 927 ld->sge_rx_ring = NULL; 928 ld->sge_rx_paddr = 0; 929 bus_dma_tag_destroy(cd->sge_rx_tag); 930 cd->sge_rx_tag = NULL; 931 } 932 /* Tx ring. */ 933 if (cd->sge_tx_tag != NULL) { 934 if (ld->sge_tx_paddr != 0) 935 bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap); 936 if (ld->sge_tx_ring != NULL) 937 bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring, 938 cd->sge_tx_dmamap); 939 ld->sge_tx_ring = NULL; 940 ld->sge_tx_paddr = 0; 941 bus_dma_tag_destroy(cd->sge_tx_tag); 942 cd->sge_tx_tag = NULL; 943 } 944 /* Rx buffers. */ 945 if (cd->sge_rxmbuf_tag != NULL) { 946 for (i = 0; i < SGE_RX_RING_CNT; i++) { 947 rxd = &cd->sge_rxdesc[i]; 948 if (rxd->rx_dmamap != NULL) { 949 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 950 rxd->rx_dmamap); 951 rxd->rx_dmamap = NULL; 952 } 953 } 954 if (cd->sge_rx_spare_map != NULL) { 955 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 956 cd->sge_rx_spare_map); 957 cd->sge_rx_spare_map = NULL; 958 } 959 bus_dma_tag_destroy(cd->sge_rxmbuf_tag); 960 cd->sge_rxmbuf_tag = NULL; 961 } 962 /* Tx buffers. */ 963 if (cd->sge_txmbuf_tag != NULL) { 964 for (i = 0; i < SGE_TX_RING_CNT; i++) { 965 txd = &cd->sge_txdesc[i]; 966 if (txd->tx_dmamap != NULL) { 967 bus_dmamap_destroy(cd->sge_txmbuf_tag, 968 txd->tx_dmamap); 969 txd->tx_dmamap = NULL; 970 } 971 } 972 bus_dma_tag_destroy(cd->sge_txmbuf_tag); 973 cd->sge_txmbuf_tag = NULL; 974 } 975 if (cd->sge_tag != NULL) 976 bus_dma_tag_destroy(cd->sge_tag); 977 cd->sge_tag = NULL; 978 } 979 980 /* 981 * Initialize the TX descriptors. 982 */ 983 static int 984 sge_list_tx_init(struct sge_softc *sc) 985 { 986 struct sge_list_data *ld; 987 struct sge_chain_data *cd; 988 989 SGE_LOCK_ASSERT(sc); 990 ld = &sc->sge_ldata; 991 cd = &sc->sge_cdata; 992 bzero(ld->sge_tx_ring, SGE_TX_RING_SZ); 993 ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END); 994 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 995 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 996 cd->sge_tx_prod = 0; 997 cd->sge_tx_cons = 0; 998 cd->sge_tx_cnt = 0; 999 return (0); 1000 } 1001 1002 static int 1003 sge_list_tx_free(struct sge_softc *sc) 1004 { 1005 struct sge_chain_data *cd; 1006 struct sge_txdesc *txd; 1007 int i; 1008 1009 SGE_LOCK_ASSERT(sc); 1010 cd = &sc->sge_cdata; 1011 for (i = 0; i < SGE_TX_RING_CNT; i++) { 1012 txd = &cd->sge_txdesc[i]; 1013 if (txd->tx_m != NULL) { 1014 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1015 BUS_DMASYNC_POSTWRITE); 1016 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1017 m_freem(txd->tx_m); 1018 txd->tx_m = NULL; 1019 txd->tx_ndesc = 0; 1020 } 1021 } 1022 1023 return (0); 1024 } 1025 1026 /* 1027 * Initialize the RX descriptors and allocate mbufs for them. Note that 1028 * we arrange the descriptors in a closed ring, so that the last descriptor 1029 * has RING_END flag set. 1030 */ 1031 static int 1032 sge_list_rx_init(struct sge_softc *sc) 1033 { 1034 struct sge_chain_data *cd; 1035 int i; 1036 1037 SGE_LOCK_ASSERT(sc); 1038 cd = &sc->sge_cdata; 1039 cd->sge_rx_cons = 0; 1040 bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ); 1041 for (i = 0; i < SGE_RX_RING_CNT; i++) { 1042 if (sge_newbuf(sc, i) != 0) 1043 return (ENOBUFS); 1044 } 1045 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1046 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1047 return (0); 1048 } 1049 1050 static int 1051 sge_list_rx_free(struct sge_softc *sc) 1052 { 1053 struct sge_chain_data *cd; 1054 struct sge_rxdesc *rxd; 1055 int i; 1056 1057 SGE_LOCK_ASSERT(sc); 1058 cd = &sc->sge_cdata; 1059 for (i = 0; i < SGE_RX_RING_CNT; i++) { 1060 rxd = &cd->sge_rxdesc[i]; 1061 if (rxd->rx_m != NULL) { 1062 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1063 BUS_DMASYNC_POSTREAD); 1064 bus_dmamap_unload(cd->sge_rxmbuf_tag, 1065 rxd->rx_dmamap); 1066 m_freem(rxd->rx_m); 1067 rxd->rx_m = NULL; 1068 } 1069 } 1070 return (0); 1071 } 1072 1073 /* 1074 * Initialize an RX descriptor and attach an MBUF cluster. 1075 */ 1076 static int 1077 sge_newbuf(struct sge_softc *sc, int prod) 1078 { 1079 struct mbuf *m; 1080 struct sge_desc *desc; 1081 struct sge_chain_data *cd; 1082 struct sge_rxdesc *rxd; 1083 bus_dma_segment_t segs[1]; 1084 bus_dmamap_t map; 1085 int error, nsegs; 1086 1087 SGE_LOCK_ASSERT(sc); 1088 1089 cd = &sc->sge_cdata; 1090 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1091 if (m == NULL) 1092 return (ENOBUFS); 1093 m->m_len = m->m_pkthdr.len = MCLBYTES; 1094 m_adj(m, SGE_RX_BUF_ALIGN); 1095 error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag, 1096 cd->sge_rx_spare_map, m, segs, &nsegs, 0); 1097 if (error != 0) { 1098 m_freem(m); 1099 return (error); 1100 } 1101 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1102 rxd = &cd->sge_rxdesc[prod]; 1103 if (rxd->rx_m != NULL) { 1104 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1105 BUS_DMASYNC_POSTREAD); 1106 bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap); 1107 } 1108 map = rxd->rx_dmamap; 1109 rxd->rx_dmamap = cd->sge_rx_spare_map; 1110 cd->sge_rx_spare_map = map; 1111 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1112 BUS_DMASYNC_PREREAD); 1113 rxd->rx_m = m; 1114 1115 desc = &sc->sge_ldata.sge_rx_ring[prod]; 1116 desc->sge_sts_size = 0; 1117 desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr)); 1118 desc->sge_flags = htole32(segs[0].ds_len); 1119 if (prod == SGE_RX_RING_CNT - 1) 1120 desc->sge_flags |= htole32(RING_END); 1121 desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR); 1122 return (0); 1123 } 1124 1125 static __inline void 1126 sge_discard_rxbuf(struct sge_softc *sc, int index) 1127 { 1128 struct sge_desc *desc; 1129 1130 desc = &sc->sge_ldata.sge_rx_ring[index]; 1131 desc->sge_sts_size = 0; 1132 desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN); 1133 if (index == SGE_RX_RING_CNT - 1) 1134 desc->sge_flags |= htole32(RING_END); 1135 desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR); 1136 } 1137 1138 /* 1139 * A frame has been uploaded: pass the resulting mbuf chain up to 1140 * the higher level protocols. 1141 */ 1142 static void 1143 sge_rxeof(struct sge_softc *sc) 1144 { 1145 struct ifnet *ifp; 1146 struct mbuf *m; 1147 struct sge_chain_data *cd; 1148 struct sge_desc *cur_rx; 1149 uint32_t rxinfo, rxstat; 1150 int cons, prog; 1151 1152 SGE_LOCK_ASSERT(sc); 1153 1154 ifp = sc->sge_ifp; 1155 cd = &sc->sge_cdata; 1156 1157 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1158 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1159 cons = cd->sge_rx_cons; 1160 for (prog = 0; prog < SGE_RX_RING_CNT; prog++, 1161 SGE_INC(cons, SGE_RX_RING_CNT)) { 1162 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1163 break; 1164 cur_rx = &sc->sge_ldata.sge_rx_ring[cons]; 1165 rxinfo = le32toh(cur_rx->sge_cmdsts); 1166 if ((rxinfo & RDC_OWN) != 0) 1167 break; 1168 rxstat = le32toh(cur_rx->sge_sts_size); 1169 if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 || 1170 SGE_RX_NSEGS(rxstat) != 1) { 1171 /* XXX We don't support multi-segment frames yet. */ 1172 #ifdef SGE_SHOW_ERRORS 1173 device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat, 1174 RX_ERR_BITS); 1175 #endif 1176 sge_discard_rxbuf(sc, cons); 1177 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1178 continue; 1179 } 1180 m = cd->sge_rxdesc[cons].rx_m; 1181 if (sge_newbuf(sc, cons) != 0) { 1182 sge_discard_rxbuf(sc, cons); 1183 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1184 continue; 1185 } 1186 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1187 if ((rxinfo & RDC_IP_CSUM) != 0 && 1188 (rxinfo & RDC_IP_CSUM_OK) != 0) 1189 m->m_pkthdr.csum_flags |= 1190 CSUM_IP_CHECKED | CSUM_IP_VALID; 1191 if (((rxinfo & RDC_TCP_CSUM) != 0 && 1192 (rxinfo & RDC_TCP_CSUM_OK) != 0) || 1193 ((rxinfo & RDC_UDP_CSUM) != 0 && 1194 (rxinfo & RDC_UDP_CSUM_OK) != 0)) { 1195 m->m_pkthdr.csum_flags |= 1196 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1197 m->m_pkthdr.csum_data = 0xffff; 1198 } 1199 } 1200 /* Check for VLAN tagged frame. */ 1201 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 1202 (rxstat & RDS_VLAN) != 0) { 1203 m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK; 1204 m->m_flags |= M_VLANTAG; 1205 } 1206 /* 1207 * Account for 10bytes auto padding which is used 1208 * to align IP header on 32bit boundary. Also note, 1209 * CRC bytes is automatically removed by the 1210 * hardware. 1211 */ 1212 m->m_data += SGE_RX_PAD_BYTES; 1213 m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) - 1214 SGE_RX_PAD_BYTES; 1215 m->m_pkthdr.rcvif = ifp; 1216 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1217 SGE_UNLOCK(sc); 1218 (*ifp->if_input)(ifp, m); 1219 SGE_LOCK(sc); 1220 } 1221 1222 if (prog > 0) { 1223 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1224 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1225 cd->sge_rx_cons = cons; 1226 } 1227 } 1228 1229 /* 1230 * A frame was downloaded to the chip. It's safe for us to clean up 1231 * the list buffers. 1232 */ 1233 static void 1234 sge_txeof(struct sge_softc *sc) 1235 { 1236 struct ifnet *ifp; 1237 struct sge_list_data *ld; 1238 struct sge_chain_data *cd; 1239 struct sge_txdesc *txd; 1240 uint32_t txstat; 1241 int cons, nsegs, prod; 1242 1243 SGE_LOCK_ASSERT(sc); 1244 1245 ifp = sc->sge_ifp; 1246 ld = &sc->sge_ldata; 1247 cd = &sc->sge_cdata; 1248 1249 if (cd->sge_tx_cnt == 0) 1250 return; 1251 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 1252 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1253 cons = cd->sge_tx_cons; 1254 prod = cd->sge_tx_prod; 1255 for (; cons != prod;) { 1256 txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts); 1257 if ((txstat & TDC_OWN) != 0) 1258 break; 1259 /* 1260 * Only the first descriptor of multi-descriptor transmission 1261 * is updated by controller. Driver should skip entire 1262 * chained buffers for the transmitted frame. In other words 1263 * TDC_OWN bit is valid only at the first descriptor of a 1264 * multi-descriptor transmission. 1265 */ 1266 if (SGE_TX_ERROR(txstat) != 0) { 1267 #ifdef SGE_SHOW_ERRORS 1268 device_printf(sc->sge_dev, "Tx error : 0x%b\n", 1269 txstat, TX_ERR_BITS); 1270 #endif 1271 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1272 } else { 1273 #ifdef notyet 1274 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0xFFFF) - 1); 1275 #endif 1276 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1277 } 1278 txd = &cd->sge_txdesc[cons]; 1279 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 1280 ld->sge_tx_ring[cons].sge_cmdsts = 0; 1281 SGE_INC(cons, SGE_TX_RING_CNT); 1282 } 1283 /* Reclaim transmitted mbuf. */ 1284 KASSERT(txd->tx_m != NULL, 1285 ("%s: freeing NULL mbuf\n", __func__)); 1286 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1287 BUS_DMASYNC_POSTWRITE); 1288 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1289 m_freem(txd->tx_m); 1290 txd->tx_m = NULL; 1291 cd->sge_tx_cnt -= txd->tx_ndesc; 1292 KASSERT(cd->sge_tx_cnt >= 0, 1293 ("%s: Active Tx desc counter was garbled\n", __func__)); 1294 txd->tx_ndesc = 0; 1295 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1296 } 1297 cd->sge_tx_cons = cons; 1298 if (cd->sge_tx_cnt == 0) 1299 sc->sge_timer = 0; 1300 } 1301 1302 static void 1303 sge_tick(void *arg) 1304 { 1305 struct sge_softc *sc; 1306 struct mii_data *mii; 1307 struct ifnet *ifp; 1308 1309 sc = arg; 1310 SGE_LOCK_ASSERT(sc); 1311 1312 ifp = sc->sge_ifp; 1313 mii = device_get_softc(sc->sge_miibus); 1314 mii_tick(mii); 1315 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1316 sge_miibus_statchg(sc->sge_dev); 1317 if ((sc->sge_flags & SGE_FLAG_LINK) != 0 && 1318 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1319 sge_start_locked(ifp); 1320 } 1321 /* 1322 * Reclaim transmitted frames here as we do not request 1323 * Tx completion interrupt for every queued frames to 1324 * reduce excessive interrupts. 1325 */ 1326 sge_txeof(sc); 1327 sge_watchdog(sc); 1328 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1329 } 1330 1331 static void 1332 sge_intr(void *arg) 1333 { 1334 struct sge_softc *sc; 1335 struct ifnet *ifp; 1336 uint32_t status; 1337 1338 sc = arg; 1339 SGE_LOCK(sc); 1340 ifp = sc->sge_ifp; 1341 1342 status = CSR_READ_4(sc, IntrStatus); 1343 if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) { 1344 /* Not ours. */ 1345 SGE_UNLOCK(sc); 1346 return; 1347 } 1348 /* Acknowledge interrupts. */ 1349 CSR_WRITE_4(sc, IntrStatus, status); 1350 /* Disable further interrupts. */ 1351 CSR_WRITE_4(sc, IntrMask, 0); 1352 /* 1353 * It seems the controller supports some kind of interrupt 1354 * moderation mechanism but we still don't know how to 1355 * enable that. To reduce number of generated interrupts 1356 * under load we check pending interrupts in a loop. This 1357 * will increase number of register access and is not correct 1358 * way to handle interrupt moderation but there seems to be 1359 * no other way at this time. 1360 */ 1361 for (;;) { 1362 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1363 break; 1364 if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) { 1365 sge_rxeof(sc); 1366 /* Wakeup Rx MAC. */ 1367 if ((status & INTR_RX_IDLE) != 0) 1368 CSR_WRITE_4(sc, RX_CTL, 1369 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1370 } 1371 if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0) 1372 sge_txeof(sc); 1373 status = CSR_READ_4(sc, IntrStatus); 1374 if ((status & SGE_INTRS) == 0) 1375 break; 1376 /* Acknowledge interrupts. */ 1377 CSR_WRITE_4(sc, IntrStatus, status); 1378 } 1379 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1380 /* Re-enable interrupts */ 1381 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1382 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1383 sge_start_locked(ifp); 1384 } 1385 SGE_UNLOCK(sc); 1386 } 1387 1388 /* 1389 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1390 * pointers to the fragment pointers. 1391 */ 1392 static int 1393 sge_encap(struct sge_softc *sc, struct mbuf **m_head) 1394 { 1395 struct mbuf *m; 1396 struct sge_desc *desc; 1397 struct sge_txdesc *txd; 1398 bus_dma_segment_t txsegs[SGE_MAXTXSEGS]; 1399 uint32_t cflags, mss; 1400 int error, i, nsegs, prod, si; 1401 1402 SGE_LOCK_ASSERT(sc); 1403 1404 si = prod = sc->sge_cdata.sge_tx_prod; 1405 txd = &sc->sge_cdata.sge_txdesc[prod]; 1406 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1407 struct ether_header *eh; 1408 struct ip *ip; 1409 struct tcphdr *tcp; 1410 uint32_t ip_off, poff; 1411 1412 if (M_WRITABLE(*m_head) == 0) { 1413 /* Get a writable copy. */ 1414 m = m_dup(*m_head, M_NOWAIT); 1415 m_freem(*m_head); 1416 if (m == NULL) { 1417 *m_head = NULL; 1418 return (ENOBUFS); 1419 } 1420 *m_head = m; 1421 } 1422 ip_off = sizeof(struct ether_header); 1423 m = m_pullup(*m_head, ip_off); 1424 if (m == NULL) { 1425 *m_head = NULL; 1426 return (ENOBUFS); 1427 } 1428 eh = mtod(m, struct ether_header *); 1429 /* Check the existence of VLAN tag. */ 1430 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1431 ip_off = sizeof(struct ether_vlan_header); 1432 m = m_pullup(m, ip_off); 1433 if (m == NULL) { 1434 *m_head = NULL; 1435 return (ENOBUFS); 1436 } 1437 } 1438 m = m_pullup(m, ip_off + sizeof(struct ip)); 1439 if (m == NULL) { 1440 *m_head = NULL; 1441 return (ENOBUFS); 1442 } 1443 ip = (struct ip *)(mtod(m, char *) + ip_off); 1444 poff = ip_off + (ip->ip_hl << 2); 1445 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1446 if (m == NULL) { 1447 *m_head = NULL; 1448 return (ENOBUFS); 1449 } 1450 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1451 m = m_pullup(m, poff + (tcp->th_off << 2)); 1452 if (m == NULL) { 1453 *m_head = NULL; 1454 return (ENOBUFS); 1455 } 1456 /* 1457 * Reset IP checksum and recompute TCP pseudo 1458 * checksum that NDIS specification requires. 1459 */ 1460 ip = (struct ip *)(mtod(m, char *) + ip_off); 1461 ip->ip_sum = 0; 1462 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1463 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1464 htons(IPPROTO_TCP)); 1465 *m_head = m; 1466 } 1467 1468 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1469 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1470 if (error == EFBIG) { 1471 m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS); 1472 if (m == NULL) { 1473 m_freem(*m_head); 1474 *m_head = NULL; 1475 return (ENOBUFS); 1476 } 1477 *m_head = m; 1478 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1479 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1480 if (error != 0) { 1481 m_freem(*m_head); 1482 *m_head = NULL; 1483 return (error); 1484 } 1485 } else if (error != 0) 1486 return (error); 1487 1488 KASSERT(nsegs != 0, ("zero segment returned")); 1489 /* Check descriptor overrun. */ 1490 if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) { 1491 bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap); 1492 return (ENOBUFS); 1493 } 1494 bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap, 1495 BUS_DMASYNC_PREWRITE); 1496 1497 m = *m_head; 1498 cflags = 0; 1499 mss = 0; 1500 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1501 cflags |= TDC_LS; 1502 mss = (uint32_t)m->m_pkthdr.tso_segsz; 1503 mss <<= 16; 1504 } else { 1505 if (m->m_pkthdr.csum_flags & CSUM_IP) 1506 cflags |= TDC_IP_CSUM; 1507 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1508 cflags |= TDC_TCP_CSUM; 1509 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1510 cflags |= TDC_UDP_CSUM; 1511 } 1512 for (i = 0; i < nsegs; i++) { 1513 desc = &sc->sge_ldata.sge_tx_ring[prod]; 1514 if (i == 0) { 1515 desc->sge_sts_size = htole32(m->m_pkthdr.len | mss); 1516 desc->sge_cmdsts = 0; 1517 } else { 1518 desc->sge_sts_size = 0; 1519 desc->sge_cmdsts = htole32(TDC_OWN); 1520 } 1521 desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr)); 1522 desc->sge_flags = htole32(txsegs[i].ds_len); 1523 if (prod == SGE_TX_RING_CNT - 1) 1524 desc->sge_flags |= htole32(RING_END); 1525 sc->sge_cdata.sge_tx_cnt++; 1526 SGE_INC(prod, SGE_TX_RING_CNT); 1527 } 1528 /* Update producer index. */ 1529 sc->sge_cdata.sge_tx_prod = prod; 1530 1531 desc = &sc->sge_ldata.sge_tx_ring[si]; 1532 /* Configure VLAN. */ 1533 if((m->m_flags & M_VLANTAG) != 0) { 1534 cflags |= m->m_pkthdr.ether_vtag; 1535 desc->sge_sts_size |= htole32(TDS_INS_VLAN); 1536 } 1537 desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags); 1538 #if 1 1539 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1540 desc->sge_cmdsts |= htole32(TDC_BST); 1541 #else 1542 if ((sc->sge_flags & SGE_FLAG_FDX) == 0) { 1543 desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF); 1544 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1545 desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST); 1546 } 1547 #endif 1548 /* Request interrupt and give ownership to controller. */ 1549 desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR); 1550 txd->tx_m = m; 1551 txd->tx_ndesc = nsegs; 1552 return (0); 1553 } 1554 1555 static void 1556 sge_start(struct ifnet *ifp) 1557 { 1558 struct sge_softc *sc; 1559 1560 sc = ifp->if_softc; 1561 SGE_LOCK(sc); 1562 sge_start_locked(ifp); 1563 SGE_UNLOCK(sc); 1564 } 1565 1566 static void 1567 sge_start_locked(struct ifnet *ifp) 1568 { 1569 struct sge_softc *sc; 1570 struct mbuf *m_head; 1571 int queued = 0; 1572 1573 sc = ifp->if_softc; 1574 SGE_LOCK_ASSERT(sc); 1575 1576 if ((sc->sge_flags & SGE_FLAG_LINK) == 0 || 1577 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1578 IFF_DRV_RUNNING) 1579 return; 1580 1581 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 1582 if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT - 1583 SGE_MAXTXSEGS)) { 1584 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1585 break; 1586 } 1587 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1588 if (m_head == NULL) 1589 break; 1590 if (sge_encap(sc, &m_head)) { 1591 if (m_head == NULL) 1592 break; 1593 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1594 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1595 break; 1596 } 1597 queued++; 1598 /* 1599 * If there's a BPF listener, bounce a copy of this frame 1600 * to him. 1601 */ 1602 BPF_MTAP(ifp, m_head); 1603 } 1604 1605 if (queued > 0) { 1606 bus_dmamap_sync(sc->sge_cdata.sge_tx_tag, 1607 sc->sge_cdata.sge_tx_dmamap, 1608 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1609 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL); 1610 sc->sge_timer = 5; 1611 } 1612 } 1613 1614 static void 1615 sge_init(void *arg) 1616 { 1617 struct sge_softc *sc; 1618 1619 sc = arg; 1620 SGE_LOCK(sc); 1621 sge_init_locked(sc); 1622 SGE_UNLOCK(sc); 1623 } 1624 1625 static void 1626 sge_init_locked(struct sge_softc *sc) 1627 { 1628 struct ifnet *ifp; 1629 struct mii_data *mii; 1630 uint16_t rxfilt; 1631 int i; 1632 1633 SGE_LOCK_ASSERT(sc); 1634 ifp = sc->sge_ifp; 1635 mii = device_get_softc(sc->sge_miibus); 1636 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1637 return; 1638 /* 1639 * Cancel pending I/O and free all RX/TX buffers. 1640 */ 1641 sge_stop(sc); 1642 sge_reset(sc); 1643 1644 /* Init circular RX list. */ 1645 if (sge_list_rx_init(sc) == ENOBUFS) { 1646 device_printf(sc->sge_dev, "no memory for Rx buffers\n"); 1647 sge_stop(sc); 1648 return; 1649 } 1650 /* Init TX descriptors. */ 1651 sge_list_tx_init(sc); 1652 /* 1653 * Load the address of the RX and TX lists. 1654 */ 1655 CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr)); 1656 CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr)); 1657 1658 CSR_WRITE_4(sc, TxMacControl, 0x60); 1659 CSR_WRITE_4(sc, RxWakeOnLan, 0); 1660 CSR_WRITE_4(sc, RxWakeOnLanData, 0); 1661 /* Allow receiving VLAN frames. */ 1662 CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + 1663 SGE_RX_PAD_BYTES); 1664 1665 for (i = 0; i < ETHER_ADDR_LEN; i++) 1666 CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]); 1667 /* Configure RX MAC. */ 1668 rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB; 1669 CSR_WRITE_2(sc, RxMacControl, rxfilt); 1670 sge_rxfilter(sc); 1671 sge_setvlan(sc); 1672 1673 /* Initialize default speed/duplex information. */ 1674 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) 1675 sc->sge_flags |= SGE_FLAG_SPEED_1000; 1676 sc->sge_flags |= SGE_FLAG_FDX; 1677 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) 1678 CSR_WRITE_4(sc, StationControl, 0x04008001); 1679 else 1680 CSR_WRITE_4(sc, StationControl, 0x04000001); 1681 /* 1682 * XXX Try to mitigate interrupts. 1683 */ 1684 CSR_WRITE_4(sc, IntrControl, 0x08880000); 1685 #ifdef notyet 1686 if (sc->sge_intrcontrol != 0) 1687 CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol); 1688 if (sc->sge_intrtimer != 0) 1689 CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer); 1690 #endif 1691 1692 /* 1693 * Clear and enable interrupts. 1694 */ 1695 CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF); 1696 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1697 1698 /* Enable receiver and transmitter. */ 1699 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB); 1700 CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1701 1702 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1703 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1704 1705 sc->sge_flags &= ~SGE_FLAG_LINK; 1706 mii_mediachg(mii); 1707 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1708 } 1709 1710 /* 1711 * Set media options. 1712 */ 1713 static int 1714 sge_ifmedia_upd(struct ifnet *ifp) 1715 { 1716 struct sge_softc *sc; 1717 struct mii_data *mii; 1718 struct mii_softc *miisc; 1719 int error; 1720 1721 sc = ifp->if_softc; 1722 SGE_LOCK(sc); 1723 mii = device_get_softc(sc->sge_miibus); 1724 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1725 PHY_RESET(miisc); 1726 error = mii_mediachg(mii); 1727 SGE_UNLOCK(sc); 1728 1729 return (error); 1730 } 1731 1732 /* 1733 * Report current media status. 1734 */ 1735 static void 1736 sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1737 { 1738 struct sge_softc *sc; 1739 struct mii_data *mii; 1740 1741 sc = ifp->if_softc; 1742 SGE_LOCK(sc); 1743 mii = device_get_softc(sc->sge_miibus); 1744 if ((ifp->if_flags & IFF_UP) == 0) { 1745 SGE_UNLOCK(sc); 1746 return; 1747 } 1748 mii_pollstat(mii); 1749 ifmr->ifm_active = mii->mii_media_active; 1750 ifmr->ifm_status = mii->mii_media_status; 1751 SGE_UNLOCK(sc); 1752 } 1753 1754 static int 1755 sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1756 { 1757 struct sge_softc *sc; 1758 struct ifreq *ifr; 1759 struct mii_data *mii; 1760 int error = 0, mask, reinit; 1761 1762 sc = ifp->if_softc; 1763 ifr = (struct ifreq *)data; 1764 1765 switch(command) { 1766 case SIOCSIFFLAGS: 1767 SGE_LOCK(sc); 1768 if ((ifp->if_flags & IFF_UP) != 0) { 1769 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 1770 ((ifp->if_flags ^ sc->sge_if_flags) & 1771 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1772 sge_rxfilter(sc); 1773 else 1774 sge_init_locked(sc); 1775 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1776 sge_stop(sc); 1777 sc->sge_if_flags = ifp->if_flags; 1778 SGE_UNLOCK(sc); 1779 break; 1780 case SIOCSIFCAP: 1781 SGE_LOCK(sc); 1782 reinit = 0; 1783 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1784 if ((mask & IFCAP_TXCSUM) != 0 && 1785 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 1786 ifp->if_capenable ^= IFCAP_TXCSUM; 1787 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1788 ifp->if_hwassist |= SGE_CSUM_FEATURES; 1789 else 1790 ifp->if_hwassist &= ~SGE_CSUM_FEATURES; 1791 } 1792 if ((mask & IFCAP_RXCSUM) != 0 && 1793 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 1794 ifp->if_capenable ^= IFCAP_RXCSUM; 1795 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1796 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 1797 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1798 if ((mask & IFCAP_TSO4) != 0 && 1799 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 1800 ifp->if_capenable ^= IFCAP_TSO4; 1801 if ((ifp->if_capenable & IFCAP_TSO4) != 0) 1802 ifp->if_hwassist |= CSUM_TSO; 1803 else 1804 ifp->if_hwassist &= ~CSUM_TSO; 1805 } 1806 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1807 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 1808 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1809 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1810 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 1811 /* 1812 * Due to unknown reason, toggling VLAN hardware 1813 * tagging require interface reinitialization. 1814 */ 1815 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1816 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1817 ifp->if_capenable &= 1818 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 1819 reinit = 1; 1820 } 1821 if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1822 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1823 sge_init_locked(sc); 1824 } 1825 SGE_UNLOCK(sc); 1826 VLAN_CAPABILITIES(ifp); 1827 break; 1828 case SIOCADDMULTI: 1829 case SIOCDELMULTI: 1830 SGE_LOCK(sc); 1831 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1832 sge_rxfilter(sc); 1833 SGE_UNLOCK(sc); 1834 break; 1835 case SIOCGIFMEDIA: 1836 case SIOCSIFMEDIA: 1837 mii = device_get_softc(sc->sge_miibus); 1838 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1839 break; 1840 default: 1841 error = ether_ioctl(ifp, command, data); 1842 break; 1843 } 1844 1845 return (error); 1846 } 1847 1848 static void 1849 sge_watchdog(struct sge_softc *sc) 1850 { 1851 struct ifnet *ifp; 1852 1853 SGE_LOCK_ASSERT(sc); 1854 if (sc->sge_timer == 0 || --sc->sge_timer > 0) 1855 return; 1856 1857 ifp = sc->sge_ifp; 1858 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1859 if (1 || bootverbose) 1860 device_printf(sc->sge_dev, 1861 "watchdog timeout (lost link)\n"); 1862 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1863 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1864 sge_init_locked(sc); 1865 return; 1866 } 1867 device_printf(sc->sge_dev, "watchdog timeout\n"); 1868 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1869 1870 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1871 sge_init_locked(sc); 1872 if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd)) 1873 sge_start_locked(ifp); 1874 } 1875 1876 /* 1877 * Stop the adapter and free any mbufs allocated to the 1878 * RX and TX lists. 1879 */ 1880 static void 1881 sge_stop(struct sge_softc *sc) 1882 { 1883 struct ifnet *ifp; 1884 1885 ifp = sc->sge_ifp; 1886 1887 SGE_LOCK_ASSERT(sc); 1888 1889 sc->sge_timer = 0; 1890 callout_stop(&sc->sge_stat_ch); 1891 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1892 1893 CSR_WRITE_4(sc, IntrMask, 0); 1894 CSR_READ_4(sc, IntrMask); 1895 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1896 /* Stop TX/RX MAC. */ 1897 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 1898 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 1899 /* XXX Can we assume active DMA cycles gone? */ 1900 DELAY(2000); 1901 CSR_WRITE_4(sc, IntrMask, 0); 1902 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1903 1904 sc->sge_flags &= ~SGE_FLAG_LINK; 1905 sge_list_rx_free(sc); 1906 sge_list_tx_free(sc); 1907 } 1908