1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com> 5 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net> 6 * Copyright (c) 1997, 1998, 1999 7 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Bill Paul. 20 * 4. Neither the name of the author nor the names of any co-contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 27 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR 28 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 35 * OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 /* 42 * SiS 190/191 PCI Ethernet NIC driver. 43 * 44 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original 45 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by 46 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu 47 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for 48 * review and very useful comments. 49 * 50 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the 51 * Linux and Solaris drivers. 52 */ 53 54 #include <sys/param.h> 55 #include <sys/systm.h> 56 #include <sys/bus.h> 57 #include <sys/endian.h> 58 #include <sys/kernel.h> 59 #include <sys/lock.h> 60 #include <sys/malloc.h> 61 #include <sys/mbuf.h> 62 #include <sys/module.h> 63 #include <sys/mutex.h> 64 #include <sys/rman.h> 65 #include <sys/socket.h> 66 #include <sys/sockio.h> 67 68 #include <net/bpf.h> 69 #include <net/if.h> 70 #include <net/if_var.h> 71 #include <net/if_arp.h> 72 #include <net/ethernet.h> 73 #include <net/if_dl.h> 74 #include <net/if_media.h> 75 #include <net/if_types.h> 76 #include <net/if_vlan_var.h> 77 78 #include <netinet/in.h> 79 #include <netinet/in_systm.h> 80 #include <netinet/ip.h> 81 #include <netinet/tcp.h> 82 83 #include <machine/bus.h> 84 #include <machine/in_cksum.h> 85 86 #include <dev/mii/mii.h> 87 #include <dev/mii/miivar.h> 88 89 #include <dev/pci/pcireg.h> 90 #include <dev/pci/pcivar.h> 91 92 #include <dev/sge/if_sgereg.h> 93 94 MODULE_DEPEND(sge, pci, 1, 1, 1); 95 MODULE_DEPEND(sge, ether, 1, 1, 1); 96 MODULE_DEPEND(sge, miibus, 1, 1, 1); 97 98 /* "device miibus0" required. See GENERIC if you get errors here. */ 99 #include "miibus_if.h" 100 101 /* 102 * Various supported device vendors/types and their names. 103 */ 104 static struct sge_type sge_devs[] = { 105 { SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" }, 106 { SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" }, 107 { 0, 0, NULL } 108 }; 109 110 static int sge_probe(device_t); 111 static int sge_attach(device_t); 112 static int sge_detach(device_t); 113 static int sge_shutdown(device_t); 114 static int sge_suspend(device_t); 115 static int sge_resume(device_t); 116 117 static int sge_miibus_readreg(device_t, int, int); 118 static int sge_miibus_writereg(device_t, int, int, int); 119 static void sge_miibus_statchg(device_t); 120 121 static int sge_newbuf(struct sge_softc *, int); 122 static int sge_encap(struct sge_softc *, struct mbuf **); 123 static __inline void 124 sge_discard_rxbuf(struct sge_softc *, int); 125 static void sge_rxeof(struct sge_softc *); 126 static void sge_txeof(struct sge_softc *); 127 static void sge_intr(void *); 128 static void sge_tick(void *); 129 static void sge_start(if_t); 130 static void sge_start_locked(if_t); 131 static int sge_ioctl(if_t, u_long, caddr_t); 132 static void sge_init(void *); 133 static void sge_init_locked(struct sge_softc *); 134 static void sge_stop(struct sge_softc *); 135 static void sge_watchdog(struct sge_softc *); 136 static int sge_ifmedia_upd(if_t); 137 static void sge_ifmedia_sts(if_t, struct ifmediareq *); 138 139 static int sge_get_mac_addr_apc(struct sge_softc *, uint8_t *); 140 static int sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *); 141 static uint16_t sge_read_eeprom(struct sge_softc *, int); 142 143 static void sge_rxfilter(struct sge_softc *); 144 static void sge_setvlan(struct sge_softc *); 145 static void sge_reset(struct sge_softc *); 146 static int sge_list_rx_init(struct sge_softc *); 147 static int sge_list_rx_free(struct sge_softc *); 148 static int sge_list_tx_init(struct sge_softc *); 149 static int sge_list_tx_free(struct sge_softc *); 150 151 static int sge_dma_alloc(struct sge_softc *); 152 static void sge_dma_free(struct sge_softc *); 153 static void sge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 154 155 static device_method_t sge_methods[] = { 156 /* Device interface */ 157 DEVMETHOD(device_probe, sge_probe), 158 DEVMETHOD(device_attach, sge_attach), 159 DEVMETHOD(device_detach, sge_detach), 160 DEVMETHOD(device_suspend, sge_suspend), 161 DEVMETHOD(device_resume, sge_resume), 162 DEVMETHOD(device_shutdown, sge_shutdown), 163 164 /* MII interface */ 165 DEVMETHOD(miibus_readreg, sge_miibus_readreg), 166 DEVMETHOD(miibus_writereg, sge_miibus_writereg), 167 DEVMETHOD(miibus_statchg, sge_miibus_statchg), 168 169 DEVMETHOD_END 170 }; 171 172 static driver_t sge_driver = { 173 "sge", sge_methods, sizeof(struct sge_softc) 174 }; 175 176 DRIVER_MODULE(sge, pci, sge_driver, 0, 0); 177 DRIVER_MODULE(miibus, sge, miibus_driver, 0, 0); 178 179 /* 180 * Register space access macros. 181 */ 182 #define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sge_res, reg, val) 183 #define CSR_WRITE_2(sc, reg, val) bus_write_2(sc->sge_res, reg, val) 184 #define CSR_WRITE_1(cs, reg, val) bus_write_1(sc->sge_res, reg, val) 185 186 #define CSR_READ_4(sc, reg) bus_read_4(sc->sge_res, reg) 187 #define CSR_READ_2(sc, reg) bus_read_2(sc->sge_res, reg) 188 #define CSR_READ_1(sc, reg) bus_read_1(sc->sge_res, reg) 189 190 /* Define to show Tx/Rx error status. */ 191 #undef SGE_SHOW_ERRORS 192 193 #define SGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 194 195 static void 196 sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 197 { 198 bus_addr_t *p; 199 200 if (error != 0) 201 return; 202 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 203 p = arg; 204 *p = segs->ds_addr; 205 } 206 207 /* 208 * Read a sequence of words from the EEPROM. 209 */ 210 static uint16_t 211 sge_read_eeprom(struct sge_softc *sc, int offset) 212 { 213 uint32_t val; 214 int i; 215 216 KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big")); 217 CSR_WRITE_4(sc, ROMInterface, 218 EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT)); 219 DELAY(500); 220 for (i = 0; i < SGE_TIMEOUT; i++) { 221 val = CSR_READ_4(sc, ROMInterface); 222 if ((val & EI_REQ) == 0) 223 break; 224 DELAY(100); 225 } 226 if (i == SGE_TIMEOUT) { 227 device_printf(sc->sge_dev, 228 "EEPROM read timeout : 0x%08x\n", val); 229 return (0xffff); 230 } 231 232 return ((val & EI_DATA) >> EI_DATA_SHIFT); 233 } 234 235 static int 236 sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest) 237 { 238 uint16_t val; 239 int i; 240 241 val = sge_read_eeprom(sc, EEPROMSignature); 242 if (val == 0xffff || val == 0) { 243 device_printf(sc->sge_dev, 244 "invalid EEPROM signature : 0x%04x\n", val); 245 return (EINVAL); 246 } 247 248 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 249 val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2); 250 dest[i + 0] = (uint8_t)val; 251 dest[i + 1] = (uint8_t)(val >> 8); 252 } 253 254 if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0) 255 sc->sge_flags |= SGE_FLAG_RGMII; 256 return (0); 257 } 258 259 /* 260 * For SiS96x, APC CMOS RAM is used to store ethernet address. 261 * APC CMOS RAM is accessed through ISA bridge. 262 */ 263 static int 264 sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest) 265 { 266 #if defined(__amd64__) || defined(__i386__) 267 devclass_t pci; 268 device_t bus, dev = NULL; 269 device_t *kids; 270 struct apc_tbl { 271 uint16_t vid; 272 uint16_t did; 273 } *tp, apc_tbls[] = { 274 { SIS_VENDORID, 0x0965 }, 275 { SIS_VENDORID, 0x0966 }, 276 { SIS_VENDORID, 0x0968 } 277 }; 278 uint8_t reg; 279 int busnum, i, j, numkids; 280 281 pci = devclass_find("pci"); 282 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) { 283 bus = devclass_get_device(pci, busnum); 284 if (!bus) 285 continue; 286 if (device_get_children(bus, &kids, &numkids) != 0) 287 continue; 288 for (i = 0; i < numkids; i++) { 289 dev = kids[i]; 290 if (pci_get_class(dev) == PCIC_BRIDGE && 291 pci_get_subclass(dev) == PCIS_BRIDGE_ISA) { 292 tp = apc_tbls; 293 for (j = 0; j < nitems(apc_tbls); j++) { 294 if (pci_get_vendor(dev) == tp->vid && 295 pci_get_device(dev) == tp->did) { 296 free(kids, M_TEMP); 297 goto apc_found; 298 } 299 tp++; 300 } 301 } 302 } 303 free(kids, M_TEMP); 304 } 305 device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n"); 306 return (EINVAL); 307 apc_found: 308 /* Enable port 0x78 and 0x79 to access APC registers. */ 309 reg = pci_read_config(dev, 0x48, 1); 310 pci_write_config(dev, 0x48, reg & ~0x02, 1); 311 DELAY(50); 312 pci_read_config(dev, 0x48, 1); 313 /* Read stored ethernet address. */ 314 for (i = 0; i < ETHER_ADDR_LEN; i++) { 315 outb(0x78, 0x09 + i); 316 dest[i] = inb(0x79); 317 } 318 outb(0x78, 0x12); 319 if ((inb(0x79) & 0x80) != 0) 320 sc->sge_flags |= SGE_FLAG_RGMII; 321 /* Restore access to APC registers. */ 322 pci_write_config(dev, 0x48, reg, 1); 323 324 return (0); 325 #else 326 return (EINVAL); 327 #endif 328 } 329 330 static int 331 sge_miibus_readreg(device_t dev, int phy, int reg) 332 { 333 struct sge_softc *sc; 334 uint32_t val; 335 int i; 336 337 sc = device_get_softc(dev); 338 CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) | 339 (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ); 340 DELAY(10); 341 for (i = 0; i < SGE_TIMEOUT; i++) { 342 val = CSR_READ_4(sc, GMIIControl); 343 if ((val & GMI_REQ) == 0) 344 break; 345 DELAY(10); 346 } 347 if (i == SGE_TIMEOUT) { 348 device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg); 349 return (0); 350 } 351 return ((val & GMI_DATA) >> GMI_DATA_SHIFT); 352 } 353 354 static int 355 sge_miibus_writereg(device_t dev, int phy, int reg, int data) 356 { 357 struct sge_softc *sc; 358 uint32_t val; 359 int i; 360 361 sc = device_get_softc(dev); 362 CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) | 363 (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) | 364 GMI_OP_WR | GMI_REQ); 365 DELAY(10); 366 for (i = 0; i < SGE_TIMEOUT; i++) { 367 val = CSR_READ_4(sc, GMIIControl); 368 if ((val & GMI_REQ) == 0) 369 break; 370 DELAY(10); 371 } 372 if (i == SGE_TIMEOUT) 373 device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg); 374 return (0); 375 } 376 377 static void 378 sge_miibus_statchg(device_t dev) 379 { 380 struct sge_softc *sc; 381 struct mii_data *mii; 382 if_t ifp; 383 uint32_t ctl, speed; 384 385 sc = device_get_softc(dev); 386 mii = device_get_softc(sc->sge_miibus); 387 ifp = sc->sge_ifp; 388 if (mii == NULL || ifp == NULL || 389 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 390 return; 391 speed = 0; 392 sc->sge_flags &= ~SGE_FLAG_LINK; 393 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 394 (IFM_ACTIVE | IFM_AVALID)) { 395 switch (IFM_SUBTYPE(mii->mii_media_active)) { 396 case IFM_10_T: 397 sc->sge_flags |= SGE_FLAG_LINK; 398 speed = SC_SPEED_10; 399 break; 400 case IFM_100_TX: 401 sc->sge_flags |= SGE_FLAG_LINK; 402 speed = SC_SPEED_100; 403 break; 404 case IFM_1000_T: 405 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) { 406 sc->sge_flags |= SGE_FLAG_LINK; 407 speed = SC_SPEED_1000; 408 } 409 break; 410 default: 411 break; 412 } 413 } 414 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) 415 return; 416 /* Reprogram MAC to resolved speed/duplex/flow-control parameters. */ 417 ctl = CSR_READ_4(sc, StationControl); 418 ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK); 419 if (speed == SC_SPEED_1000) { 420 ctl |= 0x07000000; 421 sc->sge_flags |= SGE_FLAG_SPEED_1000; 422 } else { 423 ctl |= 0x04000000; 424 sc->sge_flags &= ~SGE_FLAG_SPEED_1000; 425 } 426 #ifdef notyet 427 if ((sc->sge_flags & SGE_FLAG_GMII) != 0) 428 ctl |= 0x03000000; 429 #endif 430 ctl |= speed; 431 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 432 ctl |= SC_FDX; 433 sc->sge_flags |= SGE_FLAG_FDX; 434 } else 435 sc->sge_flags &= ~SGE_FLAG_FDX; 436 CSR_WRITE_4(sc, StationControl, ctl); 437 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) { 438 CSR_WRITE_4(sc, RGMIIDelay, 0x0441); 439 CSR_WRITE_4(sc, RGMIIDelay, 0x0440); 440 } 441 } 442 443 static u_int 444 sge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int count) 445 { 446 uint32_t crc, *hashes = arg; 447 448 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN); 449 hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 450 451 return (1); 452 } 453 454 static void 455 sge_rxfilter(struct sge_softc *sc) 456 { 457 if_t ifp; 458 uint32_t hashes[2]; 459 uint16_t rxfilt; 460 461 SGE_LOCK_ASSERT(sc); 462 463 ifp = sc->sge_ifp; 464 rxfilt = CSR_READ_2(sc, RxMacControl); 465 rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast); 466 rxfilt |= AcceptMyPhys; 467 if ((if_getflags(ifp) & IFF_BROADCAST) != 0) 468 rxfilt |= AcceptBroadcast; 469 if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 470 if ((if_getflags(ifp) & IFF_PROMISC) != 0) 471 rxfilt |= AcceptAllPhys; 472 rxfilt |= AcceptMulticast; 473 hashes[0] = 0xFFFFFFFF; 474 hashes[1] = 0xFFFFFFFF; 475 } else { 476 rxfilt |= AcceptMulticast; 477 hashes[0] = hashes[1] = 0; 478 /* Now program new ones. */ 479 if_foreach_llmaddr(ifp, sge_hash_maddr, hashes); 480 } 481 CSR_WRITE_2(sc, RxMacControl, rxfilt); 482 CSR_WRITE_4(sc, RxHashTable, hashes[0]); 483 CSR_WRITE_4(sc, RxHashTable2, hashes[1]); 484 } 485 486 static void 487 sge_setvlan(struct sge_softc *sc) 488 { 489 if_t ifp; 490 uint16_t rxfilt; 491 492 SGE_LOCK_ASSERT(sc); 493 494 ifp = sc->sge_ifp; 495 if ((if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) == 0) 496 return; 497 rxfilt = CSR_READ_2(sc, RxMacControl); 498 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) 499 rxfilt |= RXMAC_STRIP_VLAN; 500 else 501 rxfilt &= ~RXMAC_STRIP_VLAN; 502 CSR_WRITE_2(sc, RxMacControl, rxfilt); 503 } 504 505 static void 506 sge_reset(struct sge_softc *sc) 507 { 508 509 CSR_WRITE_4(sc, IntrMask, 0); 510 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 511 512 /* Soft reset. */ 513 CSR_WRITE_4(sc, IntrControl, 0x8000); 514 CSR_READ_4(sc, IntrControl); 515 DELAY(100); 516 CSR_WRITE_4(sc, IntrControl, 0); 517 /* Stop MAC. */ 518 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 519 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 520 521 CSR_WRITE_4(sc, IntrMask, 0); 522 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 523 524 CSR_WRITE_4(sc, GMIIControl, 0); 525 } 526 527 /* 528 * Probe for an SiS chip. Check the PCI vendor and device 529 * IDs against our list and return a device name if we find a match. 530 */ 531 static int 532 sge_probe(device_t dev) 533 { 534 struct sge_type *t; 535 536 t = sge_devs; 537 while (t->sge_name != NULL) { 538 if ((pci_get_vendor(dev) == t->sge_vid) && 539 (pci_get_device(dev) == t->sge_did)) { 540 device_set_desc(dev, t->sge_name); 541 return (BUS_PROBE_DEFAULT); 542 } 543 t++; 544 } 545 546 return (ENXIO); 547 } 548 549 /* 550 * Attach the interface. Allocate softc structures, do ifmedia 551 * setup and ethernet/BPF attach. 552 */ 553 static int 554 sge_attach(device_t dev) 555 { 556 struct sge_softc *sc; 557 if_t ifp; 558 uint8_t eaddr[ETHER_ADDR_LEN]; 559 int error = 0, rid; 560 561 sc = device_get_softc(dev); 562 sc->sge_dev = dev; 563 564 mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 565 MTX_DEF); 566 callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0); 567 568 /* 569 * Map control/status registers. 570 */ 571 pci_enable_busmaster(dev); 572 573 /* Allocate resources. */ 574 sc->sge_res_id = PCIR_BAR(0); 575 sc->sge_res_type = SYS_RES_MEMORY; 576 sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type, 577 &sc->sge_res_id, RF_ACTIVE); 578 if (sc->sge_res == NULL) { 579 device_printf(dev, "couldn't allocate resource\n"); 580 error = ENXIO; 581 goto fail; 582 } 583 584 rid = 0; 585 sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 586 RF_SHAREABLE | RF_ACTIVE); 587 if (sc->sge_irq == NULL) { 588 device_printf(dev, "couldn't allocate IRQ resources\n"); 589 error = ENXIO; 590 goto fail; 591 } 592 sc->sge_rev = pci_get_revid(dev); 593 if (pci_get_device(dev) == SIS_DEVICEID_190) 594 sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190; 595 /* Reset the adapter. */ 596 sge_reset(sc); 597 598 /* Get MAC address from the EEPROM. */ 599 if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0) 600 sge_get_mac_addr_apc(sc, eaddr); 601 else 602 sge_get_mac_addr_eeprom(sc, eaddr); 603 604 if ((error = sge_dma_alloc(sc)) != 0) 605 goto fail; 606 607 ifp = sc->sge_ifp = if_alloc(IFT_ETHER); 608 if (ifp == NULL) { 609 device_printf(dev, "cannot allocate ifnet structure.\n"); 610 error = ENOSPC; 611 goto fail; 612 } 613 if_setsoftc(ifp, sc); 614 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 615 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 616 if_setioctlfn(ifp, sge_ioctl); 617 if_setstartfn(ifp, sge_start); 618 if_setinitfn(ifp, sge_init); 619 if_setsendqlen(ifp, SGE_TX_RING_CNT - 1); 620 if_setsendqready(ifp); 621 if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4); 622 if_sethwassist(ifp, SGE_CSUM_FEATURES | CSUM_TSO); 623 if_setcapenable(ifp, if_getcapabilities(ifp)); 624 /* 625 * Do MII setup. 626 */ 627 error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd, 628 sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 629 if (error != 0) { 630 device_printf(dev, "attaching PHYs failed\n"); 631 goto fail; 632 } 633 634 /* 635 * Call MI attach routine. 636 */ 637 ether_ifattach(ifp, eaddr); 638 639 /* VLAN setup. */ 640 if_setcapabilities(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | 641 IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU); 642 if_setcapenable(ifp, if_getcapabilities(ifp)); 643 /* Tell the upper layer(s) we support long frames. */ 644 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 645 646 /* Hook interrupt last to avoid having to lock softc */ 647 error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE, 648 NULL, sge_intr, sc, &sc->sge_intrhand); 649 if (error) { 650 device_printf(dev, "couldn't set up irq\n"); 651 ether_ifdetach(ifp); 652 goto fail; 653 } 654 655 fail: 656 if (error) 657 sge_detach(dev); 658 659 return (error); 660 } 661 662 /* 663 * Shutdown hardware and free up resources. This can be called any 664 * time after the mutex has been initialized. It is called in both 665 * the error case in attach and the normal detach case so it needs 666 * to be careful about only freeing resources that have actually been 667 * allocated. 668 */ 669 static int 670 sge_detach(device_t dev) 671 { 672 struct sge_softc *sc; 673 if_t ifp; 674 675 sc = device_get_softc(dev); 676 ifp = sc->sge_ifp; 677 /* These should only be active if attach succeeded. */ 678 if (device_is_attached(dev)) { 679 ether_ifdetach(ifp); 680 SGE_LOCK(sc); 681 sge_stop(sc); 682 SGE_UNLOCK(sc); 683 callout_drain(&sc->sge_stat_ch); 684 } 685 if (sc->sge_miibus) 686 device_delete_child(dev, sc->sge_miibus); 687 bus_generic_detach(dev); 688 689 if (sc->sge_intrhand) 690 bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand); 691 if (sc->sge_irq) 692 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq); 693 if (sc->sge_res) 694 bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id, 695 sc->sge_res); 696 if (ifp) 697 if_free(ifp); 698 sge_dma_free(sc); 699 mtx_destroy(&sc->sge_mtx); 700 701 return (0); 702 } 703 704 /* 705 * Stop all chip I/O so that the kernel's probe routines don't 706 * get confused by errant DMAs when rebooting. 707 */ 708 static int 709 sge_shutdown(device_t dev) 710 { 711 struct sge_softc *sc; 712 713 sc = device_get_softc(dev); 714 SGE_LOCK(sc); 715 sge_stop(sc); 716 SGE_UNLOCK(sc); 717 return (0); 718 } 719 720 static int 721 sge_suspend(device_t dev) 722 { 723 struct sge_softc *sc; 724 if_t ifp; 725 726 sc = device_get_softc(dev); 727 SGE_LOCK(sc); 728 ifp = sc->sge_ifp; 729 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 730 sge_stop(sc); 731 SGE_UNLOCK(sc); 732 return (0); 733 } 734 735 static int 736 sge_resume(device_t dev) 737 { 738 struct sge_softc *sc; 739 if_t ifp; 740 741 sc = device_get_softc(dev); 742 SGE_LOCK(sc); 743 ifp = sc->sge_ifp; 744 if ((if_getflags(ifp) & IFF_UP) != 0) 745 sge_init_locked(sc); 746 SGE_UNLOCK(sc); 747 return (0); 748 } 749 750 static int 751 sge_dma_alloc(struct sge_softc *sc) 752 { 753 struct sge_chain_data *cd; 754 struct sge_list_data *ld; 755 struct sge_rxdesc *rxd; 756 struct sge_txdesc *txd; 757 int error, i; 758 759 cd = &sc->sge_cdata; 760 ld = &sc->sge_ldata; 761 error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev), 762 1, 0, /* alignment, boundary */ 763 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 764 BUS_SPACE_MAXADDR, /* highaddr */ 765 NULL, NULL, /* filter, filterarg */ 766 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 767 1, /* nsegments */ 768 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 769 0, /* flags */ 770 NULL, /* lockfunc */ 771 NULL, /* lockarg */ 772 &cd->sge_tag); 773 if (error != 0) { 774 device_printf(sc->sge_dev, 775 "could not create parent DMA tag.\n"); 776 goto fail; 777 } 778 779 /* RX descriptor ring */ 780 error = bus_dma_tag_create(cd->sge_tag, 781 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 782 BUS_SPACE_MAXADDR, /* lowaddr */ 783 BUS_SPACE_MAXADDR, /* highaddr */ 784 NULL, NULL, /* filter, filterarg */ 785 SGE_RX_RING_SZ, 1, /* maxsize,nsegments */ 786 SGE_RX_RING_SZ, /* maxsegsize */ 787 0, /* flags */ 788 NULL, /* lockfunc */ 789 NULL, /* lockarg */ 790 &cd->sge_rx_tag); 791 if (error != 0) { 792 device_printf(sc->sge_dev, 793 "could not create Rx ring DMA tag.\n"); 794 goto fail; 795 } 796 /* Allocate DMA'able memory and load DMA map for RX ring. */ 797 error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring, 798 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 799 &cd->sge_rx_dmamap); 800 if (error != 0) { 801 device_printf(sc->sge_dev, 802 "could not allocate DMA'able memory for Rx ring.\n"); 803 goto fail; 804 } 805 error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap, 806 ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr, 807 &ld->sge_rx_paddr, BUS_DMA_NOWAIT); 808 if (error != 0) { 809 device_printf(sc->sge_dev, 810 "could not load DMA'able memory for Rx ring.\n"); 811 } 812 813 /* TX descriptor ring */ 814 error = bus_dma_tag_create(cd->sge_tag, 815 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 816 BUS_SPACE_MAXADDR, /* lowaddr */ 817 BUS_SPACE_MAXADDR, /* highaddr */ 818 NULL, NULL, /* filter, filterarg */ 819 SGE_TX_RING_SZ, 1, /* maxsize,nsegments */ 820 SGE_TX_RING_SZ, /* maxsegsize */ 821 0, /* flags */ 822 NULL, /* lockfunc */ 823 NULL, /* lockarg */ 824 &cd->sge_tx_tag); 825 if (error != 0) { 826 device_printf(sc->sge_dev, 827 "could not create Rx ring DMA tag.\n"); 828 goto fail; 829 } 830 /* Allocate DMA'able memory and load DMA map for TX ring. */ 831 error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring, 832 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 833 &cd->sge_tx_dmamap); 834 if (error != 0) { 835 device_printf(sc->sge_dev, 836 "could not allocate DMA'able memory for Tx ring.\n"); 837 goto fail; 838 } 839 error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap, 840 ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr, 841 &ld->sge_tx_paddr, BUS_DMA_NOWAIT); 842 if (error != 0) { 843 device_printf(sc->sge_dev, 844 "could not load DMA'able memory for Rx ring.\n"); 845 goto fail; 846 } 847 848 /* Create DMA tag for Tx buffers. */ 849 error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR, 850 BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS, 851 SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag); 852 if (error != 0) { 853 device_printf(sc->sge_dev, 854 "could not create Tx mbuf DMA tag.\n"); 855 goto fail; 856 } 857 858 /* Create DMA tag for Rx buffers. */ 859 error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0, 860 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 861 MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag); 862 if (error != 0) { 863 device_printf(sc->sge_dev, 864 "could not create Rx mbuf DMA tag.\n"); 865 goto fail; 866 } 867 868 /* Create DMA maps for Tx buffers. */ 869 for (i = 0; i < SGE_TX_RING_CNT; i++) { 870 txd = &cd->sge_txdesc[i]; 871 txd->tx_m = NULL; 872 txd->tx_dmamap = NULL; 873 txd->tx_ndesc = 0; 874 error = bus_dmamap_create(cd->sge_txmbuf_tag, 0, 875 &txd->tx_dmamap); 876 if (error != 0) { 877 device_printf(sc->sge_dev, 878 "could not create Tx DMA map.\n"); 879 goto fail; 880 } 881 } 882 /* Create spare DMA map for Rx buffer. */ 883 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map); 884 if (error != 0) { 885 device_printf(sc->sge_dev, 886 "could not create spare Rx DMA map.\n"); 887 goto fail; 888 } 889 /* Create DMA maps for Rx buffers. */ 890 for (i = 0; i < SGE_RX_RING_CNT; i++) { 891 rxd = &cd->sge_rxdesc[i]; 892 rxd->rx_m = NULL; 893 rxd->rx_dmamap = NULL; 894 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, 895 &rxd->rx_dmamap); 896 if (error) { 897 device_printf(sc->sge_dev, 898 "could not create Rx DMA map.\n"); 899 goto fail; 900 } 901 } 902 fail: 903 return (error); 904 } 905 906 static void 907 sge_dma_free(struct sge_softc *sc) 908 { 909 struct sge_chain_data *cd; 910 struct sge_list_data *ld; 911 struct sge_rxdesc *rxd; 912 struct sge_txdesc *txd; 913 int i; 914 915 cd = &sc->sge_cdata; 916 ld = &sc->sge_ldata; 917 /* Rx ring. */ 918 if (cd->sge_rx_tag != NULL) { 919 if (ld->sge_rx_paddr != 0) 920 bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap); 921 if (ld->sge_rx_ring != NULL) 922 bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring, 923 cd->sge_rx_dmamap); 924 ld->sge_rx_ring = NULL; 925 ld->sge_rx_paddr = 0; 926 bus_dma_tag_destroy(cd->sge_rx_tag); 927 cd->sge_rx_tag = NULL; 928 } 929 /* Tx ring. */ 930 if (cd->sge_tx_tag != NULL) { 931 if (ld->sge_tx_paddr != 0) 932 bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap); 933 if (ld->sge_tx_ring != NULL) 934 bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring, 935 cd->sge_tx_dmamap); 936 ld->sge_tx_ring = NULL; 937 ld->sge_tx_paddr = 0; 938 bus_dma_tag_destroy(cd->sge_tx_tag); 939 cd->sge_tx_tag = NULL; 940 } 941 /* Rx buffers. */ 942 if (cd->sge_rxmbuf_tag != NULL) { 943 for (i = 0; i < SGE_RX_RING_CNT; i++) { 944 rxd = &cd->sge_rxdesc[i]; 945 if (rxd->rx_dmamap != NULL) { 946 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 947 rxd->rx_dmamap); 948 rxd->rx_dmamap = NULL; 949 } 950 } 951 if (cd->sge_rx_spare_map != NULL) { 952 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 953 cd->sge_rx_spare_map); 954 cd->sge_rx_spare_map = NULL; 955 } 956 bus_dma_tag_destroy(cd->sge_rxmbuf_tag); 957 cd->sge_rxmbuf_tag = NULL; 958 } 959 /* Tx buffers. */ 960 if (cd->sge_txmbuf_tag != NULL) { 961 for (i = 0; i < SGE_TX_RING_CNT; i++) { 962 txd = &cd->sge_txdesc[i]; 963 if (txd->tx_dmamap != NULL) { 964 bus_dmamap_destroy(cd->sge_txmbuf_tag, 965 txd->tx_dmamap); 966 txd->tx_dmamap = NULL; 967 } 968 } 969 bus_dma_tag_destroy(cd->sge_txmbuf_tag); 970 cd->sge_txmbuf_tag = NULL; 971 } 972 if (cd->sge_tag != NULL) 973 bus_dma_tag_destroy(cd->sge_tag); 974 cd->sge_tag = NULL; 975 } 976 977 /* 978 * Initialize the TX descriptors. 979 */ 980 static int 981 sge_list_tx_init(struct sge_softc *sc) 982 { 983 struct sge_list_data *ld; 984 struct sge_chain_data *cd; 985 986 SGE_LOCK_ASSERT(sc); 987 ld = &sc->sge_ldata; 988 cd = &sc->sge_cdata; 989 bzero(ld->sge_tx_ring, SGE_TX_RING_SZ); 990 ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END); 991 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 992 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 993 cd->sge_tx_prod = 0; 994 cd->sge_tx_cons = 0; 995 cd->sge_tx_cnt = 0; 996 return (0); 997 } 998 999 static int 1000 sge_list_tx_free(struct sge_softc *sc) 1001 { 1002 struct sge_chain_data *cd; 1003 struct sge_txdesc *txd; 1004 int i; 1005 1006 SGE_LOCK_ASSERT(sc); 1007 cd = &sc->sge_cdata; 1008 for (i = 0; i < SGE_TX_RING_CNT; i++) { 1009 txd = &cd->sge_txdesc[i]; 1010 if (txd->tx_m != NULL) { 1011 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1012 BUS_DMASYNC_POSTWRITE); 1013 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1014 m_freem(txd->tx_m); 1015 txd->tx_m = NULL; 1016 txd->tx_ndesc = 0; 1017 } 1018 } 1019 1020 return (0); 1021 } 1022 1023 /* 1024 * Initialize the RX descriptors and allocate mbufs for them. Note that 1025 * we arrange the descriptors in a closed ring, so that the last descriptor 1026 * has RING_END flag set. 1027 */ 1028 static int 1029 sge_list_rx_init(struct sge_softc *sc) 1030 { 1031 struct sge_chain_data *cd; 1032 int i; 1033 1034 SGE_LOCK_ASSERT(sc); 1035 cd = &sc->sge_cdata; 1036 cd->sge_rx_cons = 0; 1037 bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ); 1038 for (i = 0; i < SGE_RX_RING_CNT; i++) { 1039 if (sge_newbuf(sc, i) != 0) 1040 return (ENOBUFS); 1041 } 1042 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1043 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1044 return (0); 1045 } 1046 1047 static int 1048 sge_list_rx_free(struct sge_softc *sc) 1049 { 1050 struct sge_chain_data *cd; 1051 struct sge_rxdesc *rxd; 1052 int i; 1053 1054 SGE_LOCK_ASSERT(sc); 1055 cd = &sc->sge_cdata; 1056 for (i = 0; i < SGE_RX_RING_CNT; i++) { 1057 rxd = &cd->sge_rxdesc[i]; 1058 if (rxd->rx_m != NULL) { 1059 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1060 BUS_DMASYNC_POSTREAD); 1061 bus_dmamap_unload(cd->sge_rxmbuf_tag, 1062 rxd->rx_dmamap); 1063 m_freem(rxd->rx_m); 1064 rxd->rx_m = NULL; 1065 } 1066 } 1067 return (0); 1068 } 1069 1070 /* 1071 * Initialize an RX descriptor and attach an MBUF cluster. 1072 */ 1073 static int 1074 sge_newbuf(struct sge_softc *sc, int prod) 1075 { 1076 struct mbuf *m; 1077 struct sge_desc *desc; 1078 struct sge_chain_data *cd; 1079 struct sge_rxdesc *rxd; 1080 bus_dma_segment_t segs[1]; 1081 bus_dmamap_t map; 1082 int error, nsegs; 1083 1084 SGE_LOCK_ASSERT(sc); 1085 1086 cd = &sc->sge_cdata; 1087 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1088 if (m == NULL) 1089 return (ENOBUFS); 1090 m->m_len = m->m_pkthdr.len = MCLBYTES; 1091 m_adj(m, SGE_RX_BUF_ALIGN); 1092 error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag, 1093 cd->sge_rx_spare_map, m, segs, &nsegs, 0); 1094 if (error != 0) { 1095 m_freem(m); 1096 return (error); 1097 } 1098 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1099 rxd = &cd->sge_rxdesc[prod]; 1100 if (rxd->rx_m != NULL) { 1101 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1102 BUS_DMASYNC_POSTREAD); 1103 bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap); 1104 } 1105 map = rxd->rx_dmamap; 1106 rxd->rx_dmamap = cd->sge_rx_spare_map; 1107 cd->sge_rx_spare_map = map; 1108 bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap, 1109 BUS_DMASYNC_PREREAD); 1110 rxd->rx_m = m; 1111 1112 desc = &sc->sge_ldata.sge_rx_ring[prod]; 1113 desc->sge_sts_size = 0; 1114 desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr)); 1115 desc->sge_flags = htole32(segs[0].ds_len); 1116 if (prod == SGE_RX_RING_CNT - 1) 1117 desc->sge_flags |= htole32(RING_END); 1118 desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR); 1119 return (0); 1120 } 1121 1122 static __inline void 1123 sge_discard_rxbuf(struct sge_softc *sc, int index) 1124 { 1125 struct sge_desc *desc; 1126 1127 desc = &sc->sge_ldata.sge_rx_ring[index]; 1128 desc->sge_sts_size = 0; 1129 desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN); 1130 if (index == SGE_RX_RING_CNT - 1) 1131 desc->sge_flags |= htole32(RING_END); 1132 desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR); 1133 } 1134 1135 /* 1136 * A frame has been uploaded: pass the resulting mbuf chain up to 1137 * the higher level protocols. 1138 */ 1139 static void 1140 sge_rxeof(struct sge_softc *sc) 1141 { 1142 if_t ifp; 1143 struct mbuf *m; 1144 struct sge_chain_data *cd; 1145 struct sge_desc *cur_rx; 1146 uint32_t rxinfo, rxstat; 1147 int cons, prog; 1148 1149 SGE_LOCK_ASSERT(sc); 1150 1151 ifp = sc->sge_ifp; 1152 cd = &sc->sge_cdata; 1153 1154 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1155 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1156 cons = cd->sge_rx_cons; 1157 for (prog = 0; prog < SGE_RX_RING_CNT; prog++, 1158 SGE_INC(cons, SGE_RX_RING_CNT)) { 1159 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 1160 break; 1161 cur_rx = &sc->sge_ldata.sge_rx_ring[cons]; 1162 rxinfo = le32toh(cur_rx->sge_cmdsts); 1163 if ((rxinfo & RDC_OWN) != 0) 1164 break; 1165 rxstat = le32toh(cur_rx->sge_sts_size); 1166 if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 || 1167 SGE_RX_NSEGS(rxstat) != 1) { 1168 /* XXX We don't support multi-segment frames yet. */ 1169 #ifdef SGE_SHOW_ERRORS 1170 device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat, 1171 RX_ERR_BITS); 1172 #endif 1173 sge_discard_rxbuf(sc, cons); 1174 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1175 continue; 1176 } 1177 m = cd->sge_rxdesc[cons].rx_m; 1178 if (sge_newbuf(sc, cons) != 0) { 1179 sge_discard_rxbuf(sc, cons); 1180 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1181 continue; 1182 } 1183 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { 1184 if ((rxinfo & RDC_IP_CSUM) != 0 && 1185 (rxinfo & RDC_IP_CSUM_OK) != 0) 1186 m->m_pkthdr.csum_flags |= 1187 CSUM_IP_CHECKED | CSUM_IP_VALID; 1188 if (((rxinfo & RDC_TCP_CSUM) != 0 && 1189 (rxinfo & RDC_TCP_CSUM_OK) != 0) || 1190 ((rxinfo & RDC_UDP_CSUM) != 0 && 1191 (rxinfo & RDC_UDP_CSUM_OK) != 0)) { 1192 m->m_pkthdr.csum_flags |= 1193 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1194 m->m_pkthdr.csum_data = 0xffff; 1195 } 1196 } 1197 /* Check for VLAN tagged frame. */ 1198 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 && 1199 (rxstat & RDS_VLAN) != 0) { 1200 m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK; 1201 m->m_flags |= M_VLANTAG; 1202 } 1203 /* 1204 * Account for 10bytes auto padding which is used 1205 * to align IP header on 32bit boundary. Also note, 1206 * CRC bytes is automatically removed by the 1207 * hardware. 1208 */ 1209 m->m_data += SGE_RX_PAD_BYTES; 1210 m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) - 1211 SGE_RX_PAD_BYTES; 1212 m->m_pkthdr.rcvif = ifp; 1213 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1214 SGE_UNLOCK(sc); 1215 if_input(ifp, m); 1216 SGE_LOCK(sc); 1217 } 1218 1219 if (prog > 0) { 1220 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1221 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1222 cd->sge_rx_cons = cons; 1223 } 1224 } 1225 1226 /* 1227 * A frame was downloaded to the chip. It's safe for us to clean up 1228 * the list buffers. 1229 */ 1230 static void 1231 sge_txeof(struct sge_softc *sc) 1232 { 1233 if_t ifp; 1234 struct sge_list_data *ld; 1235 struct sge_chain_data *cd; 1236 struct sge_txdesc *txd; 1237 uint32_t txstat; 1238 int cons, nsegs, prod; 1239 1240 SGE_LOCK_ASSERT(sc); 1241 1242 ifp = sc->sge_ifp; 1243 ld = &sc->sge_ldata; 1244 cd = &sc->sge_cdata; 1245 1246 if (cd->sge_tx_cnt == 0) 1247 return; 1248 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 1249 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1250 cons = cd->sge_tx_cons; 1251 prod = cd->sge_tx_prod; 1252 for (; cons != prod;) { 1253 txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts); 1254 if ((txstat & TDC_OWN) != 0) 1255 break; 1256 /* 1257 * Only the first descriptor of multi-descriptor transmission 1258 * is updated by controller. Driver should skip entire 1259 * chained buffers for the transmitted frame. In other words 1260 * TDC_OWN bit is valid only at the first descriptor of a 1261 * multi-descriptor transmission. 1262 */ 1263 if (SGE_TX_ERROR(txstat) != 0) { 1264 #ifdef SGE_SHOW_ERRORS 1265 device_printf(sc->sge_dev, "Tx error : 0x%b\n", 1266 txstat, TX_ERR_BITS); 1267 #endif 1268 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1269 } else { 1270 #ifdef notyet 1271 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0xFFFF) - 1); 1272 #endif 1273 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1274 } 1275 txd = &cd->sge_txdesc[cons]; 1276 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 1277 ld->sge_tx_ring[cons].sge_cmdsts = 0; 1278 SGE_INC(cons, SGE_TX_RING_CNT); 1279 } 1280 /* Reclaim transmitted mbuf. */ 1281 KASSERT(txd->tx_m != NULL, 1282 ("%s: freeing NULL mbuf\n", __func__)); 1283 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1284 BUS_DMASYNC_POSTWRITE); 1285 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1286 m_freem(txd->tx_m); 1287 txd->tx_m = NULL; 1288 cd->sge_tx_cnt -= txd->tx_ndesc; 1289 KASSERT(cd->sge_tx_cnt >= 0, 1290 ("%s: Active Tx desc counter was garbled\n", __func__)); 1291 txd->tx_ndesc = 0; 1292 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1293 } 1294 cd->sge_tx_cons = cons; 1295 if (cd->sge_tx_cnt == 0) 1296 sc->sge_timer = 0; 1297 } 1298 1299 static void 1300 sge_tick(void *arg) 1301 { 1302 struct sge_softc *sc; 1303 struct mii_data *mii; 1304 if_t ifp; 1305 1306 sc = arg; 1307 SGE_LOCK_ASSERT(sc); 1308 1309 ifp = sc->sge_ifp; 1310 mii = device_get_softc(sc->sge_miibus); 1311 mii_tick(mii); 1312 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1313 sge_miibus_statchg(sc->sge_dev); 1314 if ((sc->sge_flags & SGE_FLAG_LINK) != 0 && 1315 !if_sendq_empty(ifp)) 1316 sge_start_locked(ifp); 1317 } 1318 /* 1319 * Reclaim transmitted frames here as we do not request 1320 * Tx completion interrupt for every queued frames to 1321 * reduce excessive interrupts. 1322 */ 1323 sge_txeof(sc); 1324 sge_watchdog(sc); 1325 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1326 } 1327 1328 static void 1329 sge_intr(void *arg) 1330 { 1331 struct sge_softc *sc; 1332 if_t ifp; 1333 uint32_t status; 1334 1335 sc = arg; 1336 SGE_LOCK(sc); 1337 ifp = sc->sge_ifp; 1338 1339 status = CSR_READ_4(sc, IntrStatus); 1340 if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) { 1341 /* Not ours. */ 1342 SGE_UNLOCK(sc); 1343 return; 1344 } 1345 /* Acknowledge interrupts. */ 1346 CSR_WRITE_4(sc, IntrStatus, status); 1347 /* Disable further interrupts. */ 1348 CSR_WRITE_4(sc, IntrMask, 0); 1349 /* 1350 * It seems the controller supports some kind of interrupt 1351 * moderation mechanism but we still don't know how to 1352 * enable that. To reduce number of generated interrupts 1353 * under load we check pending interrupts in a loop. This 1354 * will increase number of register access and is not correct 1355 * way to handle interrupt moderation but there seems to be 1356 * no other way at this time. 1357 */ 1358 for (;;) { 1359 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 1360 break; 1361 if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) { 1362 sge_rxeof(sc); 1363 /* Wakeup Rx MAC. */ 1364 if ((status & INTR_RX_IDLE) != 0) 1365 CSR_WRITE_4(sc, RX_CTL, 1366 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1367 } 1368 if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0) 1369 sge_txeof(sc); 1370 status = CSR_READ_4(sc, IntrStatus); 1371 if ((status & SGE_INTRS) == 0) 1372 break; 1373 /* Acknowledge interrupts. */ 1374 CSR_WRITE_4(sc, IntrStatus, status); 1375 } 1376 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1377 /* Re-enable interrupts */ 1378 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1379 if (!if_sendq_empty(ifp)) 1380 sge_start_locked(ifp); 1381 } 1382 SGE_UNLOCK(sc); 1383 } 1384 1385 /* 1386 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1387 * pointers to the fragment pointers. 1388 */ 1389 static int 1390 sge_encap(struct sge_softc *sc, struct mbuf **m_head) 1391 { 1392 struct mbuf *m; 1393 struct sge_desc *desc; 1394 struct sge_txdesc *txd; 1395 bus_dma_segment_t txsegs[SGE_MAXTXSEGS]; 1396 uint32_t cflags, mss; 1397 int error, i, nsegs, prod, si; 1398 1399 SGE_LOCK_ASSERT(sc); 1400 1401 si = prod = sc->sge_cdata.sge_tx_prod; 1402 txd = &sc->sge_cdata.sge_txdesc[prod]; 1403 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1404 struct ether_header *eh; 1405 struct ip *ip; 1406 struct tcphdr *tcp; 1407 uint32_t ip_off, poff; 1408 1409 if (M_WRITABLE(*m_head) == 0) { 1410 /* Get a writable copy. */ 1411 m = m_dup(*m_head, M_NOWAIT); 1412 m_freem(*m_head); 1413 if (m == NULL) { 1414 *m_head = NULL; 1415 return (ENOBUFS); 1416 } 1417 *m_head = m; 1418 } 1419 ip_off = sizeof(struct ether_header); 1420 m = m_pullup(*m_head, ip_off); 1421 if (m == NULL) { 1422 *m_head = NULL; 1423 return (ENOBUFS); 1424 } 1425 eh = mtod(m, struct ether_header *); 1426 /* Check the existence of VLAN tag. */ 1427 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1428 ip_off = sizeof(struct ether_vlan_header); 1429 m = m_pullup(m, ip_off); 1430 if (m == NULL) { 1431 *m_head = NULL; 1432 return (ENOBUFS); 1433 } 1434 } 1435 m = m_pullup(m, ip_off + sizeof(struct ip)); 1436 if (m == NULL) { 1437 *m_head = NULL; 1438 return (ENOBUFS); 1439 } 1440 ip = (struct ip *)(mtod(m, char *) + ip_off); 1441 poff = ip_off + (ip->ip_hl << 2); 1442 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1443 if (m == NULL) { 1444 *m_head = NULL; 1445 return (ENOBUFS); 1446 } 1447 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1448 m = m_pullup(m, poff + (tcp->th_off << 2)); 1449 if (m == NULL) { 1450 *m_head = NULL; 1451 return (ENOBUFS); 1452 } 1453 /* 1454 * Reset IP checksum and recompute TCP pseudo 1455 * checksum that NDIS specification requires. 1456 */ 1457 ip = (struct ip *)(mtod(m, char *) + ip_off); 1458 ip->ip_sum = 0; 1459 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1460 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1461 htons(IPPROTO_TCP)); 1462 *m_head = m; 1463 } 1464 1465 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1466 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1467 if (error == EFBIG) { 1468 m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS); 1469 if (m == NULL) { 1470 m_freem(*m_head); 1471 *m_head = NULL; 1472 return (ENOBUFS); 1473 } 1474 *m_head = m; 1475 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1476 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1477 if (error != 0) { 1478 m_freem(*m_head); 1479 *m_head = NULL; 1480 return (error); 1481 } 1482 } else if (error != 0) 1483 return (error); 1484 1485 KASSERT(nsegs != 0, ("zero segment returned")); 1486 /* Check descriptor overrun. */ 1487 if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) { 1488 bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap); 1489 return (ENOBUFS); 1490 } 1491 bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap, 1492 BUS_DMASYNC_PREWRITE); 1493 1494 m = *m_head; 1495 cflags = 0; 1496 mss = 0; 1497 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1498 cflags |= TDC_LS; 1499 mss = (uint32_t)m->m_pkthdr.tso_segsz; 1500 mss <<= 16; 1501 } else { 1502 if (m->m_pkthdr.csum_flags & CSUM_IP) 1503 cflags |= TDC_IP_CSUM; 1504 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1505 cflags |= TDC_TCP_CSUM; 1506 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1507 cflags |= TDC_UDP_CSUM; 1508 } 1509 for (i = 0; i < nsegs; i++) { 1510 desc = &sc->sge_ldata.sge_tx_ring[prod]; 1511 if (i == 0) { 1512 desc->sge_sts_size = htole32(m->m_pkthdr.len | mss); 1513 desc->sge_cmdsts = 0; 1514 } else { 1515 desc->sge_sts_size = 0; 1516 desc->sge_cmdsts = htole32(TDC_OWN); 1517 } 1518 desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr)); 1519 desc->sge_flags = htole32(txsegs[i].ds_len); 1520 if (prod == SGE_TX_RING_CNT - 1) 1521 desc->sge_flags |= htole32(RING_END); 1522 sc->sge_cdata.sge_tx_cnt++; 1523 SGE_INC(prod, SGE_TX_RING_CNT); 1524 } 1525 /* Update producer index. */ 1526 sc->sge_cdata.sge_tx_prod = prod; 1527 1528 desc = &sc->sge_ldata.sge_tx_ring[si]; 1529 /* Configure VLAN. */ 1530 if((m->m_flags & M_VLANTAG) != 0) { 1531 cflags |= m->m_pkthdr.ether_vtag; 1532 desc->sge_sts_size |= htole32(TDS_INS_VLAN); 1533 } 1534 desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags); 1535 #if 1 1536 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1537 desc->sge_cmdsts |= htole32(TDC_BST); 1538 #else 1539 if ((sc->sge_flags & SGE_FLAG_FDX) == 0) { 1540 desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF); 1541 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1542 desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST); 1543 } 1544 #endif 1545 /* Request interrupt and give ownership to controller. */ 1546 desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR); 1547 txd->tx_m = m; 1548 txd->tx_ndesc = nsegs; 1549 return (0); 1550 } 1551 1552 static void 1553 sge_start(if_t ifp) 1554 { 1555 struct sge_softc *sc; 1556 1557 sc = if_getsoftc(ifp); 1558 SGE_LOCK(sc); 1559 sge_start_locked(ifp); 1560 SGE_UNLOCK(sc); 1561 } 1562 1563 static void 1564 sge_start_locked(if_t ifp) 1565 { 1566 struct sge_softc *sc; 1567 struct mbuf *m_head; 1568 int queued = 0; 1569 1570 sc = if_getsoftc(ifp); 1571 SGE_LOCK_ASSERT(sc); 1572 1573 if ((sc->sge_flags & SGE_FLAG_LINK) == 0 || 1574 (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1575 IFF_DRV_RUNNING) 1576 return; 1577 1578 for (queued = 0; !if_sendq_empty(ifp); ) { 1579 if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT - 1580 SGE_MAXTXSEGS)) { 1581 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1582 break; 1583 } 1584 m_head = if_dequeue(ifp); 1585 if (m_head == NULL) 1586 break; 1587 if (sge_encap(sc, &m_head)) { 1588 if (m_head == NULL) 1589 break; 1590 if_sendq_prepend(ifp, m_head); 1591 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1592 break; 1593 } 1594 queued++; 1595 /* 1596 * If there's a BPF listener, bounce a copy of this frame 1597 * to him. 1598 */ 1599 BPF_MTAP(ifp, m_head); 1600 } 1601 1602 if (queued > 0) { 1603 bus_dmamap_sync(sc->sge_cdata.sge_tx_tag, 1604 sc->sge_cdata.sge_tx_dmamap, 1605 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1606 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL); 1607 sc->sge_timer = 5; 1608 } 1609 } 1610 1611 static void 1612 sge_init(void *arg) 1613 { 1614 struct sge_softc *sc; 1615 1616 sc = arg; 1617 SGE_LOCK(sc); 1618 sge_init_locked(sc); 1619 SGE_UNLOCK(sc); 1620 } 1621 1622 static void 1623 sge_init_locked(struct sge_softc *sc) 1624 { 1625 if_t ifp; 1626 struct mii_data *mii; 1627 uint16_t rxfilt; 1628 int i; 1629 1630 SGE_LOCK_ASSERT(sc); 1631 ifp = sc->sge_ifp; 1632 mii = device_get_softc(sc->sge_miibus); 1633 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1634 return; 1635 /* 1636 * Cancel pending I/O and free all RX/TX buffers. 1637 */ 1638 sge_stop(sc); 1639 sge_reset(sc); 1640 1641 /* Init circular RX list. */ 1642 if (sge_list_rx_init(sc) == ENOBUFS) { 1643 device_printf(sc->sge_dev, "no memory for Rx buffers\n"); 1644 sge_stop(sc); 1645 return; 1646 } 1647 /* Init TX descriptors. */ 1648 sge_list_tx_init(sc); 1649 /* 1650 * Load the address of the RX and TX lists. 1651 */ 1652 CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr)); 1653 CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr)); 1654 1655 CSR_WRITE_4(sc, TxMacControl, 0x60); 1656 CSR_WRITE_4(sc, RxWakeOnLan, 0); 1657 CSR_WRITE_4(sc, RxWakeOnLanData, 0); 1658 /* Allow receiving VLAN frames. */ 1659 CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + 1660 SGE_RX_PAD_BYTES); 1661 1662 for (i = 0; i < ETHER_ADDR_LEN; i++) 1663 CSR_WRITE_1(sc, RxMacAddr + i, if_getlladdr(ifp)[i]); 1664 /* Configure RX MAC. */ 1665 rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB; 1666 CSR_WRITE_2(sc, RxMacControl, rxfilt); 1667 sge_rxfilter(sc); 1668 sge_setvlan(sc); 1669 1670 /* Initialize default speed/duplex information. */ 1671 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) 1672 sc->sge_flags |= SGE_FLAG_SPEED_1000; 1673 sc->sge_flags |= SGE_FLAG_FDX; 1674 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) 1675 CSR_WRITE_4(sc, StationControl, 0x04008001); 1676 else 1677 CSR_WRITE_4(sc, StationControl, 0x04000001); 1678 /* 1679 * XXX Try to mitigate interrupts. 1680 */ 1681 CSR_WRITE_4(sc, IntrControl, 0x08880000); 1682 #ifdef notyet 1683 if (sc->sge_intrcontrol != 0) 1684 CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol); 1685 if (sc->sge_intrtimer != 0) 1686 CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer); 1687 #endif 1688 1689 /* 1690 * Clear and enable interrupts. 1691 */ 1692 CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF); 1693 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1694 1695 /* Enable receiver and transmitter. */ 1696 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB); 1697 CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1698 1699 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 1700 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1701 1702 sc->sge_flags &= ~SGE_FLAG_LINK; 1703 mii_mediachg(mii); 1704 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1705 } 1706 1707 /* 1708 * Set media options. 1709 */ 1710 static int 1711 sge_ifmedia_upd(if_t ifp) 1712 { 1713 struct sge_softc *sc; 1714 struct mii_data *mii; 1715 struct mii_softc *miisc; 1716 int error; 1717 1718 sc = if_getsoftc(ifp); 1719 SGE_LOCK(sc); 1720 mii = device_get_softc(sc->sge_miibus); 1721 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1722 PHY_RESET(miisc); 1723 error = mii_mediachg(mii); 1724 SGE_UNLOCK(sc); 1725 1726 return (error); 1727 } 1728 1729 /* 1730 * Report current media status. 1731 */ 1732 static void 1733 sge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 1734 { 1735 struct sge_softc *sc; 1736 struct mii_data *mii; 1737 1738 sc = if_getsoftc(ifp); 1739 SGE_LOCK(sc); 1740 mii = device_get_softc(sc->sge_miibus); 1741 if ((if_getflags(ifp) & IFF_UP) == 0) { 1742 SGE_UNLOCK(sc); 1743 return; 1744 } 1745 mii_pollstat(mii); 1746 ifmr->ifm_active = mii->mii_media_active; 1747 ifmr->ifm_status = mii->mii_media_status; 1748 SGE_UNLOCK(sc); 1749 } 1750 1751 static int 1752 sge_ioctl(if_t ifp, u_long command, caddr_t data) 1753 { 1754 struct sge_softc *sc; 1755 struct ifreq *ifr; 1756 struct mii_data *mii; 1757 int error = 0, mask, reinit; 1758 1759 sc = if_getsoftc(ifp); 1760 ifr = (struct ifreq *)data; 1761 1762 switch(command) { 1763 case SIOCSIFFLAGS: 1764 SGE_LOCK(sc); 1765 if ((if_getflags(ifp) & IFF_UP) != 0) { 1766 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && 1767 ((if_getflags(ifp) ^ sc->sge_if_flags) & 1768 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1769 sge_rxfilter(sc); 1770 else 1771 sge_init_locked(sc); 1772 } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1773 sge_stop(sc); 1774 sc->sge_if_flags = if_getflags(ifp); 1775 SGE_UNLOCK(sc); 1776 break; 1777 case SIOCSIFCAP: 1778 SGE_LOCK(sc); 1779 reinit = 0; 1780 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1781 if ((mask & IFCAP_TXCSUM) != 0 && 1782 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { 1783 if_togglecapenable(ifp, IFCAP_TXCSUM); 1784 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 1785 if_sethwassistbits(ifp, SGE_CSUM_FEATURES, 0); 1786 else 1787 if_sethwassistbits(ifp, 0, SGE_CSUM_FEATURES); 1788 } 1789 if ((mask & IFCAP_RXCSUM) != 0 && 1790 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) 1791 if_togglecapenable(ifp, IFCAP_RXCSUM); 1792 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1793 (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0) 1794 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); 1795 if ((mask & IFCAP_TSO4) != 0 && 1796 (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) { 1797 if_togglecapenable(ifp, IFCAP_TSO4); 1798 if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0) 1799 if_sethwassistbits(ifp, CSUM_TSO, 0); 1800 else 1801 if_sethwassistbits(ifp, 0, CSUM_TSO); 1802 } 1803 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1804 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) 1805 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 1806 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1807 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { 1808 /* 1809 * Due to unknown reason, toggling VLAN hardware 1810 * tagging require interface reinitialization. 1811 */ 1812 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 1813 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) 1814 if_setcapenablebit(ifp, 0, 1815 IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 1816 reinit = 1; 1817 } 1818 if (reinit > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1819 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1820 sge_init_locked(sc); 1821 } 1822 SGE_UNLOCK(sc); 1823 VLAN_CAPABILITIES(ifp); 1824 break; 1825 case SIOCADDMULTI: 1826 case SIOCDELMULTI: 1827 SGE_LOCK(sc); 1828 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1829 sge_rxfilter(sc); 1830 SGE_UNLOCK(sc); 1831 break; 1832 case SIOCGIFMEDIA: 1833 case SIOCSIFMEDIA: 1834 mii = device_get_softc(sc->sge_miibus); 1835 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1836 break; 1837 default: 1838 error = ether_ioctl(ifp, command, data); 1839 break; 1840 } 1841 1842 return (error); 1843 } 1844 1845 static void 1846 sge_watchdog(struct sge_softc *sc) 1847 { 1848 if_t ifp; 1849 1850 SGE_LOCK_ASSERT(sc); 1851 if (sc->sge_timer == 0 || --sc->sge_timer > 0) 1852 return; 1853 1854 ifp = sc->sge_ifp; 1855 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1856 if (1 || bootverbose) 1857 device_printf(sc->sge_dev, 1858 "watchdog timeout (lost link)\n"); 1859 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1860 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1861 sge_init_locked(sc); 1862 return; 1863 } 1864 device_printf(sc->sge_dev, "watchdog timeout\n"); 1865 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1866 1867 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1868 sge_init_locked(sc); 1869 if (!if_sendq_empty(sc->sge_ifp)) 1870 sge_start_locked(ifp); 1871 } 1872 1873 /* 1874 * Stop the adapter and free any mbufs allocated to the 1875 * RX and TX lists. 1876 */ 1877 static void 1878 sge_stop(struct sge_softc *sc) 1879 { 1880 if_t ifp; 1881 1882 ifp = sc->sge_ifp; 1883 1884 SGE_LOCK_ASSERT(sc); 1885 1886 sc->sge_timer = 0; 1887 callout_stop(&sc->sge_stat_ch); 1888 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 1889 1890 CSR_WRITE_4(sc, IntrMask, 0); 1891 CSR_READ_4(sc, IntrMask); 1892 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1893 /* Stop TX/RX MAC. */ 1894 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 1895 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 1896 /* XXX Can we assume active DMA cycles gone? */ 1897 DELAY(2000); 1898 CSR_WRITE_4(sc, IntrMask, 0); 1899 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1900 1901 sc->sge_flags &= ~SGE_FLAG_LINK; 1902 sge_list_rx_free(sc); 1903 sge_list_tx_free(sc); 1904 } 1905