/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/sge/if_sgereg.h>

MODULE_DEPEND(sge, pci, 1, 1, 1);
MODULE_DEPEND(sge, ether, 1, 1, 1);
MODULE_DEPEND(sge, miibus, 1, 1, 1);

/* "device miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct sge_type sge_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" },
	{ SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int	sge_probe(device_t);
static int	sge_attach(device_t);
static int	sge_detach(device_t);
static int	sge_shutdown(device_t);
static int	sge_suspend(device_t);
static int	sge_resume(device_t);

static int	sge_miibus_readreg(device_t, int, int);
static int	sge_miibus_writereg(device_t, int, int, int);
static void	sge_miibus_statchg(device_t);

static int	sge_newbuf(struct sge_softc *, int);
static int	sge_encap(struct sge_softc *, struct mbuf **);
static __inline void
		sge_discard_rxbuf(struct sge_softc *, int);
static void	sge_rxeof(struct sge_softc *);
static void	sge_txeof(struct sge_softc *);
static void	sge_intr(void *);
static void	sge_tick(void *);
static void	sge_start(if_t);
static void	sge_start_locked(if_t);
static int	sge_ioctl(if_t, u_long, caddr_t);
static void	sge_init(void *);
static void	sge_init_locked(struct sge_softc *);
static void	sge_stop(struct sge_softc *);
static void	sge_watchdog(struct sge_softc *);
static int	sge_ifmedia_upd(if_t);
static void	sge_ifmedia_sts(if_t, struct ifmediareq *);

static int	sge_get_mac_addr_apc(struct sge_softc *, uint8_t *);
static int	sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *);
static uint16_t	sge_read_eeprom(struct sge_softc *, int);

static void	sge_rxfilter(struct sge_softc *);
static void	sge_setvlan(struct sge_softc *);
static void	sge_reset(struct sge_softc *);
static int	sge_list_rx_init(struct sge_softc *);
static int	sge_list_rx_free(struct sge_softc *);
static int	sge_list_tx_init(struct sge_softc *);
static int	sge_list_tx_free(struct sge_softc *);

static int	sge_dma_alloc(struct sge_softc *);
static void	sge_dma_free(struct sge_softc *);
static void	sge_dma_map_addr(void *, bus_dma_segment_t *, int, int);

static device_method_t sge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sge_probe),
	DEVMETHOD(device_attach,	sge_attach),
	DEVMETHOD(device_detach,	sge_detach),
	DEVMETHOD(device_suspend,	sge_suspend),
	DEVMETHOD(device_resume,	sge_resume),
	DEVMETHOD(device_shutdown,	sge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t sge_driver = {
	"sge", sge_methods, sizeof(struct sge_softc)
};

DRIVER_MODULE(sge, pci, sge_driver, 0, 0);
DRIVER_MODULE(miibus, sge, miibus_driver, 0, 0);

/*
 * Register space access macros.
 */
#define	CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sge_res, reg, val)
#define	CSR_WRITE_2(sc, reg, val)	bus_write_2(sc->sge_res, reg, val)
#define	CSR_WRITE_1(sc, reg, val)	bus_write_1(sc->sge_res, reg, val)

#define	CSR_READ_4(sc, reg)		bus_read_4(sc->sge_res, reg)
#define	CSR_READ_2(sc, reg)		bus_read_2(sc->sge_res, reg)
#define	CSR_READ_1(sc, reg)		bus_read_1(sc->sge_res, reg)

/* Define to show Tx/Rx error status. */
#undef SGE_SHOW_ERRORS

#define	SGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static void
sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *p;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	p = arg;
	*p = segs->ds_addr;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static uint16_t
sge_read_eeprom(struct sge_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big"));
	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev,
		    "EEPROM read timeout : 0x%08x\n", val);
		return (0xffff);
	}

	return ((val & EI_DATA) >> EI_DATA_SHIFT);
}

static int
sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = sge_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0) {
		device_printf(sc->sge_dev,
		    "invalid EEPROM signature : 0x%04x\n", val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	return (0);
}

/*
 * For SiS96x, APC CMOS RAM is used to store the ethernet address.
 * APC CMOS RAM is accessed through the ISA bridge.
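 *
 * The bridge's config register 0x48 gates access to I/O ports 0x78/0x79,
 * which form an index/data pair into the APC RAM: the MAC address is read
 * from indices 0x09-0x0e, and bit 7 of index 0x12 appears to indicate an
 * RGMII-connected PHY (see the port accesses below).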
 */
static int
sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	devclass_t pci;
	device_t bus, dev = NULL;
	device_t *kids;
	struct apc_tbl {
		uint16_t vid;
		uint16_t did;
	} *tp, apc_tbls[] = {
		{ SIS_VENDORID, 0x0965 },
		{ SIS_VENDORID, 0x0966 },
		{ SIS_VENDORID, 0x0968 }
	};
	uint8_t reg;
	int busnum, i, j, numkids;

	pci = devclass_find("pci");
	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_BRIDGE &&
			    pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
				tp = apc_tbls;
				for (j = 0; j < nitems(apc_tbls); j++) {
					if (pci_get_vendor(dev) == tp->vid &&
					    pci_get_device(dev) == tp->did) {
						free(kids, M_TEMP);
						goto apc_found;
					}
					tp++;
				}
			}
		}
		free(kids, M_TEMP);
	}
	device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
	return (EINVAL);
apc_found:
	/* Enable ports 0x78 and 0x79 to access the APC registers. */
	reg = pci_read_config(dev, 0x48, 1);
	pci_write_config(dev, 0x48, reg & ~0x02, 1);
	DELAY(50);
	pci_read_config(dev, 0x48, 1);
	/* Read the stored ethernet address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		outb(0x78, 0x09 + i);
		dest[i] = inb(0x79);
	}
	outb(0x78, 0x12);
	if ((inb(0x79) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	/* Restore access to APC registers. */
	pci_write_config(dev, 0x48, reg, 1);

	return (0);
#else
	return (EINVAL);
#endif
}

static int
sge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
		return (0);
	}
	return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
}

static int
sge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
	    GMI_OP_WR | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT)
		device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
	return (0);
}

static void
sge_miibus_statchg(device_t dev)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t ctl, speed;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sge_miibus);
	ifp = sc->sge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;
	speed = 0;
	sc->sge_flags &= ~SGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
			if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
				sc->sge_flags |= SGE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
		return;
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000) {
		ctl |= 0x07000000;
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	} else {
		ctl |= 0x04000000;
		sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
	}
#ifdef notyet
	if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ctl |= SC_FDX;
		sc->sge_flags |= SGE_FLAG_FDX;
	} else
		sc->sge_flags &= ~SGE_FLAG_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

static u_int
sge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	uint32_t crc, *hashes = arg;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

	return (1);
}

static void
sge_rxfilter(struct sge_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2];
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast);
	rxfilt |= AcceptMyPhys;
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		rxfilt |= AcceptBroadcast;
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;
		/* Now program new ones. */
		if_foreach_llmaddr(ifp, sge_hash_maddr, hashes);
	}
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

static void
sge_setvlan(struct sge_softc *sc)
{
	if_t ifp;
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	if ((if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
		return;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		rxfilt |= RXMAC_STRIP_VLAN;
	else
		rxfilt &= ~RXMAC_STRIP_VLAN;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
}

static void
sge_reset(struct sge_softc *sc)
{

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
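 * We return BUS_PROBE_DEFAULT so that a more specific driver can still
 * outbid us for the device.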
 */
static int
sge_probe(device_t dev)
{
	struct sge_type *t;

	t = sge_devs;
	while (t->sge_name != NULL) {
		if ((pci_get_vendor(dev) == t->sge_vid) &&
		    (pci_get_device(dev) == t->sge_did)) {
			device_set_desc(dev, t->sge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sge_attach(device_t dev)
{
	struct sge_softc *sc;
	if_t ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error = 0, rid;

	sc = device_get_softc(dev);
	sc->sge_dev = dev;

	mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate resources. */
	sc->sge_res_id = PCIR_BAR(0);
	sc->sge_res_type = SYS_RES_MEMORY;
	sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type,
	    &sc->sge_res_id, RF_ACTIVE);
	if (sc->sge_res == NULL) {
		device_printf(dev, "couldn't allocate resource\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sge_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}
	sc->sge_rev = pci_get_revid(dev);
	if (pci_get_device(dev) == SIS_DEVICEID_190)
		sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190;
	/* Reset the adapter. */
	sge_reset(sc);

	/* Get MAC address from the EEPROM. */
	if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0)
		sge_get_mac_addr_apc(sc, eaddr);
	else
		sge_get_mac_addr_eeprom(sc, eaddr);

	if ((error = sge_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENOSPC;
		goto fail;
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, sge_ioctl);
	if_setstartfn(ifp, sge_start);
	if_setinitfn(ifp, sge_init);
	if_setsendqlen(ifp, SGE_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4);
	if_sethwassist(ifp, SGE_CSUM_FEATURES | CSUM_TSO);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd,
	    sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM |
	    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	/*
	 * Tell the upper layer(s) we support long frames.
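	 * Setting if_hdrlen to sizeof(struct ether_vlan_header) lets the
	 * stack account for a VLAN tag when it computes frame size limits
	 * for this interface.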
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sge_intr, sc, &sc->sge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		sge_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sge_detach(device_t dev)
{
	struct sge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->sge_ifp;
	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		SGE_LOCK(sc);
		sge_stop(sc);
		SGE_UNLOCK(sc);
		callout_drain(&sc->sge_stat_ch);
	}
	if (sc->sge_miibus)
		device_delete_child(dev, sc->sge_miibus);
	bus_generic_detach(dev);

	if (sc->sge_intrhand)
		bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand);
	if (sc->sge_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq);
	if (sc->sge_res)
		bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id,
		    sc->sge_res);
	if (ifp)
		if_free(ifp);
	sge_dma_free(sc);
	mtx_destroy(&sc->sge_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
sge_shutdown(device_t dev)
{
	struct sge_softc *sc;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	sge_stop(sc);
	SGE_UNLOCK(sc);
	return (0);
}

static int
sge_suspend(device_t dev)
{
	struct sge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		sge_stop(sc);
	SGE_UNLOCK(sc);
	return (0);
}

static int
sge_resume(device_t dev)
{
	struct sge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0)
		sge_init_locked(sc);
	SGE_UNLOCK(sc);
	return (0);
}

static int
sge_dma_alloc(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int error, i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* RX descriptor ring */
	error = bus_dma_tag_create(cd->sge_tag,
	    SGE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SGE_RX_RING_SZ, 1,		/* maxsize, nsegments */
	    SGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_rx_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load DMA map for RX ring. */
	error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &cd->sge_rx_dmamap);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr,
	    &ld->sge_rx_paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}

	/* TX descriptor ring */
	error = bus_dma_tag_create(cd->sge_tag,
	    SGE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SGE_TX_RING_SZ, 1,		/* maxsize, nsegments */
	    SGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &cd->sge_tx_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load DMA map for TX ring. */
	error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &cd->sge_tx_dmamap);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr,
	    &ld->sge_tx_paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS,
	    SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Tx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
	    MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create Rx mbuf DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		txd->tx_ndesc = 0;
		error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sge_dev,
			    "could not create Tx DMA map.\n");
			goto fail;
		}
	}
	/* Create spare DMA map for Rx buffer. */
	error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map);
	if (error != 0) {
		device_printf(sc->sge_dev,
		    "could not create spare Rx DMA map.\n");
		goto fail;
	}
	/*
	 * Create DMA maps for Rx buffers.
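	 *
	 * The spare map created above is used by sge_newbuf(): a replacement
	 * cluster is loaded into the spare map first and the two maps are
	 * then swapped, so an Rx buffer is only torn down once its
	 * replacement is ready.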
	 */
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
		    &rxd->rx_dmamap);
		if (error) {
			device_printf(sc->sge_dev,
			    "could not create Rx DMA map.\n");
			goto fail;
		}
	}
fail:
	return (error);
}

static void
sge_dma_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_list_data *ld;
	struct sge_rxdesc *rxd;
	struct sge_txdesc *txd;
	int i;

	cd = &sc->sge_cdata;
	ld = &sc->sge_ldata;
	/* Rx ring. */
	if (cd->sge_rx_tag != NULL) {
		if (ld->sge_rx_paddr != 0)
			bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap);
		if (ld->sge_rx_ring != NULL)
			bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring,
			    cd->sge_rx_dmamap);
		ld->sge_rx_ring = NULL;
		ld->sge_rx_paddr = 0;
		bus_dma_tag_destroy(cd->sge_rx_tag);
		cd->sge_rx_tag = NULL;
	}
	/* Tx ring. */
	if (cd->sge_tx_tag != NULL) {
		if (ld->sge_tx_paddr != 0)
			bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap);
		if (ld->sge_tx_ring != NULL)
			bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring,
			    cd->sge_tx_dmamap);
		ld->sge_tx_ring = NULL;
		ld->sge_tx_paddr = 0;
		bus_dma_tag_destroy(cd->sge_tx_tag);
		cd->sge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (cd->sge_rxmbuf_tag != NULL) {
		for (i = 0; i < SGE_RX_RING_CNT; i++) {
			rxd = &cd->sge_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_rxmbuf_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (cd->sge_rx_spare_map != NULL) {
			bus_dmamap_destroy(cd->sge_rxmbuf_tag,
			    cd->sge_rx_spare_map);
			cd->sge_rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(cd->sge_rxmbuf_tag);
		cd->sge_rxmbuf_tag = NULL;
	}
	/* Tx buffers. */
	if (cd->sge_txmbuf_tag != NULL) {
		for (i = 0; i < SGE_TX_RING_CNT; i++) {
			txd = &cd->sge_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(cd->sge_txmbuf_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(cd->sge_txmbuf_tag);
		cd->sge_txmbuf_tag = NULL;
	}
	if (cd->sge_tag != NULL)
		bus_dma_tag_destroy(cd->sge_tag);
	cd->sge_tag = NULL;
}

/*
 * Initialize the TX descriptors.
 */
static int
sge_list_tx_init(struct sge_softc *sc)
{
	struct sge_list_data *ld;
	struct sge_chain_data *cd;

	SGE_LOCK_ASSERT(sc);
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;
	bzero(ld->sge_tx_ring, SGE_TX_RING_SZ);
	ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END);
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->sge_tx_prod = 0;
	cd->sge_tx_cons = 0;
	cd->sge_tx_cnt = 0;
	return (0);
}

static int
sge_list_tx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * has the RING_END flag set.
 */
static int
sge_list_rx_init(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	cd->sge_rx_cons = 0;
	bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		if (sge_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
sge_list_rx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(cd->sge_rxmbuf_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	SGE_LOCK_ASSERT(sc);

	cd = &sc->sge_cdata;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SGE_RX_BUF_ALIGN);
	error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
	    cd->sge_rx_spare_map, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	rxd = &cd->sge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = cd->sge_rx_spare_map;
	cd->sge_rx_spare_map = map;
	bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	desc = &sc->sge_ldata.sge_rx_ring[prod];
	desc->sge_sts_size = 0;
	desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
	desc->sge_flags = htole32(segs[0].ds_len);
	if (prod == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
	return (0);
}

static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
	struct sge_desc *desc;

	desc = &sc->sge_ldata.sge_rx_ring[index];
	desc->sge_sts_size = 0;
	desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
	if (index == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
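 *
 * The controller prepends SGE_RX_PAD_BYTES of padding so that the IP
 * header ends up 32-bit aligned and strips the CRC for us; both are
 * accounted for below before the mbuf is passed to if_input().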
 */
static void
sge_rxeof(struct sge_softc *sc)
{
	if_t ifp;
	struct mbuf *m;
	struct sge_chain_data *cd;
	struct sge_desc	*cur_rx;
	uint32_t rxinfo, rxstat;
	int cons, prog;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	cd = &sc->sge_cdata;

	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_rx_cons;
	for (prog = 0; prog < SGE_RX_RING_CNT; prog++,
	    SGE_INC(cons, SGE_RX_RING_CNT)) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
		cur_rx = &sc->sge_ldata.sge_rx_ring[cons];
		rxinfo = le32toh(cur_rx->sge_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = le32toh(cur_rx->sge_sts_size);
		if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 ||
		    SGE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
			    RX_ERR_BITS);
#endif
			sge_discard_rxbuf(sc, cons);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		m = cd->sge_rxdesc[cons].rx_m;
		if (sge_newbuf(sc, cons) != 0) {
			sge_discard_rxbuf(sc, cons);
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			continue;
		}
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			if ((rxinfo & RDC_IP_CSUM) != 0 &&
			    (rxinfo & RDC_IP_CSUM_OK) != 0)
				m->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED | CSUM_IP_VALID;
			if (((rxinfo & RDC_TCP_CSUM) != 0 &&
			    (rxinfo & RDC_TCP_CSUM_OK) != 0) ||
			    ((rxinfo & RDC_UDP_CSUM) != 0 &&
			    (rxinfo & RDC_UDP_CSUM_OK) != 0)) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		/* Check for VLAN tagged frame. */
		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (rxstat & RDS_VLAN) != 0) {
			m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK;
			m->m_flags |= M_VLANTAG;
		}
		/*
		 * Account for the 10 bytes of auto padding which are used
		 * to align the IP header on a 32-bit boundary.  Also note
		 * that the CRC bytes are automatically removed by the
		 * hardware.
		 */
		m->m_data += SGE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) -
		    SGE_RX_PAD_BYTES;
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		SGE_UNLOCK(sc);
		if_input(ifp, m);
		SGE_LOCK(sc);
	}

	if (prog > 0) {
		bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		cd->sge_rx_cons = cons;
	}
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
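 *
 * Note that sge_txeof() is also driven from sge_tick(), since the driver
 * does not request a Tx completion interrupt for every queued frame.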
 */
static void
sge_txeof(struct sge_softc *sc)
{
	if_t ifp;
	struct sge_list_data *ld;
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	uint32_t txstat;
	int cons, nsegs, prod;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;

	if (cd->sge_tx_cnt == 0)
		return;
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = cd->sge_tx_cons;
	prod = cd->sge_tx_prod;
	for (; cons != prod;) {
		txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;
		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated by the controller, so the driver
		 * should skip the entire chain of buffers for the
		 * transmitted frame.  In other words, the TDC_OWN bit is
		 * valid only in the first descriptor of a multi-descriptor
		 * transmission.
		 */
		if (SGE_TX_ERROR(txstat) != 0) {
#ifdef SGE_SHOW_ERRORS
			device_printf(sc->sge_dev, "Tx error : 0x%b\n",
			    txstat, TX_ERR_BITS);
#endif
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		} else {
#ifdef notyet
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0xFFFF) - 1);
#endif
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		txd = &cd->sge_txdesc[cons];
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			ld->sge_tx_ring[cons].sge_cmdsts = 0;
			SGE_INC(cons, SGE_TX_RING_CNT);
		}
		/* Reclaim transmitted mbuf. */
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf\n", __func__));
		bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		cd->sge_tx_cnt -= txd->tx_ndesc;
		KASSERT(cd->sge_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
	cd->sge_tx_cons = cons;
	if (cd->sge_tx_cnt == 0)
		sc->sge_timer = 0;
}

static void
sge_tick(void *arg)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	if_t ifp;

	sc = arg;
	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	mii = device_get_softc(sc->sge_miibus);
	mii_tick(mii);
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
		sge_miibus_statchg(sc->sge_dev);
		if ((sc->sge_flags & SGE_FLAG_LINK) != 0 &&
		    !if_sendq_empty(ifp))
			sge_start_locked(ifp);
	}
	/*
	 * Reclaim transmitted frames here as we do not request a Tx
	 * completion interrupt for every queued frame, in order to
	 * reduce excessive interrupts.
	 */
	sge_txeof(sc);
	sge_watchdog(sc);
	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

static void
sge_intr(void *arg)
{
	struct sge_softc *sc;
	if_t ifp;
	uint32_t status;

	sc = arg;
	SGE_LOCK(sc);
	ifp = sc->sge_ifp;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) {
		/* Not ours. */
		SGE_UNLOCK(sc);
		return;
	}
	/* Acknowledge interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);
	/*
	 * It seems the controller supports some kind of interrupt
	 * moderation mechanism, but we still don't know how to
	 * enable that.
	 * To reduce the number of generated interrupts under load we
	 * check for pending interrupts in a loop.  This increases the
	 * number of register accesses and is not the correct way to
	 * handle interrupt moderation, but there seems to be no other
	 * way at this time.
	 */
	for (;;) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			sge_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			sge_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SGE_INTRS) == 0)
			break;
		/* Acknowledge interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts */
		CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
		if (!if_sendq_empty(ifp))
			sge_start_locked(ifp);
	}
	SGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sge_encap(struct sge_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_txdesc *txd;
	bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
	uint32_t cflags, mss;
	int error, i, nsegs, prod, si;

	SGE_LOCK_ASSERT(sc);

	si = prod = sc->sge_cdata.sge_tx_prod;
	txd = &sc->sge_cdata.sge_txdesc[prod];
	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check the existence of a VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		m = m_pullup(m, poff + (tcp->th_off << 2));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		/*
		 * Reset the IP checksum and recompute the TCP pseudo
		 * checksum that the NDIS specification requires.
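		 * The pseudo checksum computed here deliberately excludes
		 * the TCP length; the controller presumably folds the
		 * per-segment length in when it finalizes the checksum of
		 * each generated TSO segment.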
		 */
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		ip->ip_sum = 0;
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(IPPROTO_TCP));
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	KASSERT(nsegs != 0, ("zero segment returned"));
	/* Check descriptor overrun. */
	if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
		bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		cflags |= TDC_LS;
		mss = (uint32_t)m->m_pkthdr.tso_segsz;
		mss <<= 16;
	} else {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= TDC_IP_CSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= TDC_TCP_CSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= TDC_UDP_CSUM;
	}
	for (i = 0; i < nsegs; i++) {
		desc = &sc->sge_ldata.sge_tx_ring[prod];
		if (i == 0) {
			desc->sge_sts_size = htole32(m->m_pkthdr.len | mss);
			desc->sge_cmdsts = 0;
		} else {
			desc->sge_sts_size = 0;
			desc->sge_cmdsts = htole32(TDC_OWN);
		}
		desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr));
		desc->sge_flags = htole32(txsegs[i].ds_len);
		if (prod == SGE_TX_RING_CNT - 1)
			desc->sge_flags |= htole32(RING_END);
		sc->sge_cdata.sge_tx_cnt++;
		SGE_INC(prod, SGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->sge_cdata.sge_tx_prod = prod;

	desc = &sc->sge_ldata.sge_tx_ring[si];
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= m->m_pkthdr.ether_vtag;
		desc->sge_sts_size |= htole32(TDS_INS_VLAN);
	}
	desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
#if 1
	if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
		desc->sge_cmdsts |= htole32(TDC_BST);
#else
	if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
		desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
		if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
			desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
	}
#endif
	/*
	 * Request interrupt and give ownership to the controller.
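	 * TDC_OWN is set on the first descriptor of the frame only now,
	 * after all later descriptors have been set up, so the controller
	 * never sees a partially built chain.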
	 */
	desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
	txd->tx_m = m;
	txd->tx_ndesc = nsegs;
	return (0);
}

static void
sge_start(if_t ifp)
{
	struct sge_softc *sc;

	sc = if_getsoftc(ifp);
	SGE_LOCK(sc);
	sge_start_locked(ifp);
	SGE_UNLOCK(sc);
}

static void
sge_start_locked(if_t ifp)
{
	struct sge_softc *sc;
	struct mbuf *m_head;
	int queued = 0;

	sc = if_getsoftc(ifp);
	SGE_LOCK_ASSERT(sc);

	if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
	    (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (queued = 0; !if_sendq_empty(ifp); ) {
		if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT -
		    SGE_MAXTXSEGS)) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		if (sge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		queued++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued > 0) {
		bus_dmamap_sync(sc->sge_cdata.sge_tx_tag,
		    sc->sge_cdata.sge_tx_dmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		sc->sge_timer = 5;
	}
}

static void
sge_init(void *arg)
{
	struct sge_softc *sc;

	sc = arg;
	SGE_LOCK(sc);
	sge_init_locked(sc);
	SGE_UNLOCK(sc);
}

static void
sge_init_locked(struct sge_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	uint16_t rxfilt;
	int i;

	SGE_LOCK_ASSERT(sc);
	ifp = sc->sge_ifp;
	mii = device_get_softc(sc->sge_miibus);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sge_stop(sc);
	sge_reset(sc);

	/* Init circular RX list. */
	if (sge_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->sge_dev, "no memory for Rx buffers\n");
		sge_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	sge_list_tx_init(sc);
	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr));
	CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr));

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	/* Allow receiving VLAN frames. */
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SGE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, if_getlladdr(ifp)[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	sge_rxfilter(sc);
	sge_setvlan(sc);

	/* Initialize default speed/duplex information. */
	if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0)
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	sc->sge_flags |= SGE_FLAG_FDX;
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0)
		CSR_WRITE_4(sc, StationControl, 0x04008001);
	else
		CSR_WRITE_4(sc, StationControl, 0x04000001);
	/*
	 * XXX Try to mitigate interrupts.
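	 * The value written to IntrControl below is a magic number; the
	 * exact layout of the moderation fields is not documented here
	 * (see also the disabled sge_intrcontrol/sge_intrtimer tunables
	 * under #ifdef notyet).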
	 */
	CSR_WRITE_4(sc, IntrControl, 0x08880000);
#ifdef notyet
	if (sc->sge_intrcontrol != 0)
		CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol);
	if (sc->sge_intrtimer != 0)
		CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer);
#endif

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
	CSR_WRITE_4(sc, IntrMask, SGE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	sc->sge_flags &= ~SGE_FLAG_LINK;
	mii_mediachg(mii);
	callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

/*
 * Set media options.
 */
static int
sge_ifmedia_upd(if_t ifp)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	SGE_LOCK(sc);
	mii = device_get_softc(sc->sge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	SGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
sge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct sge_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	SGE_LOCK(sc);
	mii = device_get_softc(sc->sge_miibus);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		SGE_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	SGE_UNLOCK(sc);
}

static int
sge_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct sge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask, reinit;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		SGE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->sge_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				sge_rxfilter(sc);
			else
				sge_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			sge_stop(sc);
		sc->sge_if_flags = if_getflags(ifp);
		SGE_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		SGE_LOCK(sc);
		reinit = 0;
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, SGE_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, SGE_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
		if ((mask & IFCAP_TSO4) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			/*
			 * Due to an unknown reason, toggling VLAN hardware
			 * tagging requires interface reinitialization.
			 */
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
				if_setcapenablebit(ifp, 0,
				    IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
			reinit = 1;
		}
		if (reinit > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			sge_init_locked(sc);
		}
		SGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SGE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			sge_rxfilter(sc);
		SGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
sge_watchdog(struct sge_softc *sc)
{
	if_t ifp;

	SGE_LOCK_ASSERT(sc);
	if (sc->sge_timer == 0 || --sc->sge_timer > 0)
		return;

	ifp = sc->sge_ifp;
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
		if (1 || bootverbose)
			device_printf(sc->sge_dev,
			    "watchdog timeout (lost link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		sge_init_locked(sc);
		return;
	}
	device_printf(sc->sge_dev, "watchdog timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	sge_init_locked(sc);
	if (!if_sendq_empty(sc->sge_ifp))
		sge_start_locked(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sge_stop(struct sge_softc *sc)
{
	if_t ifp;

	ifp = sc->sge_ifp;

	SGE_LOCK_ASSERT(sc);

	sc->sge_timer = 0;
	callout_stop(&sc->sge_stat_ch);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sge_flags &= ~SGE_FLAG_LINK;
	sge_list_rx_free(sc);
	sge_list_tx_free(sc);
}