1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com> 5 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net> 6 * Copyright (c) 1997, 1998, 1999 7 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Bill Paul. 20 * 4. Neither the name of the author nor the names of any co-contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 27 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR 28 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 33 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 35 * OF THE POSSIBILITY OF SUCH DAMAGE. 
36 */ 37 38 #include <sys/cdefs.h> 39 /* 40 * SiS 190/191 PCI Ethernet NIC driver. 41 * 42 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original 43 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by 44 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu 45 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for 46 * review and very useful comments. 47 * 48 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the 49 * Linux and Solaris drivers. 50 */ 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/bus.h> 55 #include <sys/endian.h> 56 #include <sys/kernel.h> 57 #include <sys/lock.h> 58 #include <sys/malloc.h> 59 #include <sys/mbuf.h> 60 #include <sys/module.h> 61 #include <sys/mutex.h> 62 #include <sys/rman.h> 63 #include <sys/socket.h> 64 #include <sys/sockio.h> 65 66 #include <net/bpf.h> 67 #include <net/if.h> 68 #include <net/if_var.h> 69 #include <net/if_arp.h> 70 #include <net/ethernet.h> 71 #include <net/if_dl.h> 72 #include <net/if_media.h> 73 #include <net/if_types.h> 74 #include <net/if_vlan_var.h> 75 76 #include <netinet/in.h> 77 #include <netinet/in_systm.h> 78 #include <netinet/ip.h> 79 #include <netinet/tcp.h> 80 81 #include <machine/bus.h> 82 #include <machine/in_cksum.h> 83 84 #include <dev/mii/mii.h> 85 #include <dev/mii/miivar.h> 86 87 #include <dev/pci/pcireg.h> 88 #include <dev/pci/pcivar.h> 89 90 #include <dev/sge/if_sgereg.h> 91 92 MODULE_DEPEND(sge, pci, 1, 1, 1); 93 MODULE_DEPEND(sge, ether, 1, 1, 1); 94 MODULE_DEPEND(sge, miibus, 1, 1, 1); 95 96 /* "device miibus0" required. See GENERIC if you get errors here. */ 97 #include "miibus_if.h" 98 99 /* 100 * Various supported device vendors/types and their names. 
101 */ 102 static struct sge_type sge_devs[] = { 103 { SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" }, 104 { SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" }, 105 { 0, 0, NULL } 106 }; 107 108 static int sge_probe(device_t); 109 static int sge_attach(device_t); 110 static int sge_detach(device_t); 111 static int sge_shutdown(device_t); 112 static int sge_suspend(device_t); 113 static int sge_resume(device_t); 114 115 static int sge_miibus_readreg(device_t, int, int); 116 static int sge_miibus_writereg(device_t, int, int, int); 117 static void sge_miibus_statchg(device_t); 118 119 static int sge_newbuf(struct sge_softc *, int); 120 static int sge_encap(struct sge_softc *, struct mbuf **); 121 static __inline void 122 sge_discard_rxbuf(struct sge_softc *, int); 123 static void sge_rxeof(struct sge_softc *); 124 static void sge_txeof(struct sge_softc *); 125 static void sge_intr(void *); 126 static void sge_tick(void *); 127 static void sge_start(if_t); 128 static void sge_start_locked(if_t); 129 static int sge_ioctl(if_t, u_long, caddr_t); 130 static void sge_init(void *); 131 static void sge_init_locked(struct sge_softc *); 132 static void sge_stop(struct sge_softc *); 133 static void sge_watchdog(struct sge_softc *); 134 static int sge_ifmedia_upd(if_t); 135 static void sge_ifmedia_sts(if_t, struct ifmediareq *); 136 137 static int sge_get_mac_addr_apc(struct sge_softc *, uint8_t *); 138 static int sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *); 139 static uint16_t sge_read_eeprom(struct sge_softc *, int); 140 141 static void sge_rxfilter(struct sge_softc *); 142 static void sge_setvlan(struct sge_softc *); 143 static void sge_reset(struct sge_softc *); 144 static int sge_list_rx_init(struct sge_softc *); 145 static int sge_list_rx_free(struct sge_softc *); 146 static int sge_list_tx_init(struct sge_softc *); 147 static int sge_list_tx_free(struct sge_softc *); 148 149 static int sge_dma_alloc(struct sge_softc *); 150 static void 
sge_dma_free(struct sge_softc *); 151 static void sge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 152 153 static device_method_t sge_methods[] = { 154 /* Device interface */ 155 DEVMETHOD(device_probe, sge_probe), 156 DEVMETHOD(device_attach, sge_attach), 157 DEVMETHOD(device_detach, sge_detach), 158 DEVMETHOD(device_suspend, sge_suspend), 159 DEVMETHOD(device_resume, sge_resume), 160 DEVMETHOD(device_shutdown, sge_shutdown), 161 162 /* MII interface */ 163 DEVMETHOD(miibus_readreg, sge_miibus_readreg), 164 DEVMETHOD(miibus_writereg, sge_miibus_writereg), 165 DEVMETHOD(miibus_statchg, sge_miibus_statchg), 166 167 DEVMETHOD_END 168 }; 169 170 static driver_t sge_driver = { 171 "sge", sge_methods, sizeof(struct sge_softc) 172 }; 173 174 DRIVER_MODULE(sge, pci, sge_driver, 0, 0); 175 DRIVER_MODULE(miibus, sge, miibus_driver, 0, 0); 176 177 /* 178 * Register space access macros. 179 */ 180 #define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sge_res, reg, val) 181 #define CSR_WRITE_2(sc, reg, val) bus_write_2(sc->sge_res, reg, val) 182 #define CSR_WRITE_1(cs, reg, val) bus_write_1(sc->sge_res, reg, val) 183 184 #define CSR_READ_4(sc, reg) bus_read_4(sc->sge_res, reg) 185 #define CSR_READ_2(sc, reg) bus_read_2(sc->sge_res, reg) 186 #define CSR_READ_1(sc, reg) bus_read_1(sc->sge_res, reg) 187 188 /* Define to show Tx/Rx error status. */ 189 #undef SGE_SHOW_ERRORS 190 191 #define SGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 192 193 static void 194 sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 195 { 196 bus_addr_t *p; 197 198 if (error != 0) 199 return; 200 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 201 p = arg; 202 *p = segs->ds_addr; 203 } 204 205 /* 206 * Read a sequence of words from the EEPROM. 
 */
static uint16_t
sge_read_eeprom(struct sge_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big"));
	/* Issue a read request and poll until the chip clears EI_REQ. */
	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev,
		    "EEPROM read timeout : 0x%08x\n", val);
		/* 0xffff doubles as the "invalid" sentinel for callers. */
		return (0xffff);
	}

	return ((val & EI_DATA) >> EI_DATA_SHIFT);
}

/*
 * Read the station address out of the EEPROM into dest.  Returns 0 on
 * success or EINVAL when the EEPROM signature word is missing/invalid
 * (all-ones, i.e. a failed read, or zero).
 */
static int
sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = sge_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0) {
		device_printf(sc->sge_dev,
		    "invalid EEPROM signature : 0x%04x\n", val);
		return (EINVAL);
	}

	/* The MAC address is stored as three little-endian words. */
	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	/* Bit 7 of the info word indicates an RGMII-connected PHY. */
	if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	return (0);
}

/*
 * For SiS96x, APC CMOS RAM is used to store ethernet address.
 * APC CMOS RAM is accessed through ISA bridge.
 */
static int
sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	devclass_t pci;
	device_t bus, dev = NULL;
	device_t *kids;
	/* PCI IDs of the SiS96x ISA bridges that carry the APC RAM. */
	struct apc_tbl {
		uint16_t vid;
		uint16_t did;
	} *tp, apc_tbls[] = {
		{ SIS_VENDORID, 0x0965 },
		{ SIS_VENDORID, 0x0966 },
		{ SIS_VENDORID, 0x0968 }
	};
	uint8_t reg;
	int busnum, i, j, numkids;

	/* Walk every PCI bus looking for a matching PCI-ISA bridge. */
	pci = devclass_find("pci");
	for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
		bus = devclass_get_device(pci, busnum);
		if (!bus)
			continue;
		if (device_get_children(bus, &kids, &numkids) != 0)
			continue;
		for (i = 0; i < numkids; i++) {
			dev = kids[i];
			if (pci_get_class(dev) == PCIC_BRIDGE &&
			    pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
				tp = apc_tbls;
				for (j = 0; j < nitems(apc_tbls); j++) {
					if (pci_get_vendor(dev) == tp->vid &&
					    pci_get_device(dev) == tp->did) {
						free(kids, M_TEMP);
						goto apc_found;
					}
					tp++;
				}
			}
		}
		free(kids, M_TEMP);
	}
	device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
	return (EINVAL);
apc_found:
	/* Enable port 0x78 and 0x79 to access APC registers. */
	reg = pci_read_config(dev, 0x48, 1);
	pci_write_config(dev, 0x48, reg & ~0x02, 1);
	DELAY(50);
	pci_read_config(dev, 0x48, 1);
	/* Read stored ethernet address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		outb(0x78, 0x09 + i);
		dest[i] = inb(0x79);
	}
	/* APC register 0x12 bit 7 flags an RGMII-connected PHY. */
	outb(0x78, 0x12);
	if ((inb(0x79) & 0x80) != 0)
		sc->sge_flags |= SGE_FLAG_RGMII;
	/* Restore access to APC registers. */
	pci_write_config(dev, 0x48, reg, 1);

	return (0);
#else
	/* APC access relies on x86 I/O ports; unsupported elsewhere. */
	return (EINVAL);
#endif
}

/*
 * Read a PHY register via the GMII interface.  Returns the register
 * value, or 0 if the chip never cleared the request bit.
 */
static int
sge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
	DELAY(10);
	/* Poll until the hardware clears GMI_REQ. */
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT) {
		device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
		return (0);
	}
	return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
}

/*
 * Write a PHY register via the GMII interface.  Always returns 0;
 * a timeout is only reported on the console.
 */
static int
sge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sge_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
	    (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
	    GMI_OP_WR | GMI_REQ);
	DELAY(10);
	for (i = 0; i < SGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			break;
		DELAY(10);
	}
	if (i == SGE_TIMEOUT)
		device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
	return (0);
}

/*
 * miibus link-state callback: translate the resolved media into the
 * StationControl speed/duplex bits and update the link flags.
 */
static void
sge_miibus_statchg(device_t dev)
{
	struct sge_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t ctl, speed;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->sge_miibus);
	ifp = sc->sge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;
	speed = 0;
	sc->sge_flags &= ~SGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sge_flags |= SGE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
			/* Gigabit is only valid on non-fast-ethernet parts. */
			if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
				sc->sge_flags |= SGE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
		return;
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000) {
		ctl |= 0x07000000;
		sc->sge_flags |= SGE_FLAG_SPEED_1000;
	} else {
		ctl |= 0x04000000;
		sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
	}
#ifdef notyet
	if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ctl |= SC_FDX;
		sc->sge_flags |= SGE_FLAG_FDX;
	} else
		sc->sge_flags &= ~SGE_FLAG_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	/* RGMII parts need a delay-line kick after a speed change. */
	if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * two 32-bit hash registers (big-endian CRC; top bit selects the
 * register, next five bits the bit position).
 */
static u_int
sge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	uint32_t crc, *hashes = arg;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

	return (1);
}

/*
 * Program the receive filter (unicast/broadcast/multicast acceptance
 * and the multicast hash table) from the interface flags.
 */
static void
sge_rxfilter(struct sge_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2];
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptBroadcast | AcceptAllPhys | AcceptMulticast);
	rxfilt |= AcceptMyPhys;
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		rxfilt |= AcceptBroadcast;
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxfilt |= AcceptAllPhys;
		/* Promisc/allmulti: open the hash filter completely. */
		rxfilt |= AcceptMulticast;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;
		/* Now program new ones. */
		if_foreach_llmaddr(ifp, sge_hash_maddr, hashes);
	}
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

/*
 * Enable or disable hardware VLAN tag stripping to match the
 * interface's enabled capabilities.
 */
static void
sge_setvlan(struct sge_softc *sc)
{
	if_t ifp;
	uint16_t rxfilt;

	SGE_LOCK_ASSERT(sc);

	ifp = sc->sge_ifp;
	if ((if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
		return;
	rxfilt = CSR_READ_2(sc, RxMacControl);
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		rxfilt |= RXMAC_STRIP_VLAN;
	else
		rxfilt &= ~RXMAC_STRIP_VLAN;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);
}

/*
 * Mask/ack interrupts, soft-reset the chip, and stop the MAC's
 * Tx/Rx engines.  Leaves the controller quiescent.
 */
static void
sge_reset(struct sge_softc *sc)
{

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sge_probe(device_t dev)
{
	struct sge_type *t;

	t = sge_devs;
	while (t->sge_name != NULL) {
		if ((pci_get_vendor(dev) == t->sge_vid) &&
		    (pci_get_device(dev) == t->sge_did)) {
			device_set_desc(dev, t->sge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
550 */ 551 static int 552 sge_attach(device_t dev) 553 { 554 struct sge_softc *sc; 555 if_t ifp; 556 uint8_t eaddr[ETHER_ADDR_LEN]; 557 int error = 0, rid; 558 559 sc = device_get_softc(dev); 560 sc->sge_dev = dev; 561 562 mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 563 MTX_DEF); 564 callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0); 565 566 /* 567 * Map control/status registers. 568 */ 569 pci_enable_busmaster(dev); 570 571 /* Allocate resources. */ 572 sc->sge_res_id = PCIR_BAR(0); 573 sc->sge_res_type = SYS_RES_MEMORY; 574 sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type, 575 &sc->sge_res_id, RF_ACTIVE); 576 if (sc->sge_res == NULL) { 577 device_printf(dev, "couldn't allocate resource\n"); 578 error = ENXIO; 579 goto fail; 580 } 581 582 rid = 0; 583 sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 584 RF_SHAREABLE | RF_ACTIVE); 585 if (sc->sge_irq == NULL) { 586 device_printf(dev, "couldn't allocate IRQ resources\n"); 587 error = ENXIO; 588 goto fail; 589 } 590 sc->sge_rev = pci_get_revid(dev); 591 if (pci_get_device(dev) == SIS_DEVICEID_190) 592 sc->sge_flags |= SGE_FLAG_FASTETHER | SGE_FLAG_SIS190; 593 /* Reset the adapter. */ 594 sge_reset(sc); 595 596 /* Get MAC address from the EEPROM. 
*/ 597 if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0) 598 sge_get_mac_addr_apc(sc, eaddr); 599 else 600 sge_get_mac_addr_eeprom(sc, eaddr); 601 602 if ((error = sge_dma_alloc(sc)) != 0) 603 goto fail; 604 605 ifp = sc->sge_ifp = if_alloc(IFT_ETHER); 606 if_setsoftc(ifp, sc); 607 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 608 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 609 if_setioctlfn(ifp, sge_ioctl); 610 if_setstartfn(ifp, sge_start); 611 if_setinitfn(ifp, sge_init); 612 if_setsendqlen(ifp, SGE_TX_RING_CNT - 1); 613 if_setsendqready(ifp); 614 if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM | IFCAP_TSO4); 615 if_sethwassist(ifp, SGE_CSUM_FEATURES | CSUM_TSO); 616 if_setcapenable(ifp, if_getcapabilities(ifp)); 617 /* 618 * Do MII setup. 619 */ 620 error = mii_attach(dev, &sc->sge_miibus, ifp, sge_ifmedia_upd, 621 sge_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 622 if (error != 0) { 623 device_printf(dev, "attaching PHYs failed\n"); 624 goto fail; 625 } 626 627 /* 628 * Call MI attach routine. 629 */ 630 ether_ifattach(ifp, eaddr); 631 632 /* VLAN setup. */ 633 if_setcapabilities(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | 634 IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU); 635 if_setcapenable(ifp, if_getcapabilities(ifp)); 636 /* Tell the upper layer(s) we support long frames. */ 637 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 638 639 /* Hook interrupt last to avoid having to lock softc */ 640 error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE, 641 NULL, sge_intr, sc, &sc->sge_intrhand); 642 if (error) { 643 device_printf(dev, "couldn't set up irq\n"); 644 ether_ifdetach(ifp); 645 goto fail; 646 } 647 648 fail: 649 if (error) 650 sge_detach(dev); 651 652 return (error); 653 } 654 655 /* 656 * Shutdown hardware and free up resources. This can be called any 657 * time after the mutex has been initialized. 
It is called in both 658 * the error case in attach and the normal detach case so it needs 659 * to be careful about only freeing resources that have actually been 660 * allocated. 661 */ 662 static int 663 sge_detach(device_t dev) 664 { 665 struct sge_softc *sc; 666 if_t ifp; 667 668 sc = device_get_softc(dev); 669 ifp = sc->sge_ifp; 670 /* These should only be active if attach succeeded. */ 671 if (device_is_attached(dev)) { 672 ether_ifdetach(ifp); 673 SGE_LOCK(sc); 674 sge_stop(sc); 675 SGE_UNLOCK(sc); 676 callout_drain(&sc->sge_stat_ch); 677 } 678 bus_generic_detach(dev); 679 680 if (sc->sge_intrhand) 681 bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand); 682 if (sc->sge_irq) 683 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq); 684 if (sc->sge_res) 685 bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id, 686 sc->sge_res); 687 if (ifp) 688 if_free(ifp); 689 sge_dma_free(sc); 690 mtx_destroy(&sc->sge_mtx); 691 692 return (0); 693 } 694 695 /* 696 * Stop all chip I/O so that the kernel's probe routines don't 697 * get confused by errant DMAs when rebooting. 
698 */ 699 static int 700 sge_shutdown(device_t dev) 701 { 702 struct sge_softc *sc; 703 704 sc = device_get_softc(dev); 705 SGE_LOCK(sc); 706 sge_stop(sc); 707 SGE_UNLOCK(sc); 708 return (0); 709 } 710 711 static int 712 sge_suspend(device_t dev) 713 { 714 struct sge_softc *sc; 715 if_t ifp; 716 717 sc = device_get_softc(dev); 718 SGE_LOCK(sc); 719 ifp = sc->sge_ifp; 720 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 721 sge_stop(sc); 722 SGE_UNLOCK(sc); 723 return (0); 724 } 725 726 static int 727 sge_resume(device_t dev) 728 { 729 struct sge_softc *sc; 730 if_t ifp; 731 732 sc = device_get_softc(dev); 733 SGE_LOCK(sc); 734 ifp = sc->sge_ifp; 735 if ((if_getflags(ifp) & IFF_UP) != 0) 736 sge_init_locked(sc); 737 SGE_UNLOCK(sc); 738 return (0); 739 } 740 741 static int 742 sge_dma_alloc(struct sge_softc *sc) 743 { 744 struct sge_chain_data *cd; 745 struct sge_list_data *ld; 746 struct sge_rxdesc *rxd; 747 struct sge_txdesc *txd; 748 int error, i; 749 750 cd = &sc->sge_cdata; 751 ld = &sc->sge_ldata; 752 error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev), 753 1, 0, /* alignment, boundary */ 754 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 755 BUS_SPACE_MAXADDR, /* highaddr */ 756 NULL, NULL, /* filter, filterarg */ 757 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 758 1, /* nsegments */ 759 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 760 0, /* flags */ 761 NULL, /* lockfunc */ 762 NULL, /* lockarg */ 763 &cd->sge_tag); 764 if (error != 0) { 765 device_printf(sc->sge_dev, 766 "could not create parent DMA tag.\n"); 767 goto fail; 768 } 769 770 /* RX descriptor ring */ 771 error = bus_dma_tag_create(cd->sge_tag, 772 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 773 BUS_SPACE_MAXADDR, /* lowaddr */ 774 BUS_SPACE_MAXADDR, /* highaddr */ 775 NULL, NULL, /* filter, filterarg */ 776 SGE_RX_RING_SZ, 1, /* maxsize,nsegments */ 777 SGE_RX_RING_SZ, /* maxsegsize */ 778 0, /* flags */ 779 NULL, /* lockfunc */ 780 NULL, /* lockarg */ 781 &cd->sge_rx_tag); 782 if (error != 0) { 783 
device_printf(sc->sge_dev, 784 "could not create Rx ring DMA tag.\n"); 785 goto fail; 786 } 787 /* Allocate DMA'able memory and load DMA map for RX ring. */ 788 error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring, 789 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 790 &cd->sge_rx_dmamap); 791 if (error != 0) { 792 device_printf(sc->sge_dev, 793 "could not allocate DMA'able memory for Rx ring.\n"); 794 goto fail; 795 } 796 error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap, 797 ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr, 798 &ld->sge_rx_paddr, BUS_DMA_NOWAIT); 799 if (error != 0) { 800 device_printf(sc->sge_dev, 801 "could not load DMA'able memory for Rx ring.\n"); 802 } 803 804 /* TX descriptor ring */ 805 error = bus_dma_tag_create(cd->sge_tag, 806 SGE_DESC_ALIGN, 0, /* alignment, boundary */ 807 BUS_SPACE_MAXADDR, /* lowaddr */ 808 BUS_SPACE_MAXADDR, /* highaddr */ 809 NULL, NULL, /* filter, filterarg */ 810 SGE_TX_RING_SZ, 1, /* maxsize,nsegments */ 811 SGE_TX_RING_SZ, /* maxsegsize */ 812 0, /* flags */ 813 NULL, /* lockfunc */ 814 NULL, /* lockarg */ 815 &cd->sge_tx_tag); 816 if (error != 0) { 817 device_printf(sc->sge_dev, 818 "could not create Rx ring DMA tag.\n"); 819 goto fail; 820 } 821 /* Allocate DMA'able memory and load DMA map for TX ring. */ 822 error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring, 823 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, 824 &cd->sge_tx_dmamap); 825 if (error != 0) { 826 device_printf(sc->sge_dev, 827 "could not allocate DMA'able memory for Tx ring.\n"); 828 goto fail; 829 } 830 error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap, 831 ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr, 832 &ld->sge_tx_paddr, BUS_DMA_NOWAIT); 833 if (error != 0) { 834 device_printf(sc->sge_dev, 835 "could not load DMA'able memory for Rx ring.\n"); 836 goto fail; 837 } 838 839 /* Create DMA tag for Tx buffers. 
*/ 840 error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR, 841 BUS_SPACE_MAXADDR, NULL, NULL, SGE_TSO_MAXSIZE, SGE_MAXTXSEGS, 842 SGE_TSO_MAXSEGSIZE, 0, NULL, NULL, &cd->sge_txmbuf_tag); 843 if (error != 0) { 844 device_printf(sc->sge_dev, 845 "could not create Tx mbuf DMA tag.\n"); 846 goto fail; 847 } 848 849 /* Create DMA tag for Rx buffers. */ 850 error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0, 851 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 852 MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag); 853 if (error != 0) { 854 device_printf(sc->sge_dev, 855 "could not create Rx mbuf DMA tag.\n"); 856 goto fail; 857 } 858 859 /* Create DMA maps for Tx buffers. */ 860 for (i = 0; i < SGE_TX_RING_CNT; i++) { 861 txd = &cd->sge_txdesc[i]; 862 txd->tx_m = NULL; 863 txd->tx_dmamap = NULL; 864 txd->tx_ndesc = 0; 865 error = bus_dmamap_create(cd->sge_txmbuf_tag, 0, 866 &txd->tx_dmamap); 867 if (error != 0) { 868 device_printf(sc->sge_dev, 869 "could not create Tx DMA map.\n"); 870 goto fail; 871 } 872 } 873 /* Create spare DMA map for Rx buffer. */ 874 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map); 875 if (error != 0) { 876 device_printf(sc->sge_dev, 877 "could not create spare Rx DMA map.\n"); 878 goto fail; 879 } 880 /* Create DMA maps for Rx buffers. */ 881 for (i = 0; i < SGE_RX_RING_CNT; i++) { 882 rxd = &cd->sge_rxdesc[i]; 883 rxd->rx_m = NULL; 884 rxd->rx_dmamap = NULL; 885 error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, 886 &rxd->rx_dmamap); 887 if (error) { 888 device_printf(sc->sge_dev, 889 "could not create Rx DMA map.\n"); 890 goto fail; 891 } 892 } 893 fail: 894 return (error); 895 } 896 897 static void 898 sge_dma_free(struct sge_softc *sc) 899 { 900 struct sge_chain_data *cd; 901 struct sge_list_data *ld; 902 struct sge_rxdesc *rxd; 903 struct sge_txdesc *txd; 904 int i; 905 906 cd = &sc->sge_cdata; 907 ld = &sc->sge_ldata; 908 /* Rx ring. 
*/ 909 if (cd->sge_rx_tag != NULL) { 910 if (ld->sge_rx_paddr != 0) 911 bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap); 912 if (ld->sge_rx_ring != NULL) 913 bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring, 914 cd->sge_rx_dmamap); 915 ld->sge_rx_ring = NULL; 916 ld->sge_rx_paddr = 0; 917 bus_dma_tag_destroy(cd->sge_rx_tag); 918 cd->sge_rx_tag = NULL; 919 } 920 /* Tx ring. */ 921 if (cd->sge_tx_tag != NULL) { 922 if (ld->sge_tx_paddr != 0) 923 bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap); 924 if (ld->sge_tx_ring != NULL) 925 bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring, 926 cd->sge_tx_dmamap); 927 ld->sge_tx_ring = NULL; 928 ld->sge_tx_paddr = 0; 929 bus_dma_tag_destroy(cd->sge_tx_tag); 930 cd->sge_tx_tag = NULL; 931 } 932 /* Rx buffers. */ 933 if (cd->sge_rxmbuf_tag != NULL) { 934 for (i = 0; i < SGE_RX_RING_CNT; i++) { 935 rxd = &cd->sge_rxdesc[i]; 936 if (rxd->rx_dmamap != NULL) { 937 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 938 rxd->rx_dmamap); 939 rxd->rx_dmamap = NULL; 940 } 941 } 942 if (cd->sge_rx_spare_map != NULL) { 943 bus_dmamap_destroy(cd->sge_rxmbuf_tag, 944 cd->sge_rx_spare_map); 945 cd->sge_rx_spare_map = NULL; 946 } 947 bus_dma_tag_destroy(cd->sge_rxmbuf_tag); 948 cd->sge_rxmbuf_tag = NULL; 949 } 950 /* Tx buffers. */ 951 if (cd->sge_txmbuf_tag != NULL) { 952 for (i = 0; i < SGE_TX_RING_CNT; i++) { 953 txd = &cd->sge_txdesc[i]; 954 if (txd->tx_dmamap != NULL) { 955 bus_dmamap_destroy(cd->sge_txmbuf_tag, 956 txd->tx_dmamap); 957 txd->tx_dmamap = NULL; 958 } 959 } 960 bus_dma_tag_destroy(cd->sge_txmbuf_tag); 961 cd->sge_txmbuf_tag = NULL; 962 } 963 if (cd->sge_tag != NULL) 964 bus_dma_tag_destroy(cd->sge_tag); 965 cd->sge_tag = NULL; 966 } 967 968 /* 969 * Initialize the TX descriptors. 
 */
static int
sge_list_tx_init(struct sge_softc *sc)
{
	struct sge_list_data *ld;
	struct sge_chain_data *cd;

	SGE_LOCK_ASSERT(sc);
	ld = &sc->sge_ldata;
	cd = &sc->sge_cdata;
	/* Clear the ring; only the last descriptor needs RING_END set. */
	bzero(ld->sge_tx_ring, SGE_TX_RING_SZ);
	ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END);
	bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->sge_tx_prod = 0;
	cd->sge_tx_cons = 0;
	cd->sge_tx_cnt = 0;
	return (0);
}

/*
 * Release any mbufs still attached to Tx descriptors.  Always
 * returns 0.
 */
static int
sge_list_tx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_txdesc *txd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_TX_RING_CNT; i++) {
		txd = &cd->sge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * has RING_END flag set.
 */
static int
sge_list_rx_init(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	cd->sge_rx_cons = 0;
	bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
	/* sge_newbuf() fills each slot and hands it to the chip. */
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		if (sge_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * Release any mbufs still attached to Rx descriptors.  Always
 * returns 0.
 */
static int
sge_list_rx_free(struct sge_softc *sc)
{
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	int i;

	SGE_LOCK_ASSERT(sc);
	cd = &sc->sge_cdata;
	for (i = 0; i < SGE_RX_RING_CNT; i++) {
		rxd = &cd->sge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(cd->sge_rxmbuf_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
	struct mbuf *m;
	struct sge_desc *desc;
	struct sge_chain_data *cd;
	struct sge_rxdesc *rxd;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	SGE_LOCK_ASSERT(sc);

	cd = &sc->sge_cdata;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Trim the front so the buffer meets the chip's Rx alignment. */
	m_adj(m, SGE_RX_BUF_ALIGN);
	/*
	 * Load into the spare map first; the old mbuf (and its map) is
	 * only given up once the new one is successfully loaded.
	 */
	error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
	    cd->sge_rx_spare_map, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	rxd = &cd->sge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
	}
	/* Swap the loaded spare map with the descriptor's map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = cd->sge_rx_spare_map;
	cd->sge_rx_spare_map = map;
	bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	/* Publish the buffer to the chip; set OWN last. */
	desc = &sc->sge_ldata.sge_rx_ring[prod];
	desc->sge_sts_size = 0;
	desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
	desc->sge_flags = htole32(segs[0].ds_len);
	if (prod == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
	return (0);
}

/*
 * Return a descriptor to the chip without replacing its mbuf
 * (used when an errored or dropped frame's buffer is recycled).
 */
static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
	struct sge_desc *desc;

	desc = &sc->sge_ldata.sge_rx_ring[index];
	desc->sge_sts_size = 0;
	desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
	if (index == SGE_RX_RING_CNT - 1)
		desc->sge_flags |= htole32(RING_END);
	desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up
to 1128 * the higher level protocols. 1129 */ 1130 static void 1131 sge_rxeof(struct sge_softc *sc) 1132 { 1133 if_t ifp; 1134 struct mbuf *m; 1135 struct sge_chain_data *cd; 1136 struct sge_desc *cur_rx; 1137 uint32_t rxinfo, rxstat; 1138 int cons, prog; 1139 1140 SGE_LOCK_ASSERT(sc); 1141 1142 ifp = sc->sge_ifp; 1143 cd = &sc->sge_cdata; 1144 1145 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1146 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1147 cons = cd->sge_rx_cons; 1148 for (prog = 0; prog < SGE_RX_RING_CNT; prog++, 1149 SGE_INC(cons, SGE_RX_RING_CNT)) { 1150 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 1151 break; 1152 cur_rx = &sc->sge_ldata.sge_rx_ring[cons]; 1153 rxinfo = le32toh(cur_rx->sge_cmdsts); 1154 if ((rxinfo & RDC_OWN) != 0) 1155 break; 1156 rxstat = le32toh(cur_rx->sge_sts_size); 1157 if ((rxstat & RDS_CRCOK) == 0 || SGE_RX_ERROR(rxstat) != 0 || 1158 SGE_RX_NSEGS(rxstat) != 1) { 1159 /* XXX We don't support multi-segment frames yet. */ 1160 #ifdef SGE_SHOW_ERRORS 1161 device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat, 1162 RX_ERR_BITS); 1163 #endif 1164 sge_discard_rxbuf(sc, cons); 1165 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1166 continue; 1167 } 1168 m = cd->sge_rxdesc[cons].rx_m; 1169 if (sge_newbuf(sc, cons) != 0) { 1170 sge_discard_rxbuf(sc, cons); 1171 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1172 continue; 1173 } 1174 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { 1175 if ((rxinfo & RDC_IP_CSUM) != 0 && 1176 (rxinfo & RDC_IP_CSUM_OK) != 0) 1177 m->m_pkthdr.csum_flags |= 1178 CSUM_IP_CHECKED | CSUM_IP_VALID; 1179 if (((rxinfo & RDC_TCP_CSUM) != 0 && 1180 (rxinfo & RDC_TCP_CSUM_OK) != 0) || 1181 ((rxinfo & RDC_UDP_CSUM) != 0 && 1182 (rxinfo & RDC_UDP_CSUM_OK) != 0)) { 1183 m->m_pkthdr.csum_flags |= 1184 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1185 m->m_pkthdr.csum_data = 0xffff; 1186 } 1187 } 1188 /* Check for VLAN tagged frame. 
*/ 1189 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 && 1190 (rxstat & RDS_VLAN) != 0) { 1191 m->m_pkthdr.ether_vtag = rxinfo & RDC_VLAN_MASK; 1192 m->m_flags |= M_VLANTAG; 1193 } 1194 /* 1195 * Account for 10bytes auto padding which is used 1196 * to align IP header on 32bit boundary. Also note, 1197 * CRC bytes is automatically removed by the 1198 * hardware. 1199 */ 1200 m->m_data += SGE_RX_PAD_BYTES; 1201 m->m_pkthdr.len = m->m_len = SGE_RX_BYTES(rxstat) - 1202 SGE_RX_PAD_BYTES; 1203 m->m_pkthdr.rcvif = ifp; 1204 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1205 SGE_UNLOCK(sc); 1206 if_input(ifp, m); 1207 SGE_LOCK(sc); 1208 } 1209 1210 if (prog > 0) { 1211 bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap, 1212 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1213 cd->sge_rx_cons = cons; 1214 } 1215 } 1216 1217 /* 1218 * A frame was downloaded to the chip. It's safe for us to clean up 1219 * the list buffers. 1220 */ 1221 static void 1222 sge_txeof(struct sge_softc *sc) 1223 { 1224 if_t ifp; 1225 struct sge_list_data *ld; 1226 struct sge_chain_data *cd; 1227 struct sge_txdesc *txd; 1228 uint32_t txstat; 1229 int cons, nsegs, prod; 1230 1231 SGE_LOCK_ASSERT(sc); 1232 1233 ifp = sc->sge_ifp; 1234 ld = &sc->sge_ldata; 1235 cd = &sc->sge_cdata; 1236 1237 if (cd->sge_tx_cnt == 0) 1238 return; 1239 bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap, 1240 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1241 cons = cd->sge_tx_cons; 1242 prod = cd->sge_tx_prod; 1243 for (; cons != prod;) { 1244 txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts); 1245 if ((txstat & TDC_OWN) != 0) 1246 break; 1247 /* 1248 * Only the first descriptor of multi-descriptor transmission 1249 * is updated by controller. Driver should skip entire 1250 * chained buffers for the transmitted frame. In other words 1251 * TDC_OWN bit is valid only at the first descriptor of a 1252 * multi-descriptor transmission. 
1253 */ 1254 if (SGE_TX_ERROR(txstat) != 0) { 1255 #ifdef SGE_SHOW_ERRORS 1256 device_printf(sc->sge_dev, "Tx error : 0x%b\n", 1257 txstat, TX_ERR_BITS); 1258 #endif 1259 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1260 } else { 1261 #ifdef notyet 1262 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0xFFFF) - 1); 1263 #endif 1264 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1265 } 1266 txd = &cd->sge_txdesc[cons]; 1267 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 1268 ld->sge_tx_ring[cons].sge_cmdsts = 0; 1269 SGE_INC(cons, SGE_TX_RING_CNT); 1270 } 1271 /* Reclaim transmitted mbuf. */ 1272 KASSERT(txd->tx_m != NULL, 1273 ("%s: freeing NULL mbuf\n", __func__)); 1274 bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap, 1275 BUS_DMASYNC_POSTWRITE); 1276 bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap); 1277 m_freem(txd->tx_m); 1278 txd->tx_m = NULL; 1279 cd->sge_tx_cnt -= txd->tx_ndesc; 1280 KASSERT(cd->sge_tx_cnt >= 0, 1281 ("%s: Active Tx desc counter was garbled\n", __func__)); 1282 txd->tx_ndesc = 0; 1283 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1284 } 1285 cd->sge_tx_cons = cons; 1286 if (cd->sge_tx_cnt == 0) 1287 sc->sge_timer = 0; 1288 } 1289 1290 static void 1291 sge_tick(void *arg) 1292 { 1293 struct sge_softc *sc; 1294 struct mii_data *mii; 1295 if_t ifp; 1296 1297 sc = arg; 1298 SGE_LOCK_ASSERT(sc); 1299 1300 ifp = sc->sge_ifp; 1301 mii = device_get_softc(sc->sge_miibus); 1302 mii_tick(mii); 1303 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1304 sge_miibus_statchg(sc->sge_dev); 1305 if ((sc->sge_flags & SGE_FLAG_LINK) != 0 && 1306 !if_sendq_empty(ifp)) 1307 sge_start_locked(ifp); 1308 } 1309 /* 1310 * Reclaim transmitted frames here as we do not request 1311 * Tx completion interrupt for every queued frames to 1312 * reduce excessive interrupts. 
1313 */ 1314 sge_txeof(sc); 1315 sge_watchdog(sc); 1316 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1317 } 1318 1319 static void 1320 sge_intr(void *arg) 1321 { 1322 struct sge_softc *sc; 1323 if_t ifp; 1324 uint32_t status; 1325 1326 sc = arg; 1327 SGE_LOCK(sc); 1328 ifp = sc->sge_ifp; 1329 1330 status = CSR_READ_4(sc, IntrStatus); 1331 if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) { 1332 /* Not ours. */ 1333 SGE_UNLOCK(sc); 1334 return; 1335 } 1336 /* Acknowledge interrupts. */ 1337 CSR_WRITE_4(sc, IntrStatus, status); 1338 /* Disable further interrupts. */ 1339 CSR_WRITE_4(sc, IntrMask, 0); 1340 /* 1341 * It seems the controller supports some kind of interrupt 1342 * moderation mechanism but we still don't know how to 1343 * enable that. To reduce number of generated interrupts 1344 * under load we check pending interrupts in a loop. This 1345 * will increase number of register access and is not correct 1346 * way to handle interrupt moderation but there seems to be 1347 * no other way at this time. 1348 */ 1349 for (;;) { 1350 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 1351 break; 1352 if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) { 1353 sge_rxeof(sc); 1354 /* Wakeup Rx MAC. */ 1355 if ((status & INTR_RX_IDLE) != 0) 1356 CSR_WRITE_4(sc, RX_CTL, 1357 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1358 } 1359 if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0) 1360 sge_txeof(sc); 1361 status = CSR_READ_4(sc, IntrStatus); 1362 if ((status & SGE_INTRS) == 0) 1363 break; 1364 /* Acknowledge interrupts. */ 1365 CSR_WRITE_4(sc, IntrStatus, status); 1366 } 1367 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1368 /* Re-enable interrupts */ 1369 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1370 if (!if_sendq_empty(ifp)) 1371 sge_start_locked(ifp); 1372 } 1373 SGE_UNLOCK(sc); 1374 } 1375 1376 /* 1377 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1378 * pointers to the fragment pointers. 
1379 */ 1380 static int 1381 sge_encap(struct sge_softc *sc, struct mbuf **m_head) 1382 { 1383 struct mbuf *m; 1384 struct sge_desc *desc; 1385 struct sge_txdesc *txd; 1386 bus_dma_segment_t txsegs[SGE_MAXTXSEGS]; 1387 uint32_t cflags, mss; 1388 int error, i, nsegs, prod, si; 1389 1390 SGE_LOCK_ASSERT(sc); 1391 1392 si = prod = sc->sge_cdata.sge_tx_prod; 1393 txd = &sc->sge_cdata.sge_txdesc[prod]; 1394 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1395 struct ether_header *eh; 1396 struct ip *ip; 1397 struct tcphdr *tcp; 1398 uint32_t ip_off, poff; 1399 1400 if (M_WRITABLE(*m_head) == 0) { 1401 /* Get a writable copy. */ 1402 m = m_dup(*m_head, M_NOWAIT); 1403 m_freem(*m_head); 1404 if (m == NULL) { 1405 *m_head = NULL; 1406 return (ENOBUFS); 1407 } 1408 *m_head = m; 1409 } 1410 ip_off = sizeof(struct ether_header); 1411 m = m_pullup(*m_head, ip_off); 1412 if (m == NULL) { 1413 *m_head = NULL; 1414 return (ENOBUFS); 1415 } 1416 eh = mtod(m, struct ether_header *); 1417 /* Check the existence of VLAN tag. */ 1418 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1419 ip_off = sizeof(struct ether_vlan_header); 1420 m = m_pullup(m, ip_off); 1421 if (m == NULL) { 1422 *m_head = NULL; 1423 return (ENOBUFS); 1424 } 1425 } 1426 m = m_pullup(m, ip_off + sizeof(struct ip)); 1427 if (m == NULL) { 1428 *m_head = NULL; 1429 return (ENOBUFS); 1430 } 1431 ip = (struct ip *)(mtod(m, char *) + ip_off); 1432 poff = ip_off + (ip->ip_hl << 2); 1433 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1434 if (m == NULL) { 1435 *m_head = NULL; 1436 return (ENOBUFS); 1437 } 1438 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1439 m = m_pullup(m, poff + (tcp->th_off << 2)); 1440 if (m == NULL) { 1441 *m_head = NULL; 1442 return (ENOBUFS); 1443 } 1444 /* 1445 * Reset IP checksum and recompute TCP pseudo 1446 * checksum that NDIS specification requires. 
1447 */ 1448 ip = (struct ip *)(mtod(m, char *) + ip_off); 1449 ip->ip_sum = 0; 1450 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1451 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1452 htons(IPPROTO_TCP)); 1453 *m_head = m; 1454 } 1455 1456 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1457 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1458 if (error == EFBIG) { 1459 m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS); 1460 if (m == NULL) { 1461 m_freem(*m_head); 1462 *m_head = NULL; 1463 return (ENOBUFS); 1464 } 1465 *m_head = m; 1466 error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, 1467 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1468 if (error != 0) { 1469 m_freem(*m_head); 1470 *m_head = NULL; 1471 return (error); 1472 } 1473 } else if (error != 0) 1474 return (error); 1475 1476 KASSERT(nsegs != 0, ("zero segment returned")); 1477 /* Check descriptor overrun. */ 1478 if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) { 1479 bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap); 1480 return (ENOBUFS); 1481 } 1482 bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap, 1483 BUS_DMASYNC_PREWRITE); 1484 1485 m = *m_head; 1486 cflags = 0; 1487 mss = 0; 1488 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1489 cflags |= TDC_LS; 1490 mss = (uint32_t)m->m_pkthdr.tso_segsz; 1491 mss <<= 16; 1492 } else { 1493 if (m->m_pkthdr.csum_flags & CSUM_IP) 1494 cflags |= TDC_IP_CSUM; 1495 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1496 cflags |= TDC_TCP_CSUM; 1497 if (m->m_pkthdr.csum_flags & CSUM_UDP) 1498 cflags |= TDC_UDP_CSUM; 1499 } 1500 for (i = 0; i < nsegs; i++) { 1501 desc = &sc->sge_ldata.sge_tx_ring[prod]; 1502 if (i == 0) { 1503 desc->sge_sts_size = htole32(m->m_pkthdr.len | mss); 1504 desc->sge_cmdsts = 0; 1505 } else { 1506 desc->sge_sts_size = 0; 1507 desc->sge_cmdsts = htole32(TDC_OWN); 1508 } 1509 desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr)); 1510 desc->sge_flags = 
htole32(txsegs[i].ds_len); 1511 if (prod == SGE_TX_RING_CNT - 1) 1512 desc->sge_flags |= htole32(RING_END); 1513 sc->sge_cdata.sge_tx_cnt++; 1514 SGE_INC(prod, SGE_TX_RING_CNT); 1515 } 1516 /* Update producer index. */ 1517 sc->sge_cdata.sge_tx_prod = prod; 1518 1519 desc = &sc->sge_ldata.sge_tx_ring[si]; 1520 /* Configure VLAN. */ 1521 if((m->m_flags & M_VLANTAG) != 0) { 1522 cflags |= m->m_pkthdr.ether_vtag; 1523 desc->sge_sts_size |= htole32(TDS_INS_VLAN); 1524 } 1525 desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags); 1526 #if 1 1527 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1528 desc->sge_cmdsts |= htole32(TDC_BST); 1529 #else 1530 if ((sc->sge_flags & SGE_FLAG_FDX) == 0) { 1531 desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF); 1532 if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0) 1533 desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST); 1534 } 1535 #endif 1536 /* Request interrupt and give ownership to controller. */ 1537 desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR); 1538 txd->tx_m = m; 1539 txd->tx_ndesc = nsegs; 1540 return (0); 1541 } 1542 1543 static void 1544 sge_start(if_t ifp) 1545 { 1546 struct sge_softc *sc; 1547 1548 sc = if_getsoftc(ifp); 1549 SGE_LOCK(sc); 1550 sge_start_locked(ifp); 1551 SGE_UNLOCK(sc); 1552 } 1553 1554 static void 1555 sge_start_locked(if_t ifp) 1556 { 1557 struct sge_softc *sc; 1558 struct mbuf *m_head; 1559 int queued = 0; 1560 1561 sc = if_getsoftc(ifp); 1562 SGE_LOCK_ASSERT(sc); 1563 1564 if ((sc->sge_flags & SGE_FLAG_LINK) == 0 || 1565 (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1566 IFF_DRV_RUNNING) 1567 return; 1568 1569 for (queued = 0; !if_sendq_empty(ifp); ) { 1570 if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT - 1571 SGE_MAXTXSEGS)) { 1572 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1573 break; 1574 } 1575 m_head = if_dequeue(ifp); 1576 if (m_head == NULL) 1577 break; 1578 if (sge_encap(sc, &m_head)) { 1579 if (m_head == NULL) 1580 break; 1581 if_sendq_prepend(ifp, 
m_head); 1582 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1583 break; 1584 } 1585 queued++; 1586 /* 1587 * If there's a BPF listener, bounce a copy of this frame 1588 * to him. 1589 */ 1590 BPF_MTAP(ifp, m_head); 1591 } 1592 1593 if (queued > 0) { 1594 bus_dmamap_sync(sc->sge_cdata.sge_tx_tag, 1595 sc->sge_cdata.sge_tx_dmamap, 1596 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1597 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL); 1598 sc->sge_timer = 5; 1599 } 1600 } 1601 1602 static void 1603 sge_init(void *arg) 1604 { 1605 struct sge_softc *sc; 1606 1607 sc = arg; 1608 SGE_LOCK(sc); 1609 sge_init_locked(sc); 1610 SGE_UNLOCK(sc); 1611 } 1612 1613 static void 1614 sge_init_locked(struct sge_softc *sc) 1615 { 1616 if_t ifp; 1617 struct mii_data *mii; 1618 uint16_t rxfilt; 1619 int i; 1620 1621 SGE_LOCK_ASSERT(sc); 1622 ifp = sc->sge_ifp; 1623 mii = device_get_softc(sc->sge_miibus); 1624 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1625 return; 1626 /* 1627 * Cancel pending I/O and free all RX/TX buffers. 1628 */ 1629 sge_stop(sc); 1630 sge_reset(sc); 1631 1632 /* Init circular RX list. */ 1633 if (sge_list_rx_init(sc) == ENOBUFS) { 1634 device_printf(sc->sge_dev, "no memory for Rx buffers\n"); 1635 sge_stop(sc); 1636 return; 1637 } 1638 /* Init TX descriptors. */ 1639 sge_list_tx_init(sc); 1640 /* 1641 * Load the address of the RX and TX lists. 1642 */ 1643 CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr)); 1644 CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr)); 1645 1646 CSR_WRITE_4(sc, TxMacControl, 0x60); 1647 CSR_WRITE_4(sc, RxWakeOnLan, 0); 1648 CSR_WRITE_4(sc, RxWakeOnLanData, 0); 1649 /* Allow receiving VLAN frames. */ 1650 CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN + 1651 SGE_RX_PAD_BYTES); 1652 1653 for (i = 0; i < ETHER_ADDR_LEN; i++) 1654 CSR_WRITE_1(sc, RxMacAddr + i, if_getlladdr(ifp)[i]); 1655 /* Configure RX MAC. 
*/ 1656 rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB; 1657 CSR_WRITE_2(sc, RxMacControl, rxfilt); 1658 sge_rxfilter(sc); 1659 sge_setvlan(sc); 1660 1661 /* Initialize default speed/duplex information. */ 1662 if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) 1663 sc->sge_flags |= SGE_FLAG_SPEED_1000; 1664 sc->sge_flags |= SGE_FLAG_FDX; 1665 if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) 1666 CSR_WRITE_4(sc, StationControl, 0x04008001); 1667 else 1668 CSR_WRITE_4(sc, StationControl, 0x04000001); 1669 /* 1670 * XXX Try to mitigate interrupts. 1671 */ 1672 CSR_WRITE_4(sc, IntrControl, 0x08880000); 1673 #ifdef notyet 1674 if (sc->sge_intrcontrol != 0) 1675 CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol); 1676 if (sc->sge_intrtimer != 0) 1677 CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer); 1678 #endif 1679 1680 /* 1681 * Clear and enable interrupts. 1682 */ 1683 CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF); 1684 CSR_WRITE_4(sc, IntrMask, SGE_INTRS); 1685 1686 /* Enable receiver and transmitter. */ 1687 CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB); 1688 CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB); 1689 1690 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 1691 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1692 1693 sc->sge_flags &= ~SGE_FLAG_LINK; 1694 mii_mediachg(mii); 1695 callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc); 1696 } 1697 1698 /* 1699 * Set media options. 1700 */ 1701 static int 1702 sge_ifmedia_upd(if_t ifp) 1703 { 1704 struct sge_softc *sc; 1705 struct mii_data *mii; 1706 struct mii_softc *miisc; 1707 int error; 1708 1709 sc = if_getsoftc(ifp); 1710 SGE_LOCK(sc); 1711 mii = device_get_softc(sc->sge_miibus); 1712 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1713 PHY_RESET(miisc); 1714 error = mii_mediachg(mii); 1715 SGE_UNLOCK(sc); 1716 1717 return (error); 1718 } 1719 1720 /* 1721 * Report current media status. 
1722 */ 1723 static void 1724 sge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 1725 { 1726 struct sge_softc *sc; 1727 struct mii_data *mii; 1728 1729 sc = if_getsoftc(ifp); 1730 SGE_LOCK(sc); 1731 mii = device_get_softc(sc->sge_miibus); 1732 if ((if_getflags(ifp) & IFF_UP) == 0) { 1733 SGE_UNLOCK(sc); 1734 return; 1735 } 1736 mii_pollstat(mii); 1737 ifmr->ifm_active = mii->mii_media_active; 1738 ifmr->ifm_status = mii->mii_media_status; 1739 SGE_UNLOCK(sc); 1740 } 1741 1742 static int 1743 sge_ioctl(if_t ifp, u_long command, caddr_t data) 1744 { 1745 struct sge_softc *sc; 1746 struct ifreq *ifr; 1747 struct mii_data *mii; 1748 int error = 0, mask, reinit; 1749 1750 sc = if_getsoftc(ifp); 1751 ifr = (struct ifreq *)data; 1752 1753 switch(command) { 1754 case SIOCSIFFLAGS: 1755 SGE_LOCK(sc); 1756 if ((if_getflags(ifp) & IFF_UP) != 0) { 1757 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && 1758 ((if_getflags(ifp) ^ sc->sge_if_flags) & 1759 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1760 sge_rxfilter(sc); 1761 else 1762 sge_init_locked(sc); 1763 } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1764 sge_stop(sc); 1765 sc->sge_if_flags = if_getflags(ifp); 1766 SGE_UNLOCK(sc); 1767 break; 1768 case SIOCSIFCAP: 1769 SGE_LOCK(sc); 1770 reinit = 0; 1771 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1772 if ((mask & IFCAP_TXCSUM) != 0 && 1773 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { 1774 if_togglecapenable(ifp, IFCAP_TXCSUM); 1775 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 1776 if_sethwassistbits(ifp, SGE_CSUM_FEATURES, 0); 1777 else 1778 if_sethwassistbits(ifp, 0, SGE_CSUM_FEATURES); 1779 } 1780 if ((mask & IFCAP_RXCSUM) != 0 && 1781 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) 1782 if_togglecapenable(ifp, IFCAP_RXCSUM); 1783 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1784 (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0) 1785 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); 1786 if ((mask & IFCAP_TSO4) != 0 && 1787 (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) 
{ 1788 if_togglecapenable(ifp, IFCAP_TSO4); 1789 if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0) 1790 if_sethwassistbits(ifp, CSUM_TSO, 0); 1791 else 1792 if_sethwassistbits(ifp, 0, CSUM_TSO); 1793 } 1794 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1795 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) 1796 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 1797 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1798 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { 1799 /* 1800 * Due to unknown reason, toggling VLAN hardware 1801 * tagging require interface reinitialization. 1802 */ 1803 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 1804 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) 1805 if_setcapenablebit(ifp, 0, 1806 IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 1807 reinit = 1; 1808 } 1809 if (reinit > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1810 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1811 sge_init_locked(sc); 1812 } 1813 SGE_UNLOCK(sc); 1814 VLAN_CAPABILITIES(ifp); 1815 break; 1816 case SIOCADDMULTI: 1817 case SIOCDELMULTI: 1818 SGE_LOCK(sc); 1819 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1820 sge_rxfilter(sc); 1821 SGE_UNLOCK(sc); 1822 break; 1823 case SIOCGIFMEDIA: 1824 case SIOCSIFMEDIA: 1825 mii = device_get_softc(sc->sge_miibus); 1826 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1827 break; 1828 default: 1829 error = ether_ioctl(ifp, command, data); 1830 break; 1831 } 1832 1833 return (error); 1834 } 1835 1836 static void 1837 sge_watchdog(struct sge_softc *sc) 1838 { 1839 if_t ifp; 1840 1841 SGE_LOCK_ASSERT(sc); 1842 if (sc->sge_timer == 0 || --sc->sge_timer > 0) 1843 return; 1844 1845 ifp = sc->sge_ifp; 1846 if ((sc->sge_flags & SGE_FLAG_LINK) == 0) { 1847 if (1 || bootverbose) 1848 device_printf(sc->sge_dev, 1849 "watchdog timeout (lost link)\n"); 1850 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1851 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1852 sge_init_locked(sc); 1853 return; 1854 } 1855 device_printf(sc->sge_dev, 
"watchdog timeout\n"); 1856 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1857 1858 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1859 sge_init_locked(sc); 1860 if (!if_sendq_empty(sc->sge_ifp)) 1861 sge_start_locked(ifp); 1862 } 1863 1864 /* 1865 * Stop the adapter and free any mbufs allocated to the 1866 * RX and TX lists. 1867 */ 1868 static void 1869 sge_stop(struct sge_softc *sc) 1870 { 1871 if_t ifp; 1872 1873 ifp = sc->sge_ifp; 1874 1875 SGE_LOCK_ASSERT(sc); 1876 1877 sc->sge_timer = 0; 1878 callout_stop(&sc->sge_stat_ch); 1879 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 1880 1881 CSR_WRITE_4(sc, IntrMask, 0); 1882 CSR_READ_4(sc, IntrMask); 1883 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1884 /* Stop TX/RX MAC. */ 1885 CSR_WRITE_4(sc, TX_CTL, 0x1a00); 1886 CSR_WRITE_4(sc, RX_CTL, 0x1a00); 1887 /* XXX Can we assume active DMA cycles gone? */ 1888 DELAY(2000); 1889 CSR_WRITE_4(sc, IntrMask, 0); 1890 CSR_WRITE_4(sc, IntrStatus, 0xffffffff); 1891 1892 sc->sge_flags &= ~SGE_FLAG_LINK; 1893 sge_list_rx_free(sc); 1894 sge_list_tx_free(sc); 1895 } 1896