/*-
 * Copyright (c) 2008-2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007-2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/sge/if_sgereg.h>

MODULE_DEPEND(sge, pci, 1, 1, 1);
MODULE_DEPEND(sge, ether, 1, 1, 1);
MODULE_DEPEND(sge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct sge_type sge_devs[] = {
        { SIS_VENDORID, SIS_DEVICEID_190, "SiS190 Fast Ethernet" },
        { SIS_VENDORID, SIS_DEVICEID_191, "SiS191 Fast/Gigabit Ethernet" },
        { 0, 0, NULL }
};

static int sge_probe(device_t);
static int sge_attach(device_t);
static int sge_detach(device_t);
static int sge_shutdown(device_t);
static int sge_suspend(device_t);
static int sge_resume(device_t);

static int sge_miibus_readreg(device_t, int, int);
static int sge_miibus_writereg(device_t, int, int, int);
static void sge_miibus_statchg(device_t);

static int sge_newbuf(struct sge_softc *, int);
static int sge_encap(struct sge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void
        sge_fixup_rx(struct mbuf *);
#endif
static __inline void
        sge_discard_rxbuf(struct sge_softc *, int);
static void sge_rxeof(struct sge_softc *);
static void sge_txeof(struct sge_softc *);
static void sge_intr(void *);
static void sge_tick(void *);
static void sge_start(struct ifnet *);
static void sge_start_locked(struct ifnet *);
static int sge_ioctl(struct ifnet *, u_long, caddr_t);
static void sge_init(void *);
static void sge_init_locked(struct sge_softc *);
static void sge_stop(struct sge_softc *);
static void sge_watchdog(struct sge_softc *);
static int sge_ifmedia_upd(struct ifnet *);
static void sge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int sge_get_mac_addr_apc(struct sge_softc *, uint8_t *);
static int sge_get_mac_addr_eeprom(struct sge_softc *, uint8_t *);
static uint16_t sge_read_eeprom(struct sge_softc *, int);

static void sge_rxfilter(struct sge_softc *);
static void sge_reset(struct sge_softc *);
static int sge_list_rx_init(struct sge_softc *);
static int sge_list_rx_free(struct sge_softc *);
static int sge_list_tx_init(struct sge_softc *);
static int sge_list_tx_free(struct sge_softc *);

static int sge_dma_alloc(struct sge_softc *);
static void sge_dma_free(struct sge_softc *);
static void sge_dma_map_addr(void *, bus_dma_segment_t *, int, int);

static device_method_t sge_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         sge_probe),
        DEVMETHOD(device_attach,        sge_attach),
        DEVMETHOD(device_detach,        sge_detach),
        DEVMETHOD(device_suspend,       sge_suspend),
        DEVMETHOD(device_resume,        sge_resume),
        DEVMETHOD(device_shutdown,      sge_shutdown),

        /* Bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       sge_miibus_readreg),
        DEVMETHOD(miibus_writereg,      sge_miibus_writereg),
        DEVMETHOD(miibus_statchg,       sge_miibus_statchg),

        KOBJMETHOD_END
};

static driver_t sge_driver = {
        "sge", sge_methods, sizeof(struct sge_softc)
};

static devclass_t sge_devclass;

DRIVER_MODULE(sge, pci, sge_driver, sge_devclass, 0, 0);
DRIVER_MODULE(miibus, sge, miibus_driver, miibus_devclass, 0, 0);

/*
 * Register space access macros.
 */
#define CSR_WRITE_4(sc, reg, val)       bus_write_4(sc->sge_res, reg, val)
#define CSR_WRITE_2(sc, reg, val)       bus_write_2(sc->sge_res, reg, val)
#define CSR_WRITE_1(sc, reg, val)       bus_write_1(sc->sge_res, reg, val)

#define CSR_READ_4(sc, reg)             bus_read_4(sc->sge_res, reg)
#define CSR_READ_2(sc, reg)             bus_read_2(sc->sge_res, reg)
#define CSR_READ_1(sc, reg)             bus_read_1(sc->sge_res, reg)

/* Define to show Tx/Rx error status. */
#undef SGE_SHOW_ERRORS

#define SGE_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)

static void
sge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *p;

        if (error != 0)
                return;
        KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
        p = arg;
        *p = segs->ds_addr;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static uint16_t
sge_read_eeprom(struct sge_softc *sc, int offset)
{
        uint32_t val;
        int i;

        KASSERT(offset <= EI_OFFSET, ("EEPROM offset too big"));
        CSR_WRITE_4(sc, ROMInterface,
            EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
        DELAY(500);
        for (i = 0; i < SGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, ROMInterface);
                if ((val & EI_REQ) == 0)
                        break;
                DELAY(100);
        }
        if (i == SGE_TIMEOUT) {
                device_printf(sc->sge_dev,
                    "EEPROM read timeout : 0x%08x\n", val);
                return (0xffff);
        }

        return ((val & EI_DATA) >> EI_DATA_SHIFT);
}

static int
sge_get_mac_addr_eeprom(struct sge_softc *sc, uint8_t *dest)
{
        uint16_t val;
        int i;

        val = sge_read_eeprom(sc, EEPROMSignature);
        if (val == 0xffff || val == 0) {
                device_printf(sc->sge_dev,
                    "invalid EEPROM signature : 0x%04x\n", val);
                return (EINVAL);
        }

        for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
                val = sge_read_eeprom(sc, EEPROMMACAddr + i / 2);
                dest[i + 0] = (uint8_t)val;
                dest[i + 1] = (uint8_t)(val >> 8);
        }

        if ((sge_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
                sc->sge_flags |= SGE_FLAG_RGMII;
        return (0);
}

/*
 * For SiS96x, the APC CMOS RAM is used to store the ethernet address.
 * The APC CMOS RAM is accessed through the ISA bridge.
 */
static int
sge_get_mac_addr_apc(struct sge_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
        devclass_t pci;
        device_t bus, dev = NULL;
        device_t *kids;
        struct apc_tbl {
                uint16_t vid;
                uint16_t did;
        } *tp, apc_tbls[] = {
                { SIS_VENDORID, 0x0965 },
                { SIS_VENDORID, 0x0966 },
                { SIS_VENDORID, 0x0968 }
        };
        uint8_t reg;
        int busnum, cnt, i, j, numkids;

        cnt = sizeof(apc_tbls) / sizeof(apc_tbls[0]);
        pci = devclass_find("pci");
        for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
                bus = devclass_get_device(pci, busnum);
                if (!bus)
                        continue;
                if (device_get_children(bus, &kids, &numkids) != 0)
                        continue;
                for (i = 0; i < numkids; i++) {
                        dev = kids[i];
                        if (pci_get_class(dev) == PCIC_BRIDGE &&
                            pci_get_subclass(dev) == PCIS_BRIDGE_ISA) {
                                tp = apc_tbls;
                                for (j = 0; j < cnt; j++) {
                                        if (pci_get_vendor(dev) == tp->vid &&
                                            pci_get_device(dev) == tp->did) {
                                                free(kids, M_TEMP);
                                                goto apc_found;
                                        }
                                        tp++;
                                }
                        }
                }
                free(kids, M_TEMP);
        }
        device_printf(sc->sge_dev, "couldn't find PCI-ISA bridge\n");
        return (EINVAL);
apc_found:
        /* Enable ports 0x78 and 0x79 to access the APC registers. */
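        /*
         * A rough sketch of the access protocol, inferred from this code
         * and the vendor drivers (SiS documents none of it): bit 1 of the
         * bridge's PCI config register 0x48 gates an index/data port pair,
         * the APC register index is written to I/O port 0x78, and the
         * value is read back from port 0x79.  Offsets 0x09-0x0e appear to
         * hold the ethernet address and bit 7 of offset 0x12 the RGMII
         * flag.
         */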
        reg = pci_read_config(dev, 0x48, 1);
        pci_write_config(dev, 0x48, reg & ~0x02, 1);
        DELAY(50);
        pci_read_config(dev, 0x48, 1);
        /* Read stored ethernet address. */
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                outb(0x78, 0x09 + i);
                dest[i] = inb(0x79);
        }
        outb(0x78, 0x12);
        if ((inb(0x79) & 0x80) != 0)
                sc->sge_flags |= SGE_FLAG_RGMII;
        /* Restore access to APC registers. */
        pci_write_config(dev, 0x48, reg, 1);

        return (0);
#else
        return (EINVAL);
#endif
}

static int
sge_miibus_readreg(device_t dev, int phy, int reg)
{
        struct sge_softc *sc;
        uint32_t val;
        int i;

        sc = device_get_softc(dev);
        CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
            (reg << GMI_REG_SHIFT) | GMI_OP_RD | GMI_REQ);
        DELAY(10);
        for (i = 0; i < SGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, GMIIControl);
                if ((val & GMI_REQ) == 0)
                        break;
                DELAY(10);
        }
        if (i == SGE_TIMEOUT) {
                device_printf(sc->sge_dev, "PHY read timeout : %d\n", reg);
                return (0);
        }
        return ((val & GMI_DATA) >> GMI_DATA_SHIFT);
}

static int
sge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
        struct sge_softc *sc;
        uint32_t val;
        int i;

        sc = device_get_softc(dev);
        CSR_WRITE_4(sc, GMIIControl, (phy << GMI_PHY_SHIFT) |
            (reg << GMI_REG_SHIFT) | (data << GMI_DATA_SHIFT) |
            GMI_OP_WR | GMI_REQ);
        DELAY(10);
        for (i = 0; i < SGE_TIMEOUT; i++) {
                val = CSR_READ_4(sc, GMIIControl);
                if ((val & GMI_REQ) == 0)
                        break;
                DELAY(10);
        }
        if (i == SGE_TIMEOUT)
                device_printf(sc->sge_dev, "PHY write timeout : %d\n", reg);
        return (0);
}

static void
sge_miibus_statchg(device_t dev)
{
        struct sge_softc *sc;
        struct mii_data *mii;
        struct ifnet *ifp;
        uint32_t ctl, speed;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->sge_miibus);
        ifp = sc->sge_ifp;
        if (mii == NULL || ifp == NULL ||
            (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;
        speed = 0;
        sc->sge_flags &= ~SGE_FLAG_LINK;
        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                        sc->sge_flags |= SGE_FLAG_LINK;
                        speed = SC_SPEED_10;
                        break;
                case IFM_100_TX:
                        sc->sge_flags |= SGE_FLAG_LINK;
                        speed = SC_SPEED_100;
                        break;
                case IFM_1000_T:
                        if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0) {
                                sc->sge_flags |= SGE_FLAG_LINK;
                                speed = SC_SPEED_1000;
                        }
                        break;
                default:
                        break;
                }
        }
        if ((sc->sge_flags & SGE_FLAG_LINK) == 0)
                return;
        /* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
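        /*
         * The 0x0f000000 field and the 0x07000000/0x04000000 values
         * written below are taken from the vendor drivers; they appear to
         * select MAC clock/timing parameters for gigabit vs. 10/100
         * operation, but their exact meaning is undocumented.
         */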
        ctl = CSR_READ_4(sc, StationControl);
        ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
        if (speed == SC_SPEED_1000) {
                ctl |= 0x07000000;
                sc->sge_flags |= SGE_FLAG_SPEED_1000;
        } else {
                ctl |= 0x04000000;
                sc->sge_flags &= ~SGE_FLAG_SPEED_1000;
        }
#ifdef notyet
        if ((sc->sge_flags & SGE_FLAG_GMII) != 0)
                ctl |= 0x03000000;
#endif
        ctl |= speed;
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                ctl |= SC_FDX;
                sc->sge_flags |= SGE_FLAG_FDX;
        } else
                sc->sge_flags &= ~SGE_FLAG_FDX;
        CSR_WRITE_4(sc, StationControl, ctl);
        if ((sc->sge_flags & SGE_FLAG_RGMII) != 0) {
                CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
                CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
        }
}

static void
sge_rxfilter(struct sge_softc *sc)
{
        struct ifnet *ifp;
        struct ifmultiaddr *ifma;
        uint32_t crc, hashes[2];
        uint16_t rxfilt;

        SGE_LOCK_ASSERT(sc);

        ifp = sc->sge_ifp;
        hashes[0] = hashes[1] = 0;
        rxfilt = AcceptMyPhys;
        if ((ifp->if_flags & IFF_BROADCAST) != 0)
                rxfilt |= AcceptBroadcast;
        if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                if ((ifp->if_flags & IFF_PROMISC) != 0)
                        rxfilt |= AcceptAllPhys;
                rxfilt |= AcceptMulticast;
                hashes[0] = 0xFFFFFFFF;
                hashes[1] = 0xFFFFFFFF;
                goto done;
        }
        rxfilt |= AcceptMulticast;
        /* Now program new ones. */
        if_maddr_rlock(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                    ifma->ifma_addr), ETHER_ADDR_LEN);
                hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
        }
        if_maddr_runlock(ifp);
done:
        CSR_WRITE_2(sc, RxMacControl, rxfilt | 0x02);
        CSR_WRITE_4(sc, RxHashTable, hashes[0]);
        CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

static void
sge_reset(struct sge_softc *sc)
{

        CSR_WRITE_4(sc, IntrMask, 0);
        CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

        /* Soft reset. */
        CSR_WRITE_4(sc, IntrControl, 0x8000);
        CSR_READ_4(sc, IntrControl);
        DELAY(100);
        CSR_WRITE_4(sc, IntrControl, 0);
        /* Stop MAC. */
        CSR_WRITE_4(sc, TX_CTL, 0x1a00);
        CSR_WRITE_4(sc, RX_CTL, 0x1a00);

        CSR_WRITE_4(sc, IntrMask, 0);
        CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

        CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sge_probe(device_t dev)
{
        struct sge_type *t;

        t = sge_devs;
        while (t->sge_name != NULL) {
                if ((pci_get_vendor(dev) == t->sge_vid) &&
                    (pci_get_device(dev) == t->sge_did)) {
                        device_set_desc(dev, t->sge_name);
                        return (BUS_PROBE_DEFAULT);
                }
                t++;
        }

        return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sge_attach(device_t dev)
{
        struct sge_softc *sc;
        struct ifnet *ifp;
        uint8_t eaddr[ETHER_ADDR_LEN];
        int error = 0, rid;

        sc = device_get_softc(dev);
        sc->sge_dev = dev;

        mtx_init(&sc->sge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->sge_stat_ch, &sc->sge_mtx, 0);

        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);

        /* Allocate resources. */
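        /*
         * Both the 190 and the 191 expose their registers as a
         * memory-mapped window behind BAR(0); no I/O-space fallback is
         * attempted here.
         */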
        sc->sge_res_id = PCIR_BAR(0);
        sc->sge_res_type = SYS_RES_MEMORY;
        sc->sge_res = bus_alloc_resource_any(dev, sc->sge_res_type,
            &sc->sge_res_id, RF_ACTIVE);
        if (sc->sge_res == NULL) {
                device_printf(dev, "couldn't allocate resource\n");
                error = ENXIO;
                goto fail;
        }

        rid = 0;
        sc->sge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->sge_irq == NULL) {
                device_printf(dev, "couldn't allocate IRQ resources\n");
                error = ENXIO;
                goto fail;
        }
        sc->sge_rev = pci_get_revid(dev);
        if (pci_get_device(dev) == SIS_DEVICEID_190)
                sc->sge_flags |= SGE_FLAG_FASTETHER;
        /* Reset the adapter. */
        sge_reset(sc);

        /* Get the MAC address from APC CMOS RAM or the EEPROM. */
        if ((pci_read_config(dev, 0x73, 1) & 0x01) != 0)
                sge_get_mac_addr_apc(sc, eaddr);
        else
                sge_get_mac_addr_eeprom(sc, eaddr);

        if ((error = sge_dma_alloc(sc)) != 0)
                goto fail;

        ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "cannot allocate ifnet structure.\n");
                error = ENOSPC;
                goto fail;
        }
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = sge_ioctl;
        ifp->if_start = sge_start;
        ifp->if_init = sge_init;
        ifp->if_snd.ifq_drv_maxlen = SGE_TX_RING_CNT - 1;
        IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
        IFQ_SET_READY(&ifp->if_snd);
        ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
        ifp->if_hwassist = SGE_CSUM_FEATURES;
        ifp->if_capenable = ifp->if_capabilities;
        /*
         * Do MII setup.
         */
        if (mii_phy_probe(dev, &sc->sge_miibus, sge_ifmedia_upd,
            sge_ifmedia_sts)) {
                device_printf(dev, "no PHY found!\n");
                error = ENXIO;
                goto fail;
        }

        /*
         * Call MI attach routine.
         */
        ether_ifattach(ifp, eaddr);

        /* VLAN setup. */
        ifp->if_capabilities |= IFCAP_VLAN_MTU;
        ifp->if_capenable = ifp->if_capabilities;
        /* Tell the upper layer(s) we support long frames. */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        /* Hook interrupt last to avoid having to lock softc. */
        error = bus_setup_intr(dev, sc->sge_irq, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, sge_intr, sc, &sc->sge_intrhand);
        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        if (error)
                sge_detach(dev);

        return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sge_detach(device_t dev)
{
        struct sge_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        ifp = sc->sge_ifp;
        /* These should only be active if attach succeeded. */
        if (device_is_attached(dev)) {
                ether_ifdetach(ifp);
                SGE_LOCK(sc);
                sge_stop(sc);
                SGE_UNLOCK(sc);
                callout_drain(&sc->sge_stat_ch);
        }
        if (sc->sge_miibus)
                device_delete_child(dev, sc->sge_miibus);
        bus_generic_detach(dev);

        if (sc->sge_intrhand)
                bus_teardown_intr(dev, sc->sge_irq, sc->sge_intrhand);
        if (sc->sge_irq)
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sge_irq);
        if (sc->sge_res)
                bus_release_resource(dev, sc->sge_res_type, sc->sge_res_id,
                    sc->sge_res);
        if (ifp)
                if_free(ifp);
        sge_dma_free(sc);
        mtx_destroy(&sc->sge_mtx);

        return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
sge_shutdown(device_t dev)
{
        struct sge_softc *sc;

        sc = device_get_softc(dev);
        SGE_LOCK(sc);
        sge_stop(sc);
        SGE_UNLOCK(sc);
        return (0);
}

static int
sge_suspend(device_t dev)
{
        struct sge_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        SGE_LOCK(sc);
        ifp = sc->sge_ifp;
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                sge_stop(sc);
        SGE_UNLOCK(sc);
        return (0);
}

static int
sge_resume(device_t dev)
{
        struct sge_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        SGE_LOCK(sc);
        ifp = sc->sge_ifp;
        if ((ifp->if_flags & IFF_UP) != 0)
                sge_init_locked(sc);
        SGE_UNLOCK(sc);
        return (0);
}

static int
sge_dma_alloc(struct sge_softc *sc)
{
        struct sge_chain_data *cd;
        struct sge_list_data *ld;
        int error, i;

        cd = &sc->sge_cdata;
        ld = &sc->sge_ldata;
        error = bus_dma_tag_create(bus_get_dma_tag(sc->sge_dev),
            1, 0,                       /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
            1,                          /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
            0,                          /* flags */
            NULL,                       /* lockfunc */
            NULL,                       /* lockarg */
            &cd->sge_tag);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not create parent DMA tag.\n");
                goto fail;
        }

        /* RX descriptor ring */
        error = bus_dma_tag_create(cd->sge_tag,
            SGE_DESC_ALIGN, 0,          /* alignment, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            SGE_RX_RING_SZ, 1,          /* maxsize, nsegments */
            SGE_RX_RING_SZ,             /* maxsegsize */
            0,                          /* flags */
            NULL,                       /* lockfunc */
            NULL,                       /* lockarg */
            &cd->sge_rx_tag);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not create Rx ring DMA tag.\n");
                goto fail;
        }
        /* Allocate DMA'able memory and load DMA map for RX ring. */
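        /*
         * Each ring follows the usual busdma pattern: create a tag,
         * allocate coherent zeroed memory with bus_dmamem_alloc(), then
         * bus_dmamap_load() the whole ring, letting the sge_dma_map_addr()
         * callback above record the ring's bus address.
         */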
        error = bus_dmamem_alloc(cd->sge_rx_tag, (void **)&ld->sge_rx_ring,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &cd->sge_rx_dmamap);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not allocate DMA'able memory for Rx ring.\n");
                goto fail;
        }
        error = bus_dmamap_load(cd->sge_rx_tag, cd->sge_rx_dmamap,
            ld->sge_rx_ring, SGE_RX_RING_SZ, sge_dma_map_addr,
            &ld->sge_rx_paddr, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not load DMA'able memory for Rx ring.\n");
                goto fail;
        }

        /* TX descriptor ring */
        error = bus_dma_tag_create(cd->sge_tag,
            SGE_DESC_ALIGN, 0,          /* alignment, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            SGE_TX_RING_SZ, 1,          /* maxsize, nsegments */
            SGE_TX_RING_SZ,             /* maxsegsize */
            0,                          /* flags */
            NULL,                       /* lockfunc */
            NULL,                       /* lockarg */
            &cd->sge_tx_tag);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not create Tx ring DMA tag.\n");
                goto fail;
        }
        /* Allocate DMA'able memory and load DMA map for TX ring. */
        error = bus_dmamem_alloc(cd->sge_tx_tag, (void **)&ld->sge_tx_ring,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &cd->sge_tx_dmamap);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not allocate DMA'able memory for Tx ring.\n");
                goto fail;
        }
        error = bus_dmamap_load(cd->sge_tx_tag, cd->sge_tx_dmamap,
            ld->sge_tx_ring, SGE_TX_RING_SZ, sge_dma_map_addr,
            &ld->sge_tx_paddr, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not load DMA'able memory for Tx ring.\n");
                goto fail;
        }

        /* Create DMA tag for Tx buffers. */
        error = bus_dma_tag_create(cd->sge_tag, 1, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * SGE_MAXTXSEGS,
            SGE_MAXTXSEGS, MCLBYTES, 0, NULL, NULL, &cd->sge_txmbuf_tag);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not create Tx mbuf DMA tag.\n");
                goto fail;
        }

        /* Create DMA tag for Rx buffers. */
        error = bus_dma_tag_create(cd->sge_tag, SGE_RX_BUF_ALIGN, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
            MCLBYTES, 0, NULL, NULL, &cd->sge_rxmbuf_tag);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not create Rx mbuf DMA tag.\n");
                goto fail;
        }

        /* Create DMA maps for Tx buffers. */
        for (i = 0; i < SGE_TX_RING_CNT; i++) {
                error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
                    &cd->sge_tx_map[i]);
                if (error != 0) {
                        device_printf(sc->sge_dev,
                            "could not create Tx DMA map.\n");
                        goto fail;
                }
        }
        /* Create spare DMA map for Rx buffer. */
        error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0, &cd->sge_rx_spare_map);
        if (error != 0) {
                device_printf(sc->sge_dev,
                    "could not create spare Rx DMA map.\n");
                goto fail;
        }
        /* Create DMA maps for Rx buffers. */
        for (i = 0; i < SGE_RX_RING_CNT; i++) {
                error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
                    &cd->sge_rx_map[i]);
                if (error) {
                        device_printf(sc->sge_dev,
                            "could not create Rx DMA map.\n");
                        goto fail;
                }
        }
fail:
        return (error);
}

static void
sge_dma_free(struct sge_softc *sc)
{
        struct sge_chain_data *cd;
        struct sge_list_data *ld;
        int i;

        cd = &sc->sge_cdata;
        ld = &sc->sge_ldata;
        /* Rx ring. */
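        /*
         * Teardown mirrors allocation in reverse: unload the map, free
         * the descriptor memory, then destroy the tag.  The child tags
         * must all be gone before the parent tag is destroyed at the end.
         */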
        if (cd->sge_rx_tag != NULL) {
                if (cd->sge_rx_dmamap != NULL)
                        bus_dmamap_unload(cd->sge_rx_tag, cd->sge_rx_dmamap);
                if (cd->sge_rx_dmamap != NULL && ld->sge_rx_ring != NULL)
                        bus_dmamem_free(cd->sge_rx_tag, ld->sge_rx_ring,
                            cd->sge_rx_dmamap);
                ld->sge_rx_ring = NULL;
                cd->sge_rx_dmamap = NULL;
                bus_dma_tag_destroy(cd->sge_rx_tag);
                cd->sge_rx_tag = NULL;
        }
        /* Tx ring. */
        if (cd->sge_tx_tag != NULL) {
                if (cd->sge_tx_dmamap != NULL)
                        bus_dmamap_unload(cd->sge_tx_tag, cd->sge_tx_dmamap);
                if (cd->sge_tx_dmamap != NULL && ld->sge_tx_ring != NULL)
                        bus_dmamem_free(cd->sge_tx_tag, ld->sge_tx_ring,
                            cd->sge_tx_dmamap);
                ld->sge_tx_ring = NULL;
                cd->sge_tx_dmamap = NULL;
                bus_dma_tag_destroy(cd->sge_tx_tag);
                cd->sge_tx_tag = NULL;
        }
        /* Rx buffers. */
        if (cd->sge_rxmbuf_tag != NULL) {
                for (i = 0; i < SGE_RX_RING_CNT; i++) {
                        if (cd->sge_rx_map[i] != NULL) {
                                bus_dmamap_destroy(cd->sge_rxmbuf_tag,
                                    cd->sge_rx_map[i]);
                                cd->sge_rx_map[i] = NULL;
                        }
                }
                if (cd->sge_rx_spare_map != NULL) {
                        bus_dmamap_destroy(cd->sge_rxmbuf_tag,
                            cd->sge_rx_spare_map);
                        cd->sge_rx_spare_map = NULL;
                }
                bus_dma_tag_destroy(cd->sge_rxmbuf_tag);
                cd->sge_rxmbuf_tag = NULL;
        }
        /* Tx buffers. */
        if (cd->sge_txmbuf_tag != NULL) {
                for (i = 0; i < SGE_TX_RING_CNT; i++) {
                        if (cd->sge_tx_map[i] != NULL) {
                                bus_dmamap_destroy(cd->sge_txmbuf_tag,
                                    cd->sge_tx_map[i]);
                                cd->sge_tx_map[i] = NULL;
                        }
                }
                bus_dma_tag_destroy(cd->sge_txmbuf_tag);
                cd->sge_txmbuf_tag = NULL;
        }
        if (cd->sge_tag != NULL)
                bus_dma_tag_destroy(cd->sge_tag);
        cd->sge_tag = NULL;
}

/*
 * Initialize the TX descriptors.
 */
static int
sge_list_tx_init(struct sge_softc *sc)
{
        struct sge_list_data *ld;
        struct sge_chain_data *cd;

        SGE_LOCK_ASSERT(sc);
        ld = &sc->sge_ldata;
        cd = &sc->sge_cdata;
        bzero(ld->sge_tx_ring, SGE_TX_RING_SZ);
        ld->sge_tx_ring[SGE_TX_RING_CNT - 1].sge_flags = htole32(RING_END);
        bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        cd->sge_tx_prod = 0;
        cd->sge_tx_cons = 0;
        cd->sge_tx_cnt = 0;
        return (0);
}

static int
sge_list_tx_free(struct sge_softc *sc)
{
        struct sge_chain_data *cd;
        int i;

        SGE_LOCK_ASSERT(sc);
        cd = &sc->sge_cdata;
        for (i = 0; i < SGE_TX_RING_CNT; i++) {
                if (cd->sge_tx_mbuf[i] != NULL) {
                        bus_dmamap_sync(cd->sge_txmbuf_tag,
                            cd->sge_tx_map[i], BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(cd->sge_txmbuf_tag,
                            cd->sge_tx_map[i]);
                        m_free(cd->sge_tx_mbuf[i]);
                        cd->sge_tx_mbuf[i] = NULL;
                }
        }

        return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * has the RING_END flag set.
 */
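/*
 * Ownership protocol: setting RDC_OWN in sge_cmdsts hands a descriptor to
 * the chip, and the chip clears the bit once the buffer has been filled.
 * sge_rxeof() only touches descriptors whose OWN bit has been cleared.
 */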
static int
sge_list_rx_init(struct sge_softc *sc)
{
        struct sge_chain_data *cd;
        int i;

        SGE_LOCK_ASSERT(sc);
        cd = &sc->sge_cdata;
        cd->sge_rx_cons = 0;
        bzero(sc->sge_ldata.sge_rx_ring, SGE_RX_RING_SZ);
        for (i = 0; i < SGE_RX_RING_CNT; i++) {
                if (sge_newbuf(sc, i) != 0)
                        return (ENOBUFS);
        }
        bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        return (0);
}

static int
sge_list_rx_free(struct sge_softc *sc)
{
        struct sge_chain_data *cd;
        int i;

        SGE_LOCK_ASSERT(sc);
        cd = &sc->sge_cdata;
        for (i = 0; i < SGE_RX_RING_CNT; i++) {
                if (cd->sge_rx_mbuf[i] != NULL) {
                        bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[i],
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(cd->sge_rxmbuf_tag,
                            cd->sge_rx_map[i]);
                        m_free(cd->sge_rx_mbuf[i]);
                        cd->sge_rx_mbuf[i] = NULL;
                }
        }
        return (0);
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
static int
sge_newbuf(struct sge_softc *sc, int prod)
{
        struct mbuf *m;
        struct sge_desc *desc;
        struct sge_chain_data *cd;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int error, nsegs;

        SGE_LOCK_ASSERT(sc);

        cd = &sc->sge_cdata;
        m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        m->m_len = m->m_pkthdr.len = MCLBYTES;
        m_adj(m, SGE_RX_BUF_ALIGN);
        error = bus_dmamap_load_mbuf_sg(cd->sge_rxmbuf_tag,
            cd->sge_rx_spare_map, m, segs, &nsegs, 0);
        if (error != 0) {
                m_freem(m);
                return (error);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
        if (cd->sge_rx_mbuf[prod] != NULL) {
                bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod],
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod]);
        }
        map = cd->sge_rx_map[prod];
        cd->sge_rx_map[prod] = cd->sge_rx_spare_map;
        cd->sge_rx_spare_map = map;
        bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod],
            BUS_DMASYNC_PREREAD);
        cd->sge_rx_mbuf[prod] = m;

        desc = &sc->sge_ldata.sge_rx_ring[prod];
        desc->sge_sts_size = 0;
        desc->sge_ptr = htole32(SGE_ADDR_LO(segs[0].ds_addr));
        desc->sge_flags = htole32(segs[0].ds_len);
        if (prod == SGE_RX_RING_CNT - 1)
                desc->sge_flags |= htole32(RING_END);
        desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
            RDC_TCP_CSUM | RDC_UDP_CSUM);
        return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
sge_fixup_rx(struct mbuf *m)
{
        int i;
        uint16_t *src, *dst;

        src = mtod(m, uint16_t *);
        dst = src - 3;

        for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
                *dst++ = *src++;

        m->m_data -= (SGE_RX_BUF_ALIGN - ETHER_ALIGN);
}
#endif

static __inline void
sge_discard_rxbuf(struct sge_softc *sc, int index)
{
        struct sge_desc *desc;

        desc = &sc->sge_ldata.sge_rx_ring[index];
        desc->sge_sts_size = 0;
        desc->sge_flags = htole32(MCLBYTES - SGE_RX_BUF_ALIGN);
        if (index == SGE_RX_RING_CNT - 1)
                desc->sge_flags |= htole32(RING_END);
        desc->sge_cmdsts = htole32(RDC_OWN | RDC_INTR | RDC_IP_CSUM |
            RDC_TCP_CSUM | RDC_UDP_CSUM);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
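/*
 * Note that each received frame is replaced with a fresh cluster through
 * sge_newbuf() before being passed up, and that the softc lock is dropped
 * around if_input() so the stack may re-enter the driver (e.g. to
 * transmit) without deadlocking.
 */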
static void
sge_rxeof(struct sge_softc *sc)
{
        struct ifnet *ifp;
        struct mbuf *m;
        struct sge_chain_data *cd;
        struct sge_desc *cur_rx;
        uint32_t rxinfo, rxstat;
        int cons, prog;

        SGE_LOCK_ASSERT(sc);

        ifp = sc->sge_ifp;
        cd = &sc->sge_cdata;

        bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        cons = cd->sge_rx_cons;
        for (prog = 0; prog < SGE_RX_RING_CNT; prog++,
            SGE_INC(cons, SGE_RX_RING_CNT)) {
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        break;
                cur_rx = &sc->sge_ldata.sge_rx_ring[cons];
                rxinfo = le32toh(cur_rx->sge_cmdsts);
                if ((rxinfo & RDC_OWN) != 0)
                        break;
                rxstat = le32toh(cur_rx->sge_sts_size);
                if (SGE_RX_ERROR(rxstat) != 0 || SGE_RX_NSEGS(rxstat) != 1) {
                        /* XXX We don't support multi-segment frames yet. */
#ifdef SGE_SHOW_ERRORS
                        device_printf(sc->sge_dev, "Rx error : 0x%b\n", rxstat,
                            RX_ERR_BITS);
#endif
                        sge_discard_rxbuf(sc, cons);
                        ifp->if_ierrors++;
                        continue;
                }
                m = cd->sge_rx_mbuf[cons];
                if (sge_newbuf(sc, cons) != 0) {
                        sge_discard_rxbuf(sc, cons);
                        ifp->if_iqdrops++;
                        continue;
                }
                if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
                        if ((rxinfo & RDC_IP_CSUM) != 0 &&
                            (rxinfo & RDC_IP_CSUM_OK) != 0)
                                m->m_pkthdr.csum_flags |=
                                    CSUM_IP_CHECKED | CSUM_IP_VALID;
                        if (((rxinfo & RDC_TCP_CSUM) != 0 &&
                            (rxinfo & RDC_TCP_CSUM_OK) != 0) ||
                            ((rxinfo & RDC_UDP_CSUM) != 0 &&
                            (rxinfo & RDC_UDP_CSUM_OK) != 0)) {
                                m->m_pkthdr.csum_flags |=
                                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                m->m_pkthdr.csum_data = 0xffff;
                        }
                }
                /*
                 * TODO : VLAN hardware tag stripping.
                 */
                m->m_pkthdr.len = m->m_len =
                    SGE_RX_BYTES(rxstat) - ETHER_CRC_LEN;
#ifndef __NO_STRICT_ALIGNMENT
                sge_fixup_rx(m);
#endif
                m->m_pkthdr.rcvif = ifp;
                ifp->if_ipackets++;
                SGE_UNLOCK(sc);
                (*ifp->if_input)(ifp, m);
                SGE_LOCK(sc);
        }

        if (prog > 0) {
                bus_dmamap_sync(cd->sge_rx_tag, cd->sge_rx_dmamap,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                cd->sge_rx_cons = cons;
        }
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
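/*
 * Since sge_encap() requests a Tx completion interrupt only for every
 * SGE_TX_INTR_FRAMES-th descriptor, this routine is also called from
 * sge_tick() to reclaim descriptors that completed without an interrupt.
 */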
static void
sge_txeof(struct sge_softc *sc)
{
        struct ifnet *ifp;
        struct sge_list_data *ld;
        struct sge_chain_data *cd;
        uint32_t txstat;
        int cons, prod;

        SGE_LOCK_ASSERT(sc);

        ifp = sc->sge_ifp;
        ld = &sc->sge_ldata;
        cd = &sc->sge_cdata;

        if (cd->sge_tx_cnt == 0)
                return;
        bus_dmamap_sync(cd->sge_tx_tag, cd->sge_tx_dmamap,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        cons = cd->sge_tx_cons;
        prod = cd->sge_tx_prod;
        for (; cons != prod; SGE_INC(cons, SGE_TX_RING_CNT)) {
                txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
                if ((txstat & TDC_OWN) != 0)
                        break;
                cd->sge_tx_cnt--;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (cd->sge_tx_mbuf[cons] != NULL) {
                        bus_dmamap_sync(cd->sge_txmbuf_tag,
                            cd->sge_tx_map[cons], BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(cd->sge_txmbuf_tag,
                            cd->sge_tx_map[cons]);
                        m_freem(cd->sge_tx_mbuf[cons]);
                        cd->sge_tx_mbuf[cons] = NULL;
                        if (SGE_TX_ERROR(txstat) != 0) {
#ifdef SGE_SHOW_ERRORS
                                device_printf(sc->sge_dev, "Tx error : 0x%b\n",
                                    txstat, TX_ERR_BITS);
#endif
                                ifp->if_oerrors++;
                        } else {
#ifdef notyet
                                ifp->if_collisions += (txstat & 0xFFFF) - 1;
#endif
                                ifp->if_opackets++;
                        }
                }
        }
        cd->sge_tx_cons = cons;
        if (cd->sge_tx_cnt == 0)
                sc->sge_timer = 0;
}

static void
sge_tick(void *arg)
{
        struct sge_softc *sc;
        struct mii_data *mii;
        struct ifnet *ifp;

        sc = arg;
        SGE_LOCK_ASSERT(sc);

        ifp = sc->sge_ifp;
        mii = device_get_softc(sc->sge_miibus);
        mii_tick(mii);
        if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
                sge_miibus_statchg(sc->sge_dev);
                if ((sc->sge_flags & SGE_FLAG_LINK) != 0 &&
                    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        sge_start_locked(ifp);
        }
        /*
         * Reclaim transmitted frames here as we do not request a Tx
         * completion interrupt for every queued frame, which keeps the
         * interrupt rate down.
         */
        sge_txeof(sc);
        sge_watchdog(sc);
        callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

static void
sge_intr(void *arg)
{
        struct sge_softc *sc;
        struct ifnet *ifp;
        uint32_t status;

        sc = arg;
        SGE_LOCK(sc);
        ifp = sc->sge_ifp;

        status = CSR_READ_4(sc, IntrStatus);
        if (status == 0xFFFFFFFF || (status & SGE_INTRS) == 0) {
                /* Not ours. */
                SGE_UNLOCK(sc);
                return;
        }
        /* Acknowledge interrupts. */
        CSR_WRITE_4(sc, IntrStatus, status);
        /* Disable further interrupts. */
        CSR_WRITE_4(sc, IntrMask, 0);
        /*
         * The controller seems to support some kind of interrupt
         * moderation mechanism, but we still don't know how to enable
         * it.  To reduce the number of interrupts generated under load
         * we check for pending interrupts in a loop.  This increases
         * the number of register accesses and is not the correct way
         * to handle interrupt moderation, but there seems to be no
         * alternative at this time.
         */
        for (;;) {
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        break;
                if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
                        sge_rxeof(sc);
                        /* Wakeup Rx MAC. */
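                        /*
                         * The 0x1a00 | 0x000c constants come from the
                         * vendor drivers; RX_CTL_POLL kicks the Rx DMA
                         * engine back into motion once it has gone idle.
                         */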
                        if ((status & INTR_RX_IDLE) != 0)
                                CSR_WRITE_4(sc, RX_CTL,
                                    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
                }
                if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
                        sge_txeof(sc);
                status = CSR_READ_4(sc, IntrStatus);
                if ((status & SGE_INTRS) == 0)
                        break;
                /* Acknowledge interrupts. */
                CSR_WRITE_4(sc, IntrStatus, status);
        }
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                /* Re-enable interrupts. */
                CSR_WRITE_4(sc, IntrMask, SGE_INTRS);
                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        sge_start_locked(ifp);
        }
        SGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sge_encap(struct sge_softc *sc, struct mbuf **m_head)
{
        struct mbuf *m;
        struct sge_desc *desc;
        bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
        bus_dmamap_t map;
        uint32_t cflags;
        int error, nsegs, prod;

        SGE_LOCK_ASSERT(sc);

        prod = sc->sge_cdata.sge_tx_prod;
        map = sc->sge_cdata.sge_tx_map[prod];
        /*
         * The Windows .inf file indicates that the SiS controller
         * supports TSO, VLAN hardware tag insertion/stripping,
         * interrupt moderation and Tx/Rx checksum offloading.
         * Unfortunately the vendor didn't release this information,
         * so we're guessing at descriptor usage by trial and error.
         *
         * The controller seems to support multi-fragment buffers, but
         * we don't know how to enable that feature, so limit Tx
         * buffers to a single fragment until we understand the
         * controller internals.
         * I assume the controller can pad with zero bytes if the frame
         * length is less than 60 bytes, and I also think the
         * controller has no Tx buffer alignment limitation. - Need
         * testing!
         */
        if ((*m_head)->m_next != NULL) {
                m = m_defrag(*m_head, M_DONTWAIT);
                if (m == NULL) {
                        m_freem(*m_head);
                        *m_head = NULL;
                        return (ENOBUFS);
                }
                *m_head = m;
        }
        error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, map,
            *m_head, txsegs, &nsegs, 0);
        if (error != 0) {
                m_freem(*m_head);
                *m_head = NULL;
                return (error);
        }
        /* Check descriptor overrun. */
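        /*
         * This appears to keep at least one descriptor unused so that a
         * completely full ring cannot be mistaken for an empty one.
         */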
        if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
                bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, map);
                return (ENOBUFS);
        }
        bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, map,
            BUS_DMASYNC_PREWRITE);

        cflags = 0;
        if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
                cflags |= TDC_IP_CSUM;
        if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
                cflags |= TDC_TCP_CSUM;
        if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
                cflags |= TDC_UDP_CSUM;
        desc = &sc->sge_ldata.sge_tx_ring[prod];
        desc->sge_sts_size = htole32((*m_head)->m_pkthdr.len);
        desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[0].ds_addr));
        desc->sge_flags = htole32(txsegs[0].ds_len);
        if (prod == SGE_TX_RING_CNT - 1)
                desc->sge_flags |= htole32(RING_END);
        desc->sge_cmdsts = htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
#if 1
        if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
                desc->sge_cmdsts |= htole32(TDC_BST);
#else
        if ((sc->sge_flags & SGE_FLAG_FDX) == 0) {
                desc->sge_cmdsts |= htole32(TDC_COL | TDC_CRS | TDC_BKF);
                if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
                        desc->sge_cmdsts |= htole32(TDC_EXT | TDC_BST);
        }
#endif
        /* Request interrupt and give ownership to controller. */
        if ((prod % SGE_TX_INTR_FRAMES) == 0)
                desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
        else
                desc->sge_cmdsts |= htole32(TDC_OWN);
        sc->sge_cdata.sge_tx_mbuf[prod] = *m_head;
        sc->sge_cdata.sge_tx_cnt++;
        SGE_INC(sc->sge_cdata.sge_tx_prod, SGE_TX_RING_CNT);
        return (0);
}

static void
sge_start(struct ifnet *ifp)
{
        struct sge_softc *sc;

        sc = ifp->if_softc;
        SGE_LOCK(sc);
        sge_start_locked(ifp);
        SGE_UNLOCK(sc);
}

static void
sge_start_locked(struct ifnet *ifp)
{
        struct sge_softc *sc;
        struct mbuf *m_head;
        int queued = 0;

        sc = ifp->if_softc;
        SGE_LOCK_ASSERT(sc);

        if ((sc->sge_flags & SGE_FLAG_LINK) == 0 ||
            (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return;

        for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
                if (sc->sge_cdata.sge_tx_cnt == SGE_TX_RING_CNT - 1) {
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;
                if (sge_encap(sc, &m_head)) {
                        /* sge_encap() frees the mbuf on some failures. */
                        if (m_head == NULL)
                                break;
                        IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }
                queued++;
                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                BPF_MTAP(ifp, m_head);
        }

        if (queued > 0) {
                bus_dmamap_sync(sc->sge_cdata.sge_tx_tag,
                    sc->sge_cdata.sge_tx_dmamap,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
                sc->sge_timer = 5;
        }
}

static void
sge_init(void *arg)
{
        struct sge_softc *sc;

        sc = arg;
        SGE_LOCK(sc);
        sge_init_locked(sc);
        SGE_UNLOCK(sc);
}

static void
sge_init_locked(struct sge_softc *sc)
{
        struct ifnet *ifp;
        struct mii_data *mii;
        int i;

        SGE_LOCK_ASSERT(sc);
        ifp = sc->sge_ifp;
        mii = device_get_softc(sc->sge_miibus);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                return;
        /*
         * Cancel pending I/O and free all RX/TX buffers.
         */
        sge_stop(sc);
        sge_reset(sc);

        /* Init circular RX list. */
        if (sge_list_rx_init(sc) == ENOBUFS) {
                device_printf(sc->sge_dev, "no memory for Rx buffers\n");
                sge_stop(sc);
                return;
        }
        /* Init TX descriptors. */
        sge_list_tx_init(sc);
        /*
         * Load the address of the RX and TX lists.
         */
        CSR_WRITE_4(sc, TX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_tx_paddr));
        CSR_WRITE_4(sc, RX_DESC, SGE_ADDR_LO(sc->sge_ldata.sge_rx_paddr));

        CSR_WRITE_4(sc, TxMacControl, 0x60);
        CSR_WRITE_4(sc, 0x6c, 0);
        CSR_WRITE_4(sc, RxWakeOnLan, 0);
        CSR_WRITE_4(sc, RxWakeOnLanData, 0);
        /* Allow receiving VLAN frames. */
        CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

        for (i = 0; i < ETHER_ADDR_LEN; i++)
                CSR_WRITE_1(sc, RxMacAddr + i, IF_LLADDR(ifp)[i]);
        sge_rxfilter(sc);

        /* Initialize default speed/duplex information. */
        if ((sc->sge_flags & SGE_FLAG_FASTETHER) == 0)
                sc->sge_flags |= SGE_FLAG_SPEED_1000;
        sc->sge_flags |= SGE_FLAG_FDX;
        if ((sc->sge_flags & SGE_FLAG_RGMII) != 0)
                CSR_WRITE_4(sc, StationControl, 0x04008001);
        else
                CSR_WRITE_4(sc, StationControl, 0x04000001);
        /*
         * XXX Try to mitigate interrupts.
         */
        CSR_WRITE_4(sc, IntrControl, 0x08880000);
#ifdef notyet
        if (sc->sge_intrcontrol != 0)
                CSR_WRITE_4(sc, IntrControl, sc->sge_intrcontrol);
        if (sc->sge_intrtimer != 0)
                CSR_WRITE_4(sc, IntrTimer, sc->sge_intrtimer);
#endif

        /*
         * Clear and enable interrupts.
         */
        CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
        CSR_WRITE_4(sc, IntrMask, SGE_INTRS);

        /* Enable receiver and transmitter. */
        CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
        CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        sc->sge_flags &= ~SGE_FLAG_LINK;
        mii_mediachg(mii);
        callout_reset(&sc->sge_stat_ch, hz, sge_tick, sc);
}

/*
 * Set media options.
 */
static int
sge_ifmedia_upd(struct ifnet *ifp)
{
        struct sge_softc *sc;
        struct mii_data *mii;
        int error;

        sc = ifp->if_softc;
        SGE_LOCK(sc);
        mii = device_get_softc(sc->sge_miibus);
        sc->sge_flags &= ~SGE_FLAG_LINK;
        if (mii->mii_instance) {
                struct mii_softc *miisc;
                LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                        mii_phy_reset(miisc);
        }
        error = mii_mediachg(mii);
        SGE_UNLOCK(sc);

        return (error);
}

/*
 * Report current media status.
 */
static void
sge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct sge_softc *sc;
        struct mii_data *mii;

        sc = ifp->if_softc;
        SGE_LOCK(sc);
        mii = device_get_softc(sc->sge_miibus);
        if ((ifp->if_flags & IFF_UP) == 0) {
                SGE_UNLOCK(sc);
                return;
        }
        mii_pollstat(mii);
        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;
        SGE_UNLOCK(sc);
}

static int
sge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct sge_softc *sc;
        struct ifreq *ifr;
        struct mii_data *mii;
        int error = 0, mask;

        sc = ifp->if_softc;
        ifr = (struct ifreq *)data;

        switch (command) {
        case SIOCSIFFLAGS:
                SGE_LOCK(sc);
                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                            ((ifp->if_flags ^ sc->sge_if_flags) &
                            (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                sge_rxfilter(sc);
                        else
                                sge_init_locked(sc);
                } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        sge_stop(sc);
                sc->sge_if_flags = ifp->if_flags;
                SGE_UNLOCK(sc);
                break;
        case SIOCSIFCAP:
                SGE_LOCK(sc);
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if ((mask & IFCAP_TXCSUM) != 0 &&
                    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
                                ifp->if_hwassist |= SGE_CSUM_FEATURES;
                        else
                                ifp->if_hwassist &= ~SGE_CSUM_FEATURES;
                }
                if ((mask & IFCAP_RXCSUM) != 0 &&
                    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                SGE_UNLOCK(sc);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                SGE_LOCK(sc);
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                        sge_rxfilter(sc);
                SGE_UNLOCK(sc);
                break;
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                mii = device_get_softc(sc->sge_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

static void
sge_watchdog(struct sge_softc *sc)
{
        struct ifnet *ifp;

        SGE_LOCK_ASSERT(sc);
        if (sc->sge_timer == 0 || --sc->sge_timer > 0)
                return;

        ifp = sc->sge_ifp;
        if ((sc->sge_flags & SGE_FLAG_LINK) == 0) {
                if (bootverbose)
                        device_printf(sc->sge_dev,
                            "watchdog timeout (lost link)\n");
                ifp->if_oerrors++;
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                sge_init_locked(sc);
                return;
        }
        device_printf(sc->sge_dev, "watchdog timeout\n");
        ifp->if_oerrors++;

        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        sge_init_locked(sc);
        if (!IFQ_DRV_IS_EMPTY(&sc->sge_ifp->if_snd))
                sge_start_locked(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sge_stop(struct sge_softc *sc)
{
        struct ifnet *ifp;

        ifp = sc->sge_ifp;

        SGE_LOCK_ASSERT(sc);

        sc->sge_timer = 0;
        callout_stop(&sc->sge_stat_ch);
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        CSR_WRITE_4(sc, IntrMask, 0);
        CSR_READ_4(sc, IntrMask);
        CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
        /* Stop TX/RX MAC. */
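        /*
         * Writing 0x1a00 with the enable bits clear appears to halt both
         * DMA engines; the magic value again comes from the vendor
         * drivers.
         */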
        CSR_WRITE_4(sc, TX_CTL, 0x1a00);
        CSR_WRITE_4(sc, RX_CTL, 0x1a00);
        /* XXX Can we assume active DMA cycles are gone? */
        DELAY(2000);
        CSR_WRITE_4(sc, IntrMask, 0);
        CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

        sc->sge_flags &= ~SGE_FLAG_LINK;
        sge_list_rx_free(sc);
        sge_list_tx_free(sc);
}