/*-
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
 * in the same 48-bit 'segment.'
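 *
 * For illustration only (the driver's real VGE_ADDR_LO()/VGE_ADDR_HI()
 * macros live in if_vgevar.h; the names below are just placeholders),
 * the split described above works out to:
 *
 *	lo  = busaddr & 0xffffffff;	low 32 bits, in each descriptor
 *	hi  = (busaddr >> 32) & 0xffff;	next 16 bits, in each descriptor
 *	seg = busaddr >> 48;		remaining 16 bits, set once via an
 *					I/O register and shared by all buffers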
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define	VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);

/*
 * The SQE error counter of the MIB seems to report bogus values.
 * The vendor's workaround does not seem to work on PCIe based
 * controllers. Disable it until we find a better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
144 */ 145 static struct vge_type vge_devs[] = { 146 { VIA_VENDORID, VIA_DEVICEID_61XX, 147 "VIA Networking Velocity Gigabit Ethernet" }, 148 { 0, 0, NULL } 149 }; 150 151 static int vge_attach(device_t); 152 static int vge_detach(device_t); 153 static int vge_probe(device_t); 154 static int vge_resume(device_t); 155 static int vge_shutdown(device_t); 156 static int vge_suspend(device_t); 157 158 static void vge_cam_clear(struct vge_softc *); 159 static int vge_cam_set(struct vge_softc *, uint8_t *); 160 static void vge_clrwol(struct vge_softc *); 161 static void vge_discard_rxbuf(struct vge_softc *, int); 162 static int vge_dma_alloc(struct vge_softc *); 163 static void vge_dma_free(struct vge_softc *); 164 static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 165 #ifdef VGE_EEPROM 166 static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 167 #endif 168 static int vge_encap(struct vge_softc *, struct mbuf **); 169 #ifndef __NO_STRICT_ALIGNMENT 170 static __inline void 171 vge_fixup_rx(struct mbuf *); 172 #endif 173 static void vge_freebufs(struct vge_softc *); 174 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 175 static int vge_ifmedia_upd(struct ifnet *); 176 static void vge_init(void *); 177 static void vge_init_locked(struct vge_softc *); 178 static void vge_intr(void *); 179 static void vge_intr_holdoff(struct vge_softc *); 180 static int vge_ioctl(struct ifnet *, u_long, caddr_t); 181 static void vge_link_statchg(void *); 182 static int vge_miibus_readreg(device_t, int, int); 183 static void vge_miibus_statchg(device_t); 184 static int vge_miibus_writereg(device_t, int, int, int); 185 static void vge_miipoll_start(struct vge_softc *); 186 static void vge_miipoll_stop(struct vge_softc *); 187 static int vge_newbuf(struct vge_softc *, int); 188 static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 189 static void vge_reset(struct vge_softc *); 190 static int vge_rx_list_init(struct vge_softc *); 191 static int vge_rxeof(struct vge_softc *, int); 192 static void vge_rxfilter(struct vge_softc *); 193 static void vge_setvlan(struct vge_softc *); 194 static void vge_setwol(struct vge_softc *); 195 static void vge_start(struct ifnet *); 196 static void vge_start_locked(struct ifnet *); 197 static void vge_stats_clear(struct vge_softc *); 198 static void vge_stats_update(struct vge_softc *); 199 static void vge_stop(struct vge_softc *); 200 static void vge_sysctl_node(struct vge_softc *); 201 static int vge_tx_list_init(struct vge_softc *); 202 static void vge_txeof(struct vge_softc *); 203 static void vge_watchdog(void *); 204 205 static device_method_t vge_methods[] = { 206 /* Device interface */ 207 DEVMETHOD(device_probe, vge_probe), 208 DEVMETHOD(device_attach, vge_attach), 209 DEVMETHOD(device_detach, vge_detach), 210 DEVMETHOD(device_suspend, vge_suspend), 211 DEVMETHOD(device_resume, vge_resume), 212 DEVMETHOD(device_shutdown, vge_shutdown), 213 214 /* bus interface */ 215 DEVMETHOD(bus_print_child, bus_generic_print_child), 216 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 217 218 /* MII interface */ 219 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 220 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 221 DEVMETHOD(miibus_statchg, vge_miibus_statchg), 222 223 { 0, 0 } 224 }; 225 226 static driver_t vge_driver = { 227 "vge", 228 vge_methods, 229 sizeof(struct vge_softc) 230 }; 231 232 static devclass_t vge_devclass; 233 234 DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 235 DRIVER_MODULE(miibus, 
vge, miibus_driver, miibus_devclass, 0, 0); 236 237 #ifdef VGE_EEPROM 238 /* 239 * Read a word of data stored in the EEPROM at address 'addr.' 240 */ 241 static void 242 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 243 { 244 int i; 245 uint16_t word = 0; 246 247 /* 248 * Enter EEPROM embedded programming mode. In order to 249 * access the EEPROM at all, we first have to set the 250 * EELOAD bit in the CHIPCFG2 register. 251 */ 252 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 253 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 254 255 /* Select the address of the word we want to read */ 256 CSR_WRITE_1(sc, VGE_EEADDR, addr); 257 258 /* Issue read command */ 259 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 260 261 /* Wait for the done bit to be set. */ 262 for (i = 0; i < VGE_TIMEOUT; i++) { 263 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 264 break; 265 } 266 267 if (i == VGE_TIMEOUT) { 268 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 269 *dest = 0; 270 return; 271 } 272 273 /* Read the result */ 274 word = CSR_READ_2(sc, VGE_EERDDAT); 275 276 /* Turn off EEPROM access mode. */ 277 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 278 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 279 280 *dest = word; 281 } 282 #endif 283 284 /* 285 * Read a sequence of words from the EEPROM. 286 */ 287 static void 288 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 289 { 290 int i; 291 #ifdef VGE_EEPROM 292 uint16_t word = 0, *ptr; 293 294 for (i = 0; i < cnt; i++) { 295 vge_eeprom_getword(sc, off + i, &word); 296 ptr = (uint16_t *)(dest + (i * 2)); 297 if (swap) 298 *ptr = ntohs(word); 299 else 300 *ptr = word; 301 } 302 #else 303 for (i = 0; i < ETHER_ADDR_LEN; i++) 304 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 305 #endif 306 } 307 308 static void 309 vge_miipoll_stop(struct vge_softc *sc) 310 { 311 int i; 312 313 CSR_WRITE_1(sc, VGE_MIICMD, 0); 314 315 for (i = 0; i < VGE_TIMEOUT; i++) { 316 DELAY(1); 317 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 318 break; 319 } 320 321 if (i == VGE_TIMEOUT) 322 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 323 } 324 325 static void 326 vge_miipoll_start(struct vge_softc *sc) 327 { 328 int i; 329 330 /* First, make sure we're idle. */ 331 332 CSR_WRITE_1(sc, VGE_MIICMD, 0); 333 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 334 335 for (i = 0; i < VGE_TIMEOUT; i++) { 336 DELAY(1); 337 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 338 break; 339 } 340 341 if (i == VGE_TIMEOUT) { 342 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 343 return; 344 } 345 346 /* Now enable auto poll mode. */ 347 348 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 349 350 /* And make sure it started. */ 351 352 for (i = 0; i < VGE_TIMEOUT; i++) { 353 DELAY(1); 354 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 355 break; 356 } 357 358 if (i == VGE_TIMEOUT) 359 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 360 } 361 362 static int 363 vge_miibus_readreg(device_t dev, int phy, int reg) 364 { 365 struct vge_softc *sc; 366 int i; 367 uint16_t rval = 0; 368 369 sc = device_get_softc(dev); 370 371 vge_miipoll_stop(sc); 372 373 /* Specify the register we want to read. */ 374 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 375 376 /* Issue read command. */ 377 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 378 379 /* Wait for the read command bit to self-clear. 
 */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return (error);
}

static void
vge_setvlan(struct vge_softc *sc)
{
	struct ifnet *ifp;
	uint8_t cfg;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;
	cfg = CSR_READ_1(sc, VGE_RXCFG);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		cfg |= VGE_VTAG_OPT2;
	else
		cfg &= ~VGE_VTAG_OPT2;
	CSR_WRITE_1(sc, VGE_RXCFG, cfg);
}

/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_rxfilter(struct vge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2];
	uint8_t rxcfg;
	int error = 0;

	VGE_LOCK_ASSERT(sc);

	/* First, zot all the multicast entries. */
	hashes[0] = 0;
	hashes[1] = 0;

	rxcfg = CSR_READ_1(sc, VGE_RXCTL);
	rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST |
	    VGE_RXCTL_RX_PROMISC);
	/*
	 * Always allow VLAN oversized frames and frames for
	 * this host.
	 */
	rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST;

	ifp = sc->vge_ifp;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= VGE_RXCTL_RX_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= VGE_RXCTL_RX_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			hashes[0] = 0xFFFFFFFF;
			hashes[1] = 0xFFFFFFFF;
		}
		goto done;
	}

	vge_cam_clear(sc);
	/* Now program new ones */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}
	if_maddr_runlock(ifp);

done:
	if (hashes[0] != 0 || hashes[1] != 0)
		rxcfg |= VGE_RXCTL_RX_MCAST;
	CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VGE_RXCTL, rxcfg);
}

static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
637 */ 638 static int 639 vge_probe(device_t dev) 640 { 641 struct vge_type *t; 642 643 t = vge_devs; 644 645 while (t->vge_name != NULL) { 646 if ((pci_get_vendor(dev) == t->vge_vid) && 647 (pci_get_device(dev) == t->vge_did)) { 648 device_set_desc(dev, t->vge_name); 649 return (BUS_PROBE_DEFAULT); 650 } 651 t++; 652 } 653 654 return (ENXIO); 655 } 656 657 /* 658 * Map a single buffer address. 659 */ 660 661 struct vge_dmamap_arg { 662 bus_addr_t vge_busaddr; 663 }; 664 665 static void 666 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 667 { 668 struct vge_dmamap_arg *ctx; 669 670 if (error != 0) 671 return; 672 673 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 674 675 ctx = (struct vge_dmamap_arg *)arg; 676 ctx->vge_busaddr = segs[0].ds_addr; 677 } 678 679 static int 680 vge_dma_alloc(struct vge_softc *sc) 681 { 682 struct vge_dmamap_arg ctx; 683 struct vge_txdesc *txd; 684 struct vge_rxdesc *rxd; 685 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 686 int error, i; 687 688 lowaddr = BUS_SPACE_MAXADDR; 689 690 again: 691 /* Create parent ring tag. */ 692 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 693 1, 0, /* algnmnt, boundary */ 694 lowaddr, /* lowaddr */ 695 BUS_SPACE_MAXADDR, /* highaddr */ 696 NULL, NULL, /* filter, filterarg */ 697 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 698 0, /* nsegments */ 699 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 700 0, /* flags */ 701 NULL, NULL, /* lockfunc, lockarg */ 702 &sc->vge_cdata.vge_ring_tag); 703 if (error != 0) { 704 device_printf(sc->vge_dev, 705 "could not create parent DMA tag.\n"); 706 goto fail; 707 } 708 709 /* Create tag for Tx ring. */ 710 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 711 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 712 BUS_SPACE_MAXADDR, /* lowaddr */ 713 BUS_SPACE_MAXADDR, /* highaddr */ 714 NULL, NULL, /* filter, filterarg */ 715 VGE_TX_LIST_SZ, /* maxsize */ 716 1, /* nsegments */ 717 VGE_TX_LIST_SZ, /* maxsegsize */ 718 0, /* flags */ 719 NULL, NULL, /* lockfunc, lockarg */ 720 &sc->vge_cdata.vge_tx_ring_tag); 721 if (error != 0) { 722 device_printf(sc->vge_dev, 723 "could not allocate Tx ring DMA tag.\n"); 724 goto fail; 725 } 726 727 /* Create tag for Rx ring. */ 728 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 729 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 730 BUS_SPACE_MAXADDR, /* lowaddr */ 731 BUS_SPACE_MAXADDR, /* highaddr */ 732 NULL, NULL, /* filter, filterarg */ 733 VGE_RX_LIST_SZ, /* maxsize */ 734 1, /* nsegments */ 735 VGE_RX_LIST_SZ, /* maxsegsize */ 736 0, /* flags */ 737 NULL, NULL, /* lockfunc, lockarg */ 738 &sc->vge_cdata.vge_rx_ring_tag); 739 if (error != 0) { 740 device_printf(sc->vge_dev, 741 "could not allocate Rx ring DMA tag.\n"); 742 goto fail; 743 } 744 745 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
*/ 746 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 747 (void **)&sc->vge_rdata.vge_tx_ring, 748 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 749 &sc->vge_cdata.vge_tx_ring_map); 750 if (error != 0) { 751 device_printf(sc->vge_dev, 752 "could not allocate DMA'able memory for Tx ring.\n"); 753 goto fail; 754 } 755 756 ctx.vge_busaddr = 0; 757 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 758 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 759 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 760 if (error != 0 || ctx.vge_busaddr == 0) { 761 device_printf(sc->vge_dev, 762 "could not load DMA'able memory for Tx ring.\n"); 763 goto fail; 764 } 765 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 766 767 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 768 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 769 (void **)&sc->vge_rdata.vge_rx_ring, 770 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 771 &sc->vge_cdata.vge_rx_ring_map); 772 if (error != 0) { 773 device_printf(sc->vge_dev, 774 "could not allocate DMA'able memory for Rx ring.\n"); 775 goto fail; 776 } 777 778 ctx.vge_busaddr = 0; 779 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 780 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 781 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 782 if (error != 0 || ctx.vge_busaddr == 0) { 783 device_printf(sc->vge_dev, 784 "could not load DMA'able memory for Rx ring.\n"); 785 goto fail; 786 } 787 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 788 789 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 790 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 791 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 792 if ((VGE_ADDR_HI(tx_ring_end) != 793 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 794 (VGE_ADDR_HI(rx_ring_end) != 795 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 796 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 797 device_printf(sc->vge_dev, "4GB boundary crossed, " 798 "switching to 32bit DMA address mode.\n"); 799 vge_dma_free(sc); 800 /* Limit DMA address space to 32bit and try again. */ 801 lowaddr = BUS_SPACE_MAXADDR_32BIT; 802 goto again; 803 } 804 805 /* Create parent buffer tag. */ 806 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 807 1, 0, /* algnmnt, boundary */ 808 VGE_BUF_DMA_MAXADDR, /* lowaddr */ 809 BUS_SPACE_MAXADDR, /* highaddr */ 810 NULL, NULL, /* filter, filterarg */ 811 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 812 0, /* nsegments */ 813 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 814 0, /* flags */ 815 NULL, NULL, /* lockfunc, lockarg */ 816 &sc->vge_cdata.vge_buffer_tag); 817 if (error != 0) { 818 device_printf(sc->vge_dev, 819 "could not create parent buffer DMA tag.\n"); 820 goto fail; 821 } 822 823 /* Create tag for Tx buffers. */ 824 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 825 1, 0, /* algnmnt, boundary */ 826 BUS_SPACE_MAXADDR, /* lowaddr */ 827 BUS_SPACE_MAXADDR, /* highaddr */ 828 NULL, NULL, /* filter, filterarg */ 829 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 830 VGE_MAXTXSEGS, /* nsegments */ 831 MCLBYTES, /* maxsegsize */ 832 0, /* flags */ 833 NULL, NULL, /* lockfunc, lockarg */ 834 &sc->vge_cdata.vge_tx_tag); 835 if (error != 0) { 836 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 837 goto fail; 838 } 839 840 /* Create tag for Rx buffers. 
*/ 841 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 842 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 843 BUS_SPACE_MAXADDR, /* lowaddr */ 844 BUS_SPACE_MAXADDR, /* highaddr */ 845 NULL, NULL, /* filter, filterarg */ 846 MCLBYTES, /* maxsize */ 847 1, /* nsegments */ 848 MCLBYTES, /* maxsegsize */ 849 0, /* flags */ 850 NULL, NULL, /* lockfunc, lockarg */ 851 &sc->vge_cdata.vge_rx_tag); 852 if (error != 0) { 853 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 854 goto fail; 855 } 856 857 /* Create DMA maps for Tx buffers. */ 858 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 859 txd = &sc->vge_cdata.vge_txdesc[i]; 860 txd->tx_m = NULL; 861 txd->tx_dmamap = NULL; 862 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 863 &txd->tx_dmamap); 864 if (error != 0) { 865 device_printf(sc->vge_dev, 866 "could not create Tx dmamap.\n"); 867 goto fail; 868 } 869 } 870 /* Create DMA maps for Rx buffers. */ 871 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 872 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 873 device_printf(sc->vge_dev, 874 "could not create spare Rx dmamap.\n"); 875 goto fail; 876 } 877 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 878 rxd = &sc->vge_cdata.vge_rxdesc[i]; 879 rxd->rx_m = NULL; 880 rxd->rx_dmamap = NULL; 881 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 882 &rxd->rx_dmamap); 883 if (error != 0) { 884 device_printf(sc->vge_dev, 885 "could not create Rx dmamap.\n"); 886 goto fail; 887 } 888 } 889 890 fail: 891 return (error); 892 } 893 894 static void 895 vge_dma_free(struct vge_softc *sc) 896 { 897 struct vge_txdesc *txd; 898 struct vge_rxdesc *rxd; 899 int i; 900 901 /* Tx ring. */ 902 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 903 if (sc->vge_cdata.vge_tx_ring_map) 904 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 905 sc->vge_cdata.vge_tx_ring_map); 906 if (sc->vge_cdata.vge_tx_ring_map && 907 sc->vge_rdata.vge_tx_ring) 908 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 909 sc->vge_rdata.vge_tx_ring, 910 sc->vge_cdata.vge_tx_ring_map); 911 sc->vge_rdata.vge_tx_ring = NULL; 912 sc->vge_cdata.vge_tx_ring_map = NULL; 913 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 914 sc->vge_cdata.vge_tx_ring_tag = NULL; 915 } 916 /* Rx ring. */ 917 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 918 if (sc->vge_cdata.vge_rx_ring_map) 919 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 920 sc->vge_cdata.vge_rx_ring_map); 921 if (sc->vge_cdata.vge_rx_ring_map && 922 sc->vge_rdata.vge_rx_ring) 923 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 924 sc->vge_rdata.vge_rx_ring, 925 sc->vge_cdata.vge_rx_ring_map); 926 sc->vge_rdata.vge_rx_ring = NULL; 927 sc->vge_cdata.vge_rx_ring_map = NULL; 928 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 929 sc->vge_cdata.vge_rx_ring_tag = NULL; 930 } 931 /* Tx buffers. */ 932 if (sc->vge_cdata.vge_tx_tag != NULL) { 933 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 934 txd = &sc->vge_cdata.vge_txdesc[i]; 935 if (txd->tx_dmamap != NULL) { 936 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 937 txd->tx_dmamap); 938 txd->tx_dmamap = NULL; 939 } 940 } 941 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 942 sc->vge_cdata.vge_tx_tag = NULL; 943 } 944 /* Rx buffers. 
*/ 945 if (sc->vge_cdata.vge_rx_tag != NULL) { 946 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 947 rxd = &sc->vge_cdata.vge_rxdesc[i]; 948 if (rxd->rx_dmamap != NULL) { 949 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 950 rxd->rx_dmamap); 951 rxd->rx_dmamap = NULL; 952 } 953 } 954 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 955 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 956 sc->vge_cdata.vge_rx_sparemap); 957 sc->vge_cdata.vge_rx_sparemap = NULL; 958 } 959 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 960 sc->vge_cdata.vge_rx_tag = NULL; 961 } 962 963 if (sc->vge_cdata.vge_buffer_tag != NULL) { 964 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 965 sc->vge_cdata.vge_buffer_tag = NULL; 966 } 967 if (sc->vge_cdata.vge_ring_tag != NULL) { 968 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 969 sc->vge_cdata.vge_ring_tag = NULL; 970 } 971 } 972 973 /* 974 * Attach the interface. Allocate softc structures, do ifmedia 975 * setup and ethernet/BPF attach. 976 */ 977 static int 978 vge_attach(device_t dev) 979 { 980 u_char eaddr[ETHER_ADDR_LEN]; 981 struct vge_softc *sc; 982 struct ifnet *ifp; 983 int error = 0, cap, i, msic, rid; 984 985 sc = device_get_softc(dev); 986 sc->vge_dev = dev; 987 988 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 989 MTX_DEF); 990 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 991 992 /* 993 * Map control/status registers. 994 */ 995 pci_enable_busmaster(dev); 996 997 rid = PCIR_BAR(1); 998 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 999 RF_ACTIVE); 1000 1001 if (sc->vge_res == NULL) { 1002 device_printf(dev, "couldn't map ports/memory\n"); 1003 error = ENXIO; 1004 goto fail; 1005 } 1006 1007 if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) { 1008 sc->vge_flags |= VGE_FLAG_PCIE; 1009 sc->vge_expcap = cap; 1010 } else 1011 sc->vge_flags |= VGE_FLAG_JUMBO; 1012 if (pci_find_extcap(dev, PCIY_PMG, &cap) == 0) { 1013 sc->vge_flags |= VGE_FLAG_PMCAP; 1014 sc->vge_pmcap = cap; 1015 } 1016 rid = 0; 1017 msic = pci_msi_count(dev); 1018 if (msi_disable == 0 && msic > 0) { 1019 msic = 1; 1020 if (pci_alloc_msi(dev, &msic) == 0) { 1021 if (msic == 1) { 1022 sc->vge_flags |= VGE_FLAG_MSI; 1023 device_printf(dev, "Using %d MSI message\n", 1024 msic); 1025 rid = 1; 1026 } else 1027 pci_release_msi(dev); 1028 } 1029 } 1030 1031 /* Allocate interrupt */ 1032 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1033 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 1034 if (sc->vge_irq == NULL) { 1035 device_printf(dev, "couldn't map interrupt\n"); 1036 error = ENXIO; 1037 goto fail; 1038 } 1039 1040 /* Reset the adapter. */ 1041 vge_reset(sc); 1042 /* Reload EEPROM. */ 1043 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 1044 for (i = 0; i < VGE_TIMEOUT; i++) { 1045 DELAY(5); 1046 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1047 break; 1048 } 1049 if (i == VGE_TIMEOUT) 1050 device_printf(dev, "EEPROM reload timed out\n"); 1051 /* 1052 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1053 * MAC will receive magic packet which in turn confuses 1054 * controller. 1055 */ 1056 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1057 1058 /* 1059 * Get station address from the EEPROM. 1060 */ 1061 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1062 /* 1063 * Save configured PHY address. 1064 * It seems the PHY address of PCIe controllers just 1065 * reflects media jump strapping status so we assume the 1066 * internal PHY address of PCIe controller is at 1. 
1067 */ 1068 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1069 sc->vge_phyaddr = 1; 1070 else 1071 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1072 VGE_MIICFG_PHYADDR; 1073 /* Clear WOL and take hardware from powerdown. */ 1074 vge_clrwol(sc); 1075 vge_sysctl_node(sc); 1076 error = vge_dma_alloc(sc); 1077 if (error) 1078 goto fail; 1079 1080 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1081 if (ifp == NULL) { 1082 device_printf(dev, "can not if_alloc()\n"); 1083 error = ENOSPC; 1084 goto fail; 1085 } 1086 1087 /* Do MII setup */ 1088 error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd, 1089 vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY, 1090 0); 1091 if (error != 0) { 1092 device_printf(dev, "attaching PHYs failed\n"); 1093 goto fail; 1094 } 1095 1096 ifp->if_softc = sc; 1097 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1098 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1099 ifp->if_ioctl = vge_ioctl; 1100 ifp->if_capabilities = IFCAP_VLAN_MTU; 1101 ifp->if_start = vge_start; 1102 ifp->if_hwassist = VGE_CSUM_FEATURES; 1103 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | 1104 IFCAP_VLAN_HWTAGGING; 1105 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) 1106 ifp->if_capabilities |= IFCAP_WOL; 1107 ifp->if_capenable = ifp->if_capabilities; 1108 #ifdef DEVICE_POLLING 1109 ifp->if_capabilities |= IFCAP_POLLING; 1110 #endif 1111 ifp->if_init = vge_init; 1112 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1); 1113 ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1; 1114 IFQ_SET_READY(&ifp->if_snd); 1115 1116 /* 1117 * Call MI attach routine. 1118 */ 1119 ether_ifattach(ifp, eaddr); 1120 1121 /* Tell the upper layer(s) we support long frames. */ 1122 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1123 1124 /* Hook interrupt last to avoid having to lock softc */ 1125 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1126 NULL, vge_intr, sc, &sc->vge_intrhand); 1127 1128 if (error) { 1129 device_printf(dev, "couldn't set up irq\n"); 1130 ether_ifdetach(ifp); 1131 goto fail; 1132 } 1133 1134 fail: 1135 if (error) 1136 vge_detach(dev); 1137 1138 return (error); 1139 } 1140 1141 /* 1142 * Shutdown hardware and free up resources. This can be called any 1143 * time after the mutex has been initialized. It is called in both 1144 * the error case in attach and the normal detach case so it needs 1145 * to be careful about only freeing resources that have actually been 1146 * allocated. 1147 */ 1148 static int 1149 vge_detach(device_t dev) 1150 { 1151 struct vge_softc *sc; 1152 struct ifnet *ifp; 1153 1154 sc = device_get_softc(dev); 1155 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); 1156 ifp = sc->vge_ifp; 1157 1158 #ifdef DEVICE_POLLING 1159 if (ifp->if_capenable & IFCAP_POLLING) 1160 ether_poll_deregister(ifp); 1161 #endif 1162 1163 /* These should only be active if attach succeeded */ 1164 if (device_is_attached(dev)) { 1165 ether_ifdetach(ifp); 1166 VGE_LOCK(sc); 1167 vge_stop(sc); 1168 VGE_UNLOCK(sc); 1169 callout_drain(&sc->vge_watchdog); 1170 } 1171 if (sc->vge_miibus) 1172 device_delete_child(dev, sc->vge_miibus); 1173 bus_generic_detach(dev); 1174 1175 if (sc->vge_intrhand) 1176 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); 1177 if (sc->vge_irq) 1178 bus_release_resource(dev, SYS_RES_IRQ, 1179 sc->vge_flags & VGE_FLAG_MSI ? 
		    1 : 0, sc->vge_irq);
	if (sc->vge_flags & VGE_FLAG_MSI)
		pci_release_msi(dev);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(1), sc->vge_res);
	if (ifp)
		if_free(ifp);

	vge_dma_free(sc);
	mtx_destroy(&sc->vge_mtx);

	return (0);
}

static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	int i;

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}
}

static int
vge_newbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int i, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * This is part of an evil trick to deal with strict-alignment
	 * architectures. The VIA chip requires RX buffers to be aligned
	 * on 32-bit boundaries, but that will hose strict-alignment
	 * architectures. To get around this, we leave some empty space
	 * at the start of each buffer and, for strict-alignment hosts,
	 * we copy the buffer back two bytes to achieve word alignment.
	 * This is slightly more efficient than allocating a new buffer,
	 * copying the contents, and discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, VGE_RX_BUF_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag,
	    sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap;
	sc->vge_cdata.vge_rx_sparemap = map;
	bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;
	rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) |
	    (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I);

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
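	 *
	 * Concretely (assuming VGE_RXCHUNK is 4, per the note above):
	 * nothing is handed back until prod % 4 == 3; at that point the
	 * OWN bit is set on this descriptor and on the three preceding
	 * ones (walking the rxd_prev links), and vge_rx_commit is
	 * advanced by 4 so that vge_rxeof() can later report the
	 * replenished count through the residue count register.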
1279 */ 1280 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1281 for (i = VGE_RXCHUNK; i > 0; i--) { 1282 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1283 rxd = rxd->rxd_prev; 1284 } 1285 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1286 } 1287 1288 return (0); 1289 } 1290 1291 static int 1292 vge_tx_list_init(struct vge_softc *sc) 1293 { 1294 struct vge_ring_data *rd; 1295 struct vge_txdesc *txd; 1296 int i; 1297 1298 VGE_LOCK_ASSERT(sc); 1299 1300 sc->vge_cdata.vge_tx_prodidx = 0; 1301 sc->vge_cdata.vge_tx_considx = 0; 1302 sc->vge_cdata.vge_tx_cnt = 0; 1303 1304 rd = &sc->vge_rdata; 1305 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1306 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1307 txd = &sc->vge_cdata.vge_txdesc[i]; 1308 txd->tx_m = NULL; 1309 txd->tx_desc = &rd->vge_tx_ring[i]; 1310 } 1311 1312 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1313 sc->vge_cdata.vge_tx_ring_map, 1314 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1315 1316 return (0); 1317 } 1318 1319 static int 1320 vge_rx_list_init(struct vge_softc *sc) 1321 { 1322 struct vge_ring_data *rd; 1323 struct vge_rxdesc *rxd; 1324 int i; 1325 1326 VGE_LOCK_ASSERT(sc); 1327 1328 sc->vge_cdata.vge_rx_prodidx = 0; 1329 sc->vge_cdata.vge_head = NULL; 1330 sc->vge_cdata.vge_tail = NULL; 1331 sc->vge_cdata.vge_rx_commit = 0; 1332 1333 rd = &sc->vge_rdata; 1334 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1335 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1336 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1337 rxd->rx_m = NULL; 1338 rxd->rx_desc = &rd->vge_rx_ring[i]; 1339 if (i == 0) 1340 rxd->rxd_prev = 1341 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1342 else 1343 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1344 if (vge_newbuf(sc, i) != 0) 1345 return (ENOBUFS); 1346 } 1347 1348 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1349 sc->vge_cdata.vge_rx_ring_map, 1350 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1351 1352 sc->vge_cdata.vge_rx_commit = 0; 1353 1354 return (0); 1355 } 1356 1357 static void 1358 vge_freebufs(struct vge_softc *sc) 1359 { 1360 struct vge_txdesc *txd; 1361 struct vge_rxdesc *rxd; 1362 struct ifnet *ifp; 1363 int i; 1364 1365 VGE_LOCK_ASSERT(sc); 1366 1367 ifp = sc->vge_ifp; 1368 /* 1369 * Free RX and TX mbufs still in the queues. 1370 */ 1371 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1372 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1373 if (rxd->rx_m != NULL) { 1374 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, 1375 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 1376 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, 1377 rxd->rx_dmamap); 1378 m_freem(rxd->rx_m); 1379 rxd->rx_m = NULL; 1380 } 1381 } 1382 1383 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1384 txd = &sc->vge_cdata.vge_txdesc[i]; 1385 if (txd->tx_m != NULL) { 1386 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, 1387 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1388 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, 1389 txd->tx_dmamap); 1390 m_freem(txd->tx_m); 1391 txd->tx_m = NULL; 1392 ifp->if_oerrors++; 1393 } 1394 } 1395 } 1396 1397 #ifndef __NO_STRICT_ALIGNMENT 1398 static __inline void 1399 vge_fixup_rx(struct mbuf *m) 1400 { 1401 int i; 1402 uint16_t *src, *dst; 1403 1404 src = mtod(m, uint16_t *); 1405 dst = src - 1; 1406 1407 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1408 *dst++ = *src++; 1409 1410 m->m_data -= ETHER_ALIGN; 1411 } 1412 #endif 1413 1414 /* 1415 * RX handler. We support the reception of jumbo frames that have 1416 * been fragmented across multiple 2K mbuf cluster buffers. 
 */
static int
vge_rxeof(struct vge_softc *sc, int count)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int prod, prog, total_len;
	struct vge_rxdesc *rxd;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	prod = sc->vge_cdata.vge_rx_prodidx;
	for (prog = 0; count > 0 &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    VGE_RX_DESC_INC(prod)) {
		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
		rxstat = le32toh(cur_rx->vge_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0)
			break;
		count--;
		prog++;
		rxctl = le32toh(cur_rx->vge_ctl);
		total_len = VGE_RXBYTES(rxstat);
		rxd = &sc->vge_cdata.vge_rxdesc[prod];
		m = rxd->rx_m;

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if ((rxstat & VGE_RXPKT_SOF) != 0) {
			if (vge_newbuf(sc, prod) != 0) {
				ifp->if_iqdrops++;
				VGE_CHAIN_RESET(sc);
				vge_discard_rxbuf(sc, prod);
				continue;
			}
			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
			if (sc->vge_cdata.vge_head == NULL) {
				sc->vge_cdata.vge_head = m;
				sc->vge_cdata.vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
				sc->vge_cdata.vge_tail = m;
			}
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 * We also want to receive frames with bad checksums
		 * and frames with bad lengths.
		 */
		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
		    (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR |
		    VGE_RDSTS_CSUMERR)) == 0) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			VGE_CHAIN_RESET(sc);
			vge_discard_rxbuf(sc, prod);
			continue;
		}

		if (vge_newbuf(sc, prod) != 0) {
			ifp->if_iqdrops++;
			VGE_CHAIN_RESET(sc);
			vge_discard_rxbuf(sc, prod);
			continue;
		}

		/* Chain received mbufs. */
		if (sc->vge_cdata.vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
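			 *
			 * For illustration, assuming the usual 2048-byte
			 * clusters (MCLBYTES) and a 4-byte VGE_RX_BUF_ALIGN,
			 * each fragment carries 2044 bytes: a 4090-byte
			 * frame arrives as 2044 + 2044 + 2, and since that
			 * final 2-byte fragment holds nothing but part of
			 * the CRC, it is freed here and the tail mbuf is
			 * trimmed by the remaining 2 CRC bytes instead.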
1515 */ 1516 if (m->m_len <= ETHER_CRC_LEN) { 1517 sc->vge_cdata.vge_tail->m_len -= 1518 (ETHER_CRC_LEN - m->m_len); 1519 m_freem(m); 1520 } else { 1521 m->m_len -= ETHER_CRC_LEN; 1522 m->m_flags &= ~M_PKTHDR; 1523 sc->vge_cdata.vge_tail->m_next = m; 1524 } 1525 m = sc->vge_cdata.vge_head; 1526 m->m_flags |= M_PKTHDR; 1527 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1528 } else { 1529 m->m_flags |= M_PKTHDR; 1530 m->m_pkthdr.len = m->m_len = 1531 (total_len - ETHER_CRC_LEN); 1532 } 1533 1534 #ifndef __NO_STRICT_ALIGNMENT 1535 vge_fixup_rx(m); 1536 #endif 1537 m->m_pkthdr.rcvif = ifp; 1538 1539 /* Do RX checksumming if enabled */ 1540 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1541 (rxctl & VGE_RDCTL_FRAG) == 0) { 1542 /* Check IP header checksum */ 1543 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1544 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1545 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1546 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1547 1548 /* Check TCP/UDP checksum */ 1549 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1550 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1551 m->m_pkthdr.csum_flags |= 1552 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1553 m->m_pkthdr.csum_data = 0xffff; 1554 } 1555 } 1556 1557 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1558 /* 1559 * The 32-bit rxctl register is stored in little-endian. 1560 * However, the 16-bit vlan tag is stored in big-endian, 1561 * so we have to byte swap it. 1562 */ 1563 m->m_pkthdr.ether_vtag = 1564 bswap16(rxctl & VGE_RDCTL_VLANID); 1565 m->m_flags |= M_VLANTAG; 1566 } 1567 1568 VGE_UNLOCK(sc); 1569 (*ifp->if_input)(ifp, m); 1570 VGE_LOCK(sc); 1571 sc->vge_cdata.vge_head = NULL; 1572 sc->vge_cdata.vge_tail = NULL; 1573 } 1574 1575 if (prog > 0) { 1576 sc->vge_cdata.vge_rx_prodidx = prod; 1577 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1578 sc->vge_cdata.vge_rx_ring_map, 1579 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1580 /* Update residue counter. */ 1581 if (sc->vge_cdata.vge_rx_commit != 0) { 1582 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1583 sc->vge_cdata.vge_rx_commit); 1584 sc->vge_cdata.vge_rx_commit = 0; 1585 } 1586 } 1587 return (prog); 1588 } 1589 1590 static void 1591 vge_txeof(struct vge_softc *sc) 1592 { 1593 struct ifnet *ifp; 1594 struct vge_tx_desc *cur_tx; 1595 struct vge_txdesc *txd; 1596 uint32_t txstat; 1597 int cons, prod; 1598 1599 VGE_LOCK_ASSERT(sc); 1600 1601 ifp = sc->vge_ifp; 1602 1603 if (sc->vge_cdata.vge_tx_cnt == 0) 1604 return; 1605 1606 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1607 sc->vge_cdata.vge_tx_ring_map, 1608 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1609 1610 /* 1611 * Go through our tx list and free mbufs for those 1612 * frames that have been transmitted. 
1613 */ 1614 cons = sc->vge_cdata.vge_tx_considx; 1615 prod = sc->vge_cdata.vge_tx_prodidx; 1616 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1617 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1618 txstat = le32toh(cur_tx->vge_sts); 1619 if ((txstat & VGE_TDSTS_OWN) != 0) 1620 break; 1621 sc->vge_cdata.vge_tx_cnt--; 1622 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1623 1624 txd = &sc->vge_cdata.vge_txdesc[cons]; 1625 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1626 BUS_DMASYNC_POSTWRITE); 1627 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1628 1629 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1630 __func__)); 1631 m_freem(txd->tx_m); 1632 txd->tx_m = NULL; 1633 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1634 } 1635 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1636 sc->vge_cdata.vge_tx_ring_map, 1637 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1638 sc->vge_cdata.vge_tx_considx = cons; 1639 if (sc->vge_cdata.vge_tx_cnt == 0) 1640 sc->vge_timer = 0; 1641 } 1642 1643 static void 1644 vge_link_statchg(void *xsc) 1645 { 1646 struct vge_softc *sc; 1647 struct ifnet *ifp; 1648 struct mii_data *mii; 1649 1650 sc = xsc; 1651 ifp = sc->vge_ifp; 1652 VGE_LOCK_ASSERT(sc); 1653 mii = device_get_softc(sc->vge_miibus); 1654 1655 mii_pollstat(mii); 1656 if ((sc->vge_flags & VGE_FLAG_LINK) != 0) { 1657 if (!(mii->mii_media_status & IFM_ACTIVE)) { 1658 sc->vge_flags &= ~VGE_FLAG_LINK; 1659 if_link_state_change(sc->vge_ifp, 1660 LINK_STATE_DOWN); 1661 } 1662 } else { 1663 if (mii->mii_media_status & IFM_ACTIVE && 1664 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1665 sc->vge_flags |= VGE_FLAG_LINK; 1666 if_link_state_change(sc->vge_ifp, 1667 LINK_STATE_UP); 1668 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1669 vge_start_locked(ifp); 1670 } 1671 } 1672 } 1673 1674 #ifdef DEVICE_POLLING 1675 static int 1676 vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 1677 { 1678 struct vge_softc *sc = ifp->if_softc; 1679 int rx_npkts = 0; 1680 1681 VGE_LOCK(sc); 1682 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1683 goto done; 1684 1685 rx_npkts = vge_rxeof(sc, count); 1686 vge_txeof(sc); 1687 1688 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1689 vge_start_locked(ifp); 1690 1691 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1692 uint32_t status; 1693 status = CSR_READ_4(sc, VGE_ISR); 1694 if (status == 0xFFFFFFFF) 1695 goto done; 1696 if (status) 1697 CSR_WRITE_4(sc, VGE_ISR, status); 1698 1699 /* 1700 * XXX check behaviour on receiver stalls. 
1701 */ 1702 1703 if (status & VGE_ISR_TXDMA_STALL || 1704 status & VGE_ISR_RXDMA_STALL) { 1705 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1706 vge_init_locked(sc); 1707 } 1708 1709 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1710 vge_rxeof(sc, count); 1711 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1712 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1713 } 1714 } 1715 done: 1716 VGE_UNLOCK(sc); 1717 return (rx_npkts); 1718 } 1719 #endif /* DEVICE_POLLING */ 1720 1721 static void 1722 vge_intr(void *arg) 1723 { 1724 struct vge_softc *sc; 1725 struct ifnet *ifp; 1726 uint32_t status; 1727 1728 sc = arg; 1729 VGE_LOCK(sc); 1730 1731 ifp = sc->vge_ifp; 1732 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 || 1733 (ifp->if_flags & IFF_UP) == 0) { 1734 VGE_UNLOCK(sc); 1735 return; 1736 } 1737 1738 #ifdef DEVICE_POLLING 1739 if (ifp->if_capenable & IFCAP_POLLING) { 1740 VGE_UNLOCK(sc); 1741 return; 1742 } 1743 #endif 1744 1745 /* Disable interrupts */ 1746 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1747 status = CSR_READ_4(sc, VGE_ISR); 1748 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD); 1749 /* If the card has gone away the read returns 0xffff. */ 1750 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0) 1751 goto done; 1752 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1753 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1754 vge_rxeof(sc, VGE_RX_DESC_CNT); 1755 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1756 vge_rxeof(sc, VGE_RX_DESC_CNT); 1757 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1758 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1759 } 1760 1761 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO)) 1762 vge_txeof(sc); 1763 1764 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1765 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1766 vge_init_locked(sc); 1767 } 1768 1769 if (status & VGE_ISR_LINKSTS) 1770 vge_link_statchg(sc); 1771 } 1772 done: 1773 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1774 /* Re-enable interrupts */ 1775 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1776 1777 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1778 vge_start_locked(ifp); 1779 } 1780 VGE_UNLOCK(sc); 1781 } 1782 1783 static int 1784 vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1785 { 1786 struct vge_txdesc *txd; 1787 struct vge_tx_frag *frag; 1788 struct mbuf *m; 1789 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1790 int error, i, nsegs, padlen; 1791 uint32_t cflags; 1792 1793 VGE_LOCK_ASSERT(sc); 1794 1795 M_ASSERTPKTHDR((*m_head)); 1796 1797 /* Argh. This chip does not autopad short frames. */ 1798 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1799 m = *m_head; 1800 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1801 if (M_WRITABLE(m) == 0) { 1802 /* Get a writable copy. */ 1803 m = m_dup(*m_head, M_DONTWAIT); 1804 m_freem(*m_head); 1805 if (m == NULL) { 1806 *m_head = NULL; 1807 return (ENOBUFS); 1808 } 1809 *m_head = m; 1810 } 1811 if (M_TRAILINGSPACE(m) < padlen) { 1812 m = m_defrag(m, M_DONTWAIT); 1813 if (m == NULL) { 1814 m_freem(*m_head); 1815 *m_head = NULL; 1816 return (ENOBUFS); 1817 } 1818 } 1819 /* 1820 * Manually pad short frames, and zero the pad space 1821 * to avoid leaking data. 
1822 */ 1823 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1824 m->m_pkthdr.len += padlen; 1825 m->m_len = m->m_pkthdr.len; 1826 *m_head = m; 1827 } 1828 1829 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1830 1831 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1832 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1833 if (error == EFBIG) { 1834 m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS); 1835 if (m == NULL) { 1836 m_freem(*m_head); 1837 *m_head = NULL; 1838 return (ENOMEM); 1839 } 1840 *m_head = m; 1841 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1842 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1843 if (error != 0) { 1844 m_freem(*m_head); 1845 *m_head = NULL; 1846 return (error); 1847 } 1848 } else if (error != 0) 1849 return (error); 1850 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1851 BUS_DMASYNC_PREWRITE); 1852 1853 m = *m_head; 1854 cflags = 0; 1855 1856 /* Configure checksum offload. */ 1857 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1858 cflags |= VGE_TDCTL_IPCSUM; 1859 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1860 cflags |= VGE_TDCTL_TCPCSUM; 1861 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1862 cflags |= VGE_TDCTL_UDPCSUM; 1863 1864 /* Configure VLAN. */ 1865 if ((m->m_flags & M_VLANTAG) != 0) 1866 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1867 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1868 /* 1869 * XXX 1870 * Velocity family seems to support TSO but no information 1871 * for MSS configuration is available. Also the number of 1872 * fragments supported by a descriptor is too small to hold 1873 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1874 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1875 * longer chain of buffers but no additional information is 1876 * available. 1877 * 1878 * When telling the chip how many segments there are, we 1879 * must use nsegs + 1 instead of just nsegs. Darned if I 1880 * know why. This also means we can't use the last fragment 1881 * field of Tx descriptor. 1882 */ 1883 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1884 VGE_TD_LS_NORM); 1885 for (i = 0; i < nsegs; i++) { 1886 frag = &txd->tx_desc->vge_frag[i]; 1887 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1888 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1889 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1890 } 1891 1892 sc->vge_cdata.vge_tx_cnt++; 1893 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1894 1895 /* 1896 * Finally request interrupt and give the first descriptor 1897 * ownership to hardware. 1898 */ 1899 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1900 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1901 txd->tx_m = m; 1902 1903 return (0); 1904 } 1905 1906 /* 1907 * Main transmit routine. 
1908 */ 1909 1910 static void 1911 vge_start(struct ifnet *ifp) 1912 { 1913 struct vge_softc *sc; 1914 1915 sc = ifp->if_softc; 1916 VGE_LOCK(sc); 1917 vge_start_locked(ifp); 1918 VGE_UNLOCK(sc); 1919 } 1920 1921 1922 static void 1923 vge_start_locked(struct ifnet *ifp) 1924 { 1925 struct vge_softc *sc; 1926 struct vge_txdesc *txd; 1927 struct mbuf *m_head; 1928 int enq, idx; 1929 1930 sc = ifp->if_softc; 1931 1932 VGE_LOCK_ASSERT(sc); 1933 1934 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1935 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1936 IFF_DRV_RUNNING) 1937 return; 1938 1939 idx = sc->vge_cdata.vge_tx_prodidx; 1940 VGE_TX_DESC_DEC(idx); 1941 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1942 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1943 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1944 if (m_head == NULL) 1945 break; 1946 /* 1947 * Pack the data into the transmit ring. If we 1948 * don't have room, set the OACTIVE flag and wait 1949 * for the NIC to drain the ring. 1950 */ 1951 if (vge_encap(sc, &m_head)) { 1952 if (m_head == NULL) 1953 break; 1954 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1955 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1956 break; 1957 } 1958 1959 txd = &sc->vge_cdata.vge_txdesc[idx]; 1960 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1961 VGE_TX_DESC_INC(idx); 1962 1963 enq++; 1964 /* 1965 * If there's a BPF listener, bounce a copy of this frame 1966 * to him. 1967 */ 1968 ETHER_BPF_MTAP(ifp, m_head); 1969 } 1970 1971 if (enq > 0) { 1972 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1973 sc->vge_cdata.vge_tx_ring_map, 1974 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1975 /* Issue a transmit command. */ 1976 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1977 /* 1978 * Set a timeout in case the chip goes out to lunch. 1979 */ 1980 sc->vge_timer = 5; 1981 } 1982 } 1983 1984 static void 1985 vge_init(void *xsc) 1986 { 1987 struct vge_softc *sc = xsc; 1988 1989 VGE_LOCK(sc); 1990 vge_init_locked(sc); 1991 VGE_UNLOCK(sc); 1992 } 1993 1994 static void 1995 vge_init_locked(struct vge_softc *sc) 1996 { 1997 struct ifnet *ifp = sc->vge_ifp; 1998 struct mii_data *mii; 1999 int error, i; 2000 2001 VGE_LOCK_ASSERT(sc); 2002 mii = device_get_softc(sc->vge_miibus); 2003 2004 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2005 return; 2006 2007 /* 2008 * Cancel pending I/O and free all RX/TX buffers. 2009 */ 2010 vge_stop(sc); 2011 vge_reset(sc); 2012 2013 /* 2014 * Initialize the RX and TX descriptors and mbufs. 2015 */ 2016 2017 error = vge_rx_list_init(sc); 2018 if (error != 0) { 2019 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2020 return; 2021 } 2022 vge_tx_list_init(sc); 2023 /* Clear MAC statistics. */ 2024 vge_stats_clear(sc); 2025 /* Set our station address */ 2026 for (i = 0; i < ETHER_ADDR_LEN; i++) 2027 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); 2028 2029 /* 2030 * Set receive FIFO threshold. Also allow transmission and 2031 * reception of VLAN tagged frames. 
2032 */ 2033 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 2034 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES); 2035 2036 /* Set DMA burst length */ 2037 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 2038 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 2039 2040 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 2041 2042 /* Set collision backoff algorithm */ 2043 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 2044 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 2045 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 2046 2047 /* Disable LPSEL field in priority resolution */ 2048 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 2049 2050 /* 2051 * Load the addresses of the DMA queues into the chip. 2052 * Note that we only use one transmit queue. 2053 */ 2054 2055 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR, 2056 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)); 2057 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 2058 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr)); 2059 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 2060 2061 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 2062 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr)); 2063 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 2064 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 2065 2066 /* Configure interrupt moderation. */ 2067 vge_intr_holdoff(sc); 2068 2069 /* Enable and wake up the RX descriptor queue */ 2070 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 2071 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 2072 2073 /* Enable the TX descriptor queue */ 2074 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 2075 2076 /* Init the cam filter. */ 2077 vge_cam_clear(sc); 2078 2079 /* Set up receiver filter. */ 2080 vge_rxfilter(sc); 2081 vge_setvlan(sc); 2082 2083 /* Enable flow control */ 2084 2085 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 2086 2087 /* Enable jumbo frame reception (if desired) */ 2088 2089 /* Start the MAC. */ 2090 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 2091 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 2092 CSR_WRITE_1(sc, VGE_CRS0, 2093 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 2094 2095 #ifdef DEVICE_POLLING 2096 /* 2097 * Disable interrupts if we are polling. 2098 */ 2099 if (ifp->if_capenable & IFCAP_POLLING) { 2100 CSR_WRITE_4(sc, VGE_IMR, 0); 2101 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2102 } else /* otherwise ... */ 2103 #endif 2104 { 2105 /* 2106 * Enable interrupts. 2107 */ 2108 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2109 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2110 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2111 } 2112 2113 sc->vge_flags &= ~VGE_FLAG_LINK; 2114 mii_mediachg(mii); 2115 2116 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2117 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2118 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2119 } 2120 2121 /* 2122 * Set media options. 2123 */ 2124 static int 2125 vge_ifmedia_upd(struct ifnet *ifp) 2126 { 2127 struct vge_softc *sc; 2128 struct mii_data *mii; 2129 int error; 2130 2131 sc = ifp->if_softc; 2132 VGE_LOCK(sc); 2133 mii = device_get_softc(sc->vge_miibus); 2134 error = mii_mediachg(mii); 2135 VGE_UNLOCK(sc); 2136 2137 return (error); 2138 } 2139 2140 /* 2141 * Report current media status. 
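 *
 * vge_ifmedia_sts() polls the PHY through mii_pollstat() with the softc
 * lock held and then copies the active media and link status into the
 * ifmediareq; if the interface is not up it returns without touching
 * the PHY.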
2142 */ 2143 static void 2144 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2145 { 2146 struct vge_softc *sc; 2147 struct mii_data *mii; 2148 2149 sc = ifp->if_softc; 2150 mii = device_get_softc(sc->vge_miibus); 2151 2152 VGE_LOCK(sc); 2153 if ((ifp->if_flags & IFF_UP) == 0) { 2154 VGE_UNLOCK(sc); 2155 return; 2156 } 2157 mii_pollstat(mii); 2158 VGE_UNLOCK(sc); 2159 ifmr->ifm_active = mii->mii_media_active; 2160 ifmr->ifm_status = mii->mii_media_status; 2161 } 2162 2163 static void 2164 vge_miibus_statchg(device_t dev) 2165 { 2166 struct vge_softc *sc; 2167 struct mii_data *mii; 2168 struct ifmedia_entry *ife; 2169 2170 sc = device_get_softc(dev); 2171 mii = device_get_softc(sc->vge_miibus); 2172 ife = mii->mii_media.ifm_cur; 2173 2174 /* 2175 * If the user manually selects a media mode, we need to turn 2176 * on the forced MAC mode bit in the DIAGCTL register. If the 2177 * user happens to choose a full duplex mode, we also need to 2178 * set the 'force full duplex' bit. This applies only to 2179 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2180 * mode is disabled, and in 1000baseT mode, full duplex is 2181 * always implied, so we turn on the forced mode bit but leave 2182 * the FDX bit cleared. 2183 */ 2184 2185 switch (IFM_SUBTYPE(ife->ifm_media)) { 2186 case IFM_AUTO: 2187 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2188 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2189 break; 2190 case IFM_1000_T: 2191 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2192 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2193 break; 2194 case IFM_100_TX: 2195 case IFM_10_T: 2196 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2197 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2198 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2199 } else { 2200 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2201 } 2202 break; 2203 default: 2204 device_printf(dev, "unknown media type: %x\n", 2205 IFM_SUBTYPE(ife->ifm_media)); 2206 break; 2207 } 2208 } 2209 2210 static int 2211 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2212 { 2213 struct vge_softc *sc = ifp->if_softc; 2214 struct ifreq *ifr = (struct ifreq *) data; 2215 struct mii_data *mii; 2216 int error = 0, mask; 2217 2218 switch (command) { 2219 case SIOCSIFMTU: 2220 VGE_LOCK(sc); 2221 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU) 2222 error = EINVAL; 2223 else if (ifp->if_mtu != ifr->ifr_mtu) { 2224 if (ifr->ifr_mtu > ETHERMTU && 2225 (sc->vge_flags & VGE_FLAG_JUMBO) == 0) 2226 error = EINVAL; 2227 else 2228 ifp->if_mtu = ifr->ifr_mtu; 2229 } 2230 VGE_UNLOCK(sc); 2231 break; 2232 case SIOCSIFFLAGS: 2233 VGE_LOCK(sc); 2234 if ((ifp->if_flags & IFF_UP) != 0) { 2235 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2236 ((ifp->if_flags ^ sc->vge_if_flags) & 2237 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2238 vge_rxfilter(sc); 2239 else 2240 vge_init_locked(sc); 2241 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2242 vge_stop(sc); 2243 sc->vge_if_flags = ifp->if_flags; 2244 VGE_UNLOCK(sc); 2245 break; 2246 case SIOCADDMULTI: 2247 case SIOCDELMULTI: 2248 VGE_LOCK(sc); 2249 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2250 vge_rxfilter(sc); 2251 VGE_UNLOCK(sc); 2252 break; 2253 case SIOCGIFMEDIA: 2254 case SIOCSIFMEDIA: 2255 mii = device_get_softc(sc->vge_miibus); 2256 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2257 break; 2258 case SIOCSIFCAP: 2259 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2260 #ifdef DEVICE_POLLING 2261 if (mask & IFCAP_POLLING) { 
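			/*
			 * Toggling IFCAP_POLLING switches between interrupt
			 * driven and polled operation: when vge_poll() is
			 * registered, the interrupt mask (VGE_IMR) is cleared
			 * and VGE_CR3_INT_GMSK is written to the clear side
			 * (VGE_CRC3); when polling is disabled, VGE_INTRS is
			 * reloaded, pending status in VGE_ISR is acknowledged
			 * and VGE_CR3_INT_GMSK is written back through the set
			 * side (VGE_CRS3).
			 */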
2262 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2263 error = ether_poll_register(vge_poll, ifp); 2264 if (error) 2265 return (error); 2266 VGE_LOCK(sc); 2267 /* Disable interrupts */ 2268 CSR_WRITE_4(sc, VGE_IMR, 0); 2269 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2270 ifp->if_capenable |= IFCAP_POLLING; 2271 VGE_UNLOCK(sc); 2272 } else { 2273 error = ether_poll_deregister(ifp); 2274 /* Enable interrupts. */ 2275 VGE_LOCK(sc); 2276 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2277 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2278 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2279 ifp->if_capenable &= ~IFCAP_POLLING; 2280 VGE_UNLOCK(sc); 2281 } 2282 } 2283 #endif /* DEVICE_POLLING */ 2284 VGE_LOCK(sc); 2285 if ((mask & IFCAP_TXCSUM) != 0 && 2286 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2287 ifp->if_capenable ^= IFCAP_TXCSUM; 2288 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2289 ifp->if_hwassist |= VGE_CSUM_FEATURES; 2290 else 2291 ifp->if_hwassist &= ~VGE_CSUM_FEATURES; 2292 } 2293 if ((mask & IFCAP_RXCSUM) != 0 && 2294 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 2295 ifp->if_capenable ^= IFCAP_RXCSUM; 2296 if ((mask & IFCAP_WOL_UCAST) != 0 && 2297 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2298 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2299 if ((mask & IFCAP_WOL_MCAST) != 0 && 2300 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2301 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2302 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2303 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2304 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2305 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2306 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2307 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2308 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2309 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 2310 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2311 vge_setvlan(sc); 2312 } 2313 VGE_UNLOCK(sc); 2314 VLAN_CAPABILITIES(ifp); 2315 break; 2316 default: 2317 error = ether_ioctl(ifp, command, data); 2318 break; 2319 } 2320 2321 return (error); 2322 } 2323 2324 static void 2325 vge_watchdog(void *arg) 2326 { 2327 struct vge_softc *sc; 2328 struct ifnet *ifp; 2329 2330 sc = arg; 2331 VGE_LOCK_ASSERT(sc); 2332 vge_stats_update(sc); 2333 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2334 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2335 return; 2336 2337 ifp = sc->vge_ifp; 2338 if_printf(ifp, "watchdog timeout\n"); 2339 ifp->if_oerrors++; 2340 2341 vge_txeof(sc); 2342 vge_rxeof(sc, VGE_RX_DESC_CNT); 2343 2344 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2345 vge_init_locked(sc); 2346 } 2347 2348 /* 2349 * Stop the adapter and free any mbufs allocated to the 2350 * RX and TX lists. 2351 */ 2352 static void 2353 vge_stop(struct vge_softc *sc) 2354 { 2355 struct ifnet *ifp; 2356 2357 VGE_LOCK_ASSERT(sc); 2358 ifp = sc->vge_ifp; 2359 sc->vge_timer = 0; 2360 callout_stop(&sc->vge_watchdog); 2361 2362 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2363 2364 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2365 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2366 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2367 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2368 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2369 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2370 2371 vge_stats_update(sc); 2372 VGE_CHAIN_RESET(sc); 2373 vge_txeof(sc); 2374 vge_freebufs(sc); 2375 } 2376 2377 /* 2378 * Device suspend routine. Stop the interface and save some PCI 2379 * settings in case the BIOS doesn't restore them properly on 2380 * resume. 
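 *
 * Suspend stops the MAC, lets vge_setwol() arm any requested wake-up
 * events and marks the softc with VGE_FLAG_SUSPENDED; the companion
 * vge_resume() below clears any pending PME status, tears the WOL setup
 * back down with vge_clrwol(), restarts MII auto-polling and
 * reinitializes the interface if it was up.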
2381 */ 2382 static int 2383 vge_suspend(device_t dev) 2384 { 2385 struct vge_softc *sc; 2386 2387 sc = device_get_softc(dev); 2388 2389 VGE_LOCK(sc); 2390 vge_stop(sc); 2391 vge_setwol(sc); 2392 sc->vge_flags |= VGE_FLAG_SUSPENDED; 2393 VGE_UNLOCK(sc); 2394 2395 return (0); 2396 } 2397 2398 /* 2399 * Device resume routine. Restore some PCI settings in case the BIOS 2400 * doesn't, re-enable busmastering, and restart the interface if 2401 * appropriate. 2402 */ 2403 static int 2404 vge_resume(device_t dev) 2405 { 2406 struct vge_softc *sc; 2407 struct ifnet *ifp; 2408 uint16_t pmstat; 2409 2410 sc = device_get_softc(dev); 2411 VGE_LOCK(sc); 2412 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) { 2413 /* Disable PME and clear PME status. */ 2414 pmstat = pci_read_config(sc->vge_dev, 2415 sc->vge_pmcap + PCIR_POWER_STATUS, 2); 2416 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2417 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2418 pci_write_config(sc->vge_dev, 2419 sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2420 } 2421 } 2422 vge_clrwol(sc); 2423 /* Restart MII auto-polling. */ 2424 vge_miipoll_start(sc); 2425 ifp = sc->vge_ifp; 2426 /* Reinitialize interface if necessary. */ 2427 if ((ifp->if_flags & IFF_UP) != 0) { 2428 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2429 vge_init_locked(sc); 2430 } 2431 sc->vge_flags &= ~VGE_FLAG_SUSPENDED; 2432 VGE_UNLOCK(sc); 2433 2434 return (0); 2435 } 2436 2437 /* 2438 * Stop all chip I/O so that the kernel's probe routines don't 2439 * get confused by errant DMAs when rebooting. 2440 */ 2441 static int 2442 vge_shutdown(device_t dev) 2443 { 2444 2445 return (vge_suspend(dev)); 2446 } 2447 2448 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2449 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2450 2451 static void 2452 vge_sysctl_node(struct vge_softc *sc) 2453 { 2454 struct sysctl_ctx_list *ctx; 2455 struct sysctl_oid_list *child, *parent; 2456 struct sysctl_oid *tree; 2457 struct vge_hw_stats *stats; 2458 2459 stats = &sc->vge_stats; 2460 ctx = device_get_sysctl_ctx(sc->vge_dev); 2461 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev)); 2462 2463 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff", 2464 CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff"); 2465 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt", 2466 CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet"); 2467 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt", 2468 CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet"); 2469 2470 /* Pull in device tunables. */ 2471 sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT; 2472 resource_int_value(device_get_name(sc->vge_dev), 2473 device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff); 2474 sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT; 2475 resource_int_value(device_get_name(sc->vge_dev), 2476 device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt); 2477 sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT; 2478 resource_int_value(device_get_name(sc->vge_dev), 2479 device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt); 2480 2481 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2482 NULL, "VGE statistics"); 2483 parent = SYSCTL_CHILDREN(tree); 2484 2485 /* Rx statistics. 
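 * Both the tunables registered above and the read-only counters that
 * follow hang off the per-device sysctl tree, so on a running system
 * they would typically appear as dev.vge.<unit>.int_holdoff,
 * dev.vge.<unit>.rx_coal_pkt, dev.vge.<unit>.tx_coal_pkt and
 * dev.vge.<unit>.stats.{rx,tx}.*; the initial values may also be taken
 * from device hints through resource_int_value().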
*/ 2486 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 2487 NULL, "RX MAC statistics"); 2488 child = SYSCTL_CHILDREN(tree); 2489 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames", 2490 &stats->rx_frames, "frames"); 2491 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2492 &stats->rx_good_frames, "Good frames"); 2493 VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 2494 &stats->rx_fifo_oflows, "FIFO overflows"); 2495 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts", 2496 &stats->rx_runts, "Too short frames"); 2497 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs", 2498 &stats->rx_runts_errs, "Too short frames with errors"); 2499 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2500 &stats->rx_pkts_64, "64 bytes frames"); 2501 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2502 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 2503 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2504 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 2505 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2506 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 2507 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2508 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 2509 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2510 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2511 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 2512 &stats->rx_pkts_1519_max, "1519 to max frames"); 2513 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs", 2514 &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors"); 2515 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2516 &stats->rx_jumbos, "Jumbo frames"); 2517 VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs", 2518 &stats->rx_crcerrs, "CRC errors"); 2519 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2520 &stats->rx_pause_frames, "Pause frames"); 2521 VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 2522 &stats->rx_alignerrs, "Alignment errors"); 2523 VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs", 2524 &stats->rx_nobufs, "Frames with no buffer event"); 2525 VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs", 2526 &stats->rx_symerrs, "Frames with symbol errors"); 2527 VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 2528 &stats->rx_lenerrs, "Frames with mismatched length"); 2529 2530 /* Tx statistics.
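 * The transmit side mirrors the receive counters; note that the SQE
 * error counter is only exported when VGE_ENABLE_SQEERR is defined,
 * matching the conditional accumulation in vge_stats_update().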
*/ 2531 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2532 NULL, "TX MAC statistics"); 2533 child = SYSCTL_CHILDREN(tree); 2534 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2535 &stats->tx_good_frames, "Good frames"); 2536 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2537 &stats->tx_pkts_64, "64 bytes frames"); 2538 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2539 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 2540 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2541 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 2542 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2543 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 2544 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2545 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 2546 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2547 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2548 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2549 &stats->tx_jumbos, "Jumbo frames"); 2550 VGE_SYSCTL_STAT_ADD32(ctx, child, "colls", 2551 &stats->tx_colls, "Collisions"); 2552 VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2553 &stats->tx_latecolls, "Late collisions"); 2554 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2555 &stats->tx_pause, "Pause frames"); 2556 #ifdef VGE_ENABLE_SQEERR 2557 VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs", 2558 &stats->tx_sqeerrs, "SQE errors"); 2559 #endif 2560 /* Clear MAC statistics. */ 2561 vge_stats_clear(sc); 2562 } 2563 2564 #undef VGE_SYSCTL_STAT_ADD32 2565 2566 static void 2567 vge_stats_clear(struct vge_softc *sc) 2568 { 2569 int i; 2570 2571 CSR_WRITE_1(sc, VGE_MIBCSR, 2572 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE); 2573 CSR_WRITE_1(sc, VGE_MIBCSR, 2574 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR); 2575 for (i = VGE_TIMEOUT; i > 0; i--) { 2576 DELAY(1); 2577 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0) 2578 break; 2579 } 2580 if (i == 0) 2581 device_printf(sc->vge_dev, "MIB clear timed out!\n"); 2582 CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) & 2583 ~VGE_MIBCSR_FREEZE); 2584 } 2585 2586 static void 2587 vge_stats_update(struct vge_softc *sc) 2588 { 2589 struct vge_hw_stats *stats; 2590 struct ifnet *ifp; 2591 uint32_t mib[VGE_MIB_CNT], val; 2592 int i; 2593 2594 VGE_LOCK_ASSERT(sc); 2595 2596 stats = &sc->vge_stats; 2597 ifp = sc->vge_ifp; 2598 2599 CSR_WRITE_1(sc, VGE_MIBCSR, 2600 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH); 2601 for (i = VGE_TIMEOUT; i > 0; i--) { 2602 DELAY(1); 2603 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0) 2604 break; 2605 } 2606 if (i == 0) { 2607 device_printf(sc->vge_dev, "MIB counter dump timed out!\n"); 2608 vge_stats_clear(sc); 2609 return; 2610 } 2611 2612 bzero(mib, sizeof(mib)); 2613 reset_idx: 2614 /* Set MIB read index to 0. */ 2615 CSR_WRITE_1(sc, VGE_MIBCSR, 2616 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI); 2617 for (i = 0; i < VGE_MIB_CNT; i++) { 2618 val = CSR_READ_4(sc, VGE_MIBDATA); 2619 if (i != VGE_MIB_DATA_IDX(val)) { 2620 /* Reading interrupted. */ 2621 goto reset_idx; 2622 } 2623 mib[i] = val & VGE_MIB_DATA_MASK; 2624 } 2625 2626 /* Rx stats. 
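 * Each word read from VGE_MIBDATA carries the index of the counter it
 * belongs to; when VGE_MIB_DATA_IDX() does not match the slot we
 * expect, the hardware has restarted the dump underneath us, so the
 * loop above rewinds the read index with VGE_MIBCSR_RINI and retries
 * before the snapshot is folded into the software counters below.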
*/ 2627 stats->rx_frames += mib[VGE_MIB_RX_FRAMES]; 2628 stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES]; 2629 stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS]; 2630 stats->rx_runts += mib[VGE_MIB_RX_RUNTS]; 2631 stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS]; 2632 stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64]; 2633 stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127]; 2634 stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255]; 2635 stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511]; 2636 stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023]; 2637 stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518]; 2638 stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX]; 2639 stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS]; 2640 stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS]; 2641 stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS]; 2642 stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE]; 2643 stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS]; 2644 stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS]; 2645 stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS]; 2646 stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS]; 2647 2648 /* Tx stats. */ 2649 stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES]; 2650 stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64]; 2651 stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127]; 2652 stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255]; 2653 stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511]; 2654 stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023]; 2655 stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518]; 2656 stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS]; 2657 stats->tx_colls += mib[VGE_MIB_TX_COLLS]; 2658 stats->tx_pause += mib[VGE_MIB_TX_PAUSE]; 2659 #ifdef VGE_ENABLE_SQEERR 2660 stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS]; 2661 #endif 2662 stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS]; 2663 2664 /* Update counters in ifnet. */ 2665 ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES]; 2666 2667 ifp->if_collisions += mib[VGE_MIB_TX_COLLS] + 2668 mib[VGE_MIB_TX_LATECOLLS]; 2669 2670 ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] + 2671 mib[VGE_MIB_TX_LATECOLLS]; 2672 2673 ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES]; 2674 2675 ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] + 2676 mib[VGE_MIB_RX_RUNTS] + 2677 mib[VGE_MIB_RX_RUNTS_ERRS] + 2678 mib[VGE_MIB_RX_CRCERRS] + 2679 mib[VGE_MIB_RX_ALIGNERRS] + 2680 mib[VGE_MIB_RX_NOBUFS] + 2681 mib[VGE_MIB_RX_SYMERRS] + 2682 mib[VGE_MIB_RX_LENERRS]; 2683 } 2684 2685 static void 2686 vge_intr_holdoff(struct vge_softc *sc) 2687 { 2688 uint8_t intctl; 2689 2690 VGE_LOCK_ASSERT(sc); 2691 2692 /* 2693 * Set Tx interrupt suppression threshold. 2694 * It's possible to use the single-shot timer in the VGE_CRS1 register 2695 * in the Tx path so that the driver can eliminate most Tx completion 2696 * interrupts. However, this requires an additional access to the 2697 * VGE_CRS1 register to reload the timer in addition to activating 2698 * the Tx kick command. Another downside is that we don't know in 2699 * advance what single-shot timer value should be used, so reclaiming 2700 * transmitted mbufs could be delayed considerably, which in turn 2701 * slows down Tx operation. 2702 */ 2703 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR); 2704 CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt); 2705 2706 /* Set Rx interrupt suppression threshold.
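 * As on the Tx side, the threshold counts completed frames: the
 * interrupt is withheld until vge_rx_coal_pkt frames have been
 * received, and a value of zero or less disables suppression through
 * VGE_INTCTL_RXINTSUP_DISABLE.  Independently of the thresholds, a
 * holdoff timer derived from vge_int_holdoff (see
 * VGE_INT_HOLDOFF_USEC()) may be armed through the
 * VGE_PAGESEL_INTHLDOFF page and enabled with VGE_CR3_INT_HOLDOFF.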
*/ 2707 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 2708 CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt); 2709 2710 intctl = CSR_READ_1(sc, VGE_INTCTL1); 2711 intctl &= ~VGE_INTCTL_SC_RELOAD; 2712 intctl |= VGE_INTCTL_HC_RELOAD; 2713 if (sc->vge_tx_coal_pkt <= 0) 2714 intctl |= VGE_INTCTL_TXINTSUP_DISABLE; 2715 else 2716 intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE; 2717 if (sc->vge_rx_coal_pkt <= 0) 2718 intctl |= VGE_INTCTL_RXINTSUP_DISABLE; 2719 else 2720 intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE; 2721 CSR_WRITE_1(sc, VGE_INTCTL1, intctl); 2722 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF); 2723 if (sc->vge_int_holdoff > 0) { 2724 /* Set interrupt holdoff timer. */ 2725 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 2726 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 2727 VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff)); 2728 /* Enable holdoff timer. */ 2729 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 2730 } 2731 } 2732 2733 static void 2734 vge_setlinkspeed(struct vge_softc *sc) 2735 { 2736 struct mii_data *mii; 2737 int aneg, i; 2738 2739 VGE_LOCK_ASSERT(sc); 2740 2741 mii = device_get_softc(sc->vge_miibus); 2742 mii_pollstat(mii); 2743 aneg = 0; 2744 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 2745 (IFM_ACTIVE | IFM_AVALID)) { 2746 switch IFM_SUBTYPE(mii->mii_media_active) { 2747 case IFM_10_T: 2748 case IFM_100_TX: 2749 return; 2750 case IFM_1000_T: 2751 aneg++; 2752 default: 2753 break; 2754 } 2755 } 2756 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0); 2757 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR, 2758 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 2759 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR, 2760 BMCR_AUTOEN | BMCR_STARTNEG); 2761 DELAY(1000); 2762 if (aneg != 0) { 2763 /* Poll link state until vge(4) get a 10/100 link. */ 2764 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 2765 mii_pollstat(mii); 2766 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 2767 == (IFM_ACTIVE | IFM_AVALID)) { 2768 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2769 case IFM_10_T: 2770 case IFM_100_TX: 2771 return; 2772 default: 2773 break; 2774 } 2775 } 2776 VGE_UNLOCK(sc); 2777 pause("vgelnk", hz); 2778 VGE_LOCK(sc); 2779 } 2780 if (i == MII_ANEGTICKS_GIGE) 2781 device_printf(sc->vge_dev, "establishing link failed, " 2782 "WOL may not work!"); 2783 } 2784 /* 2785 * No link, force MAC to have 100Mbps, full-duplex link. 2786 * This is the last resort and may/may not work. 2787 */ 2788 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 2789 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 2790 } 2791 2792 static void 2793 vge_setwol(struct vge_softc *sc) 2794 { 2795 struct ifnet *ifp; 2796 uint16_t pmstat; 2797 uint8_t val; 2798 2799 VGE_LOCK_ASSERT(sc); 2800 2801 if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) { 2802 /* No PME capability, PHY power down. */ 2803 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR, 2804 BMCR_PDOWN); 2805 vge_miipoll_stop(sc); 2806 return; 2807 } 2808 2809 ifp = sc->vge_ifp; 2810 2811 /* Clear WOL on pattern match. */ 2812 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2813 /* Disable WOL on magic/unicast packet. 
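 * The register pairs used here appear to follow the same set/clear
 * split as the CRS/CRC command registers: writes to the ...C registers
 * (VGE_WOLCR1C, VGE_WOLCFGC, VGE_WOLSR0C, VGE_WOLSR1C) clear the
 * corresponding enable or status bits, while the ...S registers
 * (VGE_WOLCR1S, VGE_WOLCFGS) set them.  The code below therefore
 * clears everything first and, only if WOL was requested, drops the
 * link to 10/100 with vge_setlinkspeed() and re-arms the events the
 * user enabled through the IFCAP_WOL_* capabilities.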
*/ 2814 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2815 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2816 VGE_WOLCFG_PMEOVR); 2817 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2818 vge_setlinkspeed(sc); 2819 val = 0; 2820 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2821 val |= VGE_WOLCR1_UCAST; 2822 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2823 val |= VGE_WOLCR1_MAGIC; 2824 CSR_WRITE_1(sc, VGE_WOLCR1S, val); 2825 val = 0; 2826 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2827 val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB; 2828 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR); 2829 /* Disable MII auto-polling. */ 2830 vge_miipoll_stop(sc); 2831 } 2832 CSR_SETBIT_1(sc, VGE_DIAGCTL, 2833 VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE); 2834 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2835 2836 /* Clear WOL status on pattern match. */ 2837 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2838 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2839 2840 val = CSR_READ_1(sc, VGE_PWRSTAT); 2841 val |= VGE_STICKHW_SWPTAG; 2842 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2843 /* Put hardware into sleep. */ 2844 val = CSR_READ_1(sc, VGE_PWRSTAT); 2845 val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1; 2846 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2847 /* Request PME if WOL is requested. */ 2848 pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap + 2849 PCIR_POWER_STATUS, 2); 2850 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2851 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2852 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2853 pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS, 2854 pmstat, 2); 2855 } 2856 2857 static void 2858 vge_clrwol(struct vge_softc *sc) 2859 { 2860 uint8_t val; 2861 2862 val = CSR_READ_1(sc, VGE_PWRSTAT); 2863 val &= ~VGE_STICKHW_SWPTAG; 2864 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2865 /* Disable WOL and clear power state indicator. */ 2866 val = CSR_READ_1(sc, VGE_PWRSTAT); 2867 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); 2868 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2869 2870 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2871 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2872 2873 /* Clear WOL on pattern match. */ 2874 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2875 /* Disable WOL on magic/unicast packet. */ 2876 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2877 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2878 VGE_WOLCFG_PMEOVR); 2879 /* Clear WOL status on pattern match. */ 2880 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2881 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2882 } 2883
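
/*
 * A short usage note: the WOL capabilities handled in vge_ioctl() are
 * the standard ifnet ones, so under a typical FreeBSD userland they
 * should be controllable with ifconfig, e.g.
 *
 *	ifconfig vge0 wol_magic
 *
 * before suspending, provided the adapter advertised power-management
 * support (VGE_FLAG_PMCAP) at attach time.  Likewise, the interrupt
 * moderation knobs exported by vge_sysctl_node() should be adjustable
 * at run time with something like
 *
 *	sysctl dev.vge.0.int_holdoff=<value>
 *
 * where the exact path depends on the unit number.
 */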