1 /*- 2 * Copyright (c) 2004 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44 /* 45 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that 46 * combines a tri-speed ethernet MAC and PHY, with the following 47 * features: 48 * 49 * o Jumbo frame support up to 16K 50 * o Transmit and receive flow control 51 * o IPv4 checksum offload 52 * o VLAN tag insertion and stripping 53 * o TCP large send 54 * o 64-bit multicast hash table filter 55 * o 64 entry CAM filter 56 * o 16K RX FIFO and 48K TX FIFO memory 57 * o Interrupt moderation 58 * 59 * The VT6122 supports up to four transmit DMA queues. The descriptors 60 * in the transmit ring can address up to 7 data fragments; frames which 61 * span more than 7 data buffers must be coalesced, but in general the 62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 63 * long. The receive descriptors address only a single buffer. 64 * 65 * There are two peculiar design issues with the VT6122. One is that 66 * receive data buffers must be aligned on a 32-bit boundary. This is 67 * not a problem where the VT6122 is used as a LOM device in x86-based 68 * systems, but on architectures that generate unaligned access traps, we 69 * have to do some copying. 70 * 71 * The other issue has to do with the way 64-bit addresses are handled. 72 * The DMA descriptors only allow you to specify 48 bits of addressing 73 * information. The remaining 16 bits are specified using one of the 74 * I/O registers.
If you only have a 32-bit system, then this isn't 75 * an issue, but if you have a 64-bit system and more than 4GB of 76 * memory, you have to make sure your network data buffers reside 77 * in the same 48-bit 'segment.' 78 * 79 * Special thanks to Ryan Fu at VIA Networking for providing documentation 80 * and sample NICs for testing. 81 */ 82 83 #ifdef HAVE_KERNEL_OPTION_HEADERS 84 #include "opt_device_polling.h" 85 #endif 86 87 #include <sys/param.h> 88 #include <sys/endian.h> 89 #include <sys/systm.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/module.h> 94 #include <sys/kernel.h> 95 #include <sys/socket.h> 96 #include <sys/sysctl.h> 97 98 #include <net/if.h> 99 #include <net/if_arp.h> 100 #include <net/ethernet.h> 101 #include <net/if_dl.h> 102 #include <net/if_media.h> 103 #include <net/if_types.h> 104 #include <net/if_vlan_var.h> 105 106 #include <net/bpf.h> 107 108 #include <machine/bus.h> 109 #include <machine/resource.h> 110 #include <sys/bus.h> 111 #include <sys/rman.h> 112 113 #include <dev/mii/mii.h> 114 #include <dev/mii/miivar.h> 115 116 #include <dev/pci/pcireg.h> 117 #include <dev/pci/pcivar.h> 118 119 MODULE_DEPEND(vge, pci, 1, 1, 1); 120 MODULE_DEPEND(vge, ether, 1, 1, 1); 121 MODULE_DEPEND(vge, miibus, 1, 1, 1); 122 123 /* "device miibus" required. See GENERIC if you get errors here. */ 124 #include "miibus_if.h" 125 126 #include <dev/vge/if_vgereg.h> 127 #include <dev/vge/if_vgevar.h> 128 129 #define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 130 131 /* Tunables */ 132 static int msi_disable = 0; 133 TUNABLE_INT("hw.vge.msi_disable", &msi_disable); 134 135 /* 136 * The SQE error counter of the MIB seems to report a bogus value. 137 * Vendor's workaround does not seem to work on PCIe based 138 * controllers. Disable it until we find a better workaround. 139 */ 140 #undef VGE_ENABLE_SQEERR 141 142 /* 143 * Various supported device vendors/types and their names.
144 */ 145 static struct vge_type vge_devs[] = { 146 { VIA_VENDORID, VIA_DEVICEID_61XX, 147 "VIA Networking Velocity Gigabit Ethernet" }, 148 { 0, 0, NULL } 149 }; 150 151 static int vge_attach(device_t); 152 static int vge_detach(device_t); 153 static int vge_probe(device_t); 154 static int vge_resume(device_t); 155 static int vge_shutdown(device_t); 156 static int vge_suspend(device_t); 157 158 static void vge_cam_clear(struct vge_softc *); 159 static int vge_cam_set(struct vge_softc *, uint8_t *); 160 static void vge_clrwol(struct vge_softc *); 161 static void vge_discard_rxbuf(struct vge_softc *, int); 162 static int vge_dma_alloc(struct vge_softc *); 163 static void vge_dma_free(struct vge_softc *); 164 static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 165 #ifdef VGE_EEPROM 166 static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 167 #endif 168 static int vge_encap(struct vge_softc *, struct mbuf **); 169 #ifndef __NO_STRICT_ALIGNMENT 170 static __inline void 171 vge_fixup_rx(struct mbuf *); 172 #endif 173 static void vge_freebufs(struct vge_softc *); 174 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 175 static int vge_ifmedia_upd(struct ifnet *); 176 static void vge_init(void *); 177 static void vge_init_locked(struct vge_softc *); 178 static void vge_intr(void *); 179 static void vge_intr_holdoff(struct vge_softc *); 180 static int vge_ioctl(struct ifnet *, u_long, caddr_t); 181 static void vge_link_statchg(void *); 182 static int vge_miibus_readreg(device_t, int, int); 183 static void vge_miibus_statchg(device_t); 184 static int vge_miibus_writereg(device_t, int, int, int); 185 static void vge_miipoll_start(struct vge_softc *); 186 static void vge_miipoll_stop(struct vge_softc *); 187 static int vge_newbuf(struct vge_softc *, int); 188 static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 189 static void vge_reset(struct vge_softc *); 190 static int vge_rx_list_init(struct vge_softc *); 191 static int vge_rxeof(struct vge_softc *, int); 192 static void vge_rxfilter(struct vge_softc *); 193 static void vge_setvlan(struct vge_softc *); 194 static void vge_setwol(struct vge_softc *); 195 static void vge_start(struct ifnet *); 196 static void vge_start_locked(struct ifnet *); 197 static void vge_stats_clear(struct vge_softc *); 198 static void vge_stats_update(struct vge_softc *); 199 static void vge_stop(struct vge_softc *); 200 static void vge_sysctl_node(struct vge_softc *); 201 static int vge_tx_list_init(struct vge_softc *); 202 static void vge_txeof(struct vge_softc *); 203 static void vge_watchdog(void *); 204 205 static device_method_t vge_methods[] = { 206 /* Device interface */ 207 DEVMETHOD(device_probe, vge_probe), 208 DEVMETHOD(device_attach, vge_attach), 209 DEVMETHOD(device_detach, vge_detach), 210 DEVMETHOD(device_suspend, vge_suspend), 211 DEVMETHOD(device_resume, vge_resume), 212 DEVMETHOD(device_shutdown, vge_shutdown), 213 214 /* bus interface */ 215 DEVMETHOD(bus_print_child, bus_generic_print_child), 216 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 217 218 /* MII interface */ 219 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 220 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 221 DEVMETHOD(miibus_statchg, vge_miibus_statchg), 222 223 { 0, 0 } 224 }; 225 226 static driver_t vge_driver = { 227 "vge", 228 vge_methods, 229 sizeof(struct vge_softc) 230 }; 231 232 static devclass_t vge_devclass; 233 234 DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 235 DRIVER_MODULE(miibus, 
vge, miibus_driver, miibus_devclass, 0, 0); 236 237 #ifdef VGE_EEPROM 238 /* 239 * Read a word of data stored in the EEPROM at address 'addr.' 240 */ 241 static void 242 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 243 { 244 int i; 245 uint16_t word = 0; 246 247 /* 248 * Enter EEPROM embedded programming mode. In order to 249 * access the EEPROM at all, we first have to set the 250 * EELOAD bit in the CHIPCFG2 register. 251 */ 252 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 253 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 254 255 /* Select the address of the word we want to read */ 256 CSR_WRITE_1(sc, VGE_EEADDR, addr); 257 258 /* Issue read command */ 259 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 260 261 /* Wait for the done bit to be set. */ 262 for (i = 0; i < VGE_TIMEOUT; i++) { 263 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 264 break; 265 } 266 267 if (i == VGE_TIMEOUT) { 268 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 269 *dest = 0; 270 return; 271 } 272 273 /* Read the result */ 274 word = CSR_READ_2(sc, VGE_EERDDAT); 275 276 /* Turn off EEPROM access mode. */ 277 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 278 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 279 280 *dest = word; 281 } 282 #endif 283 284 /* 285 * Read a sequence of words from the EEPROM. 286 */ 287 static void 288 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 289 { 290 int i; 291 #ifdef VGE_EEPROM 292 uint16_t word = 0, *ptr; 293 294 for (i = 0; i < cnt; i++) { 295 vge_eeprom_getword(sc, off + i, &word); 296 ptr = (uint16_t *)(dest + (i * 2)); 297 if (swap) 298 *ptr = ntohs(word); 299 else 300 *ptr = word; 301 } 302 #else 303 for (i = 0; i < ETHER_ADDR_LEN; i++) 304 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 305 #endif 306 } 307 308 static void 309 vge_miipoll_stop(struct vge_softc *sc) 310 { 311 int i; 312 313 CSR_WRITE_1(sc, VGE_MIICMD, 0); 314 315 for (i = 0; i < VGE_TIMEOUT; i++) { 316 DELAY(1); 317 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 318 break; 319 } 320 321 if (i == VGE_TIMEOUT) 322 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 323 } 324 325 static void 326 vge_miipoll_start(struct vge_softc *sc) 327 { 328 int i; 329 330 /* First, make sure we're idle. */ 331 332 CSR_WRITE_1(sc, VGE_MIICMD, 0); 333 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 334 335 for (i = 0; i < VGE_TIMEOUT; i++) { 336 DELAY(1); 337 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 338 break; 339 } 340 341 if (i == VGE_TIMEOUT) { 342 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 343 return; 344 } 345 346 /* Now enable auto poll mode. */ 347 348 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 349 350 /* And make sure it started. */ 351 352 for (i = 0; i < VGE_TIMEOUT; i++) { 353 DELAY(1); 354 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 355 break; 356 } 357 358 if (i == VGE_TIMEOUT) 359 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 360 } 361 362 static int 363 vge_miibus_readreg(device_t dev, int phy, int reg) 364 { 365 struct vge_softc *sc; 366 int i; 367 uint16_t rval = 0; 368 369 sc = device_get_softc(dev); 370 371 if (phy != sc->vge_phyaddr) 372 return (0); 373 374 vge_miipoll_stop(sc); 375 376 /* Specify the register we want to read. */ 377 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 378 379 /* Issue read command. */ 380 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 381 382 /* Wait for the read command bit to self-clear. 
*/ 383 for (i = 0; i < VGE_TIMEOUT; i++) { 384 DELAY(1); 385 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 386 break; 387 } 388 389 if (i == VGE_TIMEOUT) 390 device_printf(sc->vge_dev, "MII read timed out\n"); 391 else 392 rval = CSR_READ_2(sc, VGE_MIIDATA); 393 394 vge_miipoll_start(sc); 395 396 return (rval); 397 } 398 399 static int 400 vge_miibus_writereg(device_t dev, int phy, int reg, int data) 401 { 402 struct vge_softc *sc; 403 int i, rval = 0; 404 405 sc = device_get_softc(dev); 406 407 if (phy != sc->vge_phyaddr) 408 return (0); 409 410 vge_miipoll_stop(sc); 411 412 /* Specify the register we want to write. */ 413 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 414 415 /* Specify the data we want to write. */ 416 CSR_WRITE_2(sc, VGE_MIIDATA, data); 417 418 /* Issue write command. */ 419 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 420 421 /* Wait for the write command bit to self-clear. */ 422 for (i = 0; i < VGE_TIMEOUT; i++) { 423 DELAY(1); 424 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 425 break; 426 } 427 428 if (i == VGE_TIMEOUT) { 429 device_printf(sc->vge_dev, "MII write timed out\n"); 430 rval = EIO; 431 } 432 433 vge_miipoll_start(sc); 434 435 return (rval); 436 } 437 438 static void 439 vge_cam_clear(struct vge_softc *sc) 440 { 441 int i; 442 443 /* 444 * Turn off all the mask bits. This tells the chip 445 * that none of the entries in the CAM filter are valid. 446 * Desired entries will be enabled as we fill the filter in. 447 */ 448 449 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 450 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 451 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 452 for (i = 0; i < 8; i++) 453 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 454 455 /* Clear the VLAN filter too. */ 456 457 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); 458 for (i = 0; i < 8; i++) 459 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 460 461 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 462 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 463 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 464 465 sc->vge_camidx = 0; 466 } 467 468 static int 469 vge_cam_set(struct vge_softc *sc, uint8_t *addr) 470 { 471 int i, error = 0; 472 473 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 474 return (ENOSPC); 475 476 /* Select the CAM data page. */ 477 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 478 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 479 480 /* Set the filter entry we want to update and enable writing. */ 481 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); 482 483 /* Write the address to the CAM registers */ 484 for (i = 0; i < ETHER_ADDR_LEN; i++) 485 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 486 487 /* Issue a write command. */ 488 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 489 490 /* Wait for it to clear. */ 491 for (i = 0; i < VGE_TIMEOUT; i++) { 492 DELAY(1); 493 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 494 break; 495 } 496 497 if (i == VGE_TIMEOUT) { 498 device_printf(sc->vge_dev, "setting CAM filter failed\n"); 499 error = EIO; 500 goto fail; 501 } 502 503 /* Select the CAM mask page. */ 504 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 505 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 506 507 /* Set the mask bit that enables this filter. */ 508 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 509 1<<(sc->vge_camidx & 7)); 510 511 sc->vge_camidx++; 512 513 fail: 514 /* Turn off access to CAM.
*/ 515 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 516 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 517 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 518 519 return (error); 520 } 521 522 static void 523 vge_setvlan(struct vge_softc *sc) 524 { 525 struct ifnet *ifp; 526 uint8_t cfg; 527 528 VGE_LOCK_ASSERT(sc); 529 530 ifp = sc->vge_ifp; 531 cfg = CSR_READ_1(sc, VGE_RXCFG); 532 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 533 cfg |= VGE_VTAG_OPT2; 534 else 535 cfg &= ~VGE_VTAG_OPT2; 536 CSR_WRITE_1(sc, VGE_RXCFG, cfg); 537 } 538 539 /* 540 * Program the multicast filter. We use the 64-entry CAM filter 541 * for perfect filtering. If there are more than 64 multicast addresses, 542 * we use the hash filter instead. 543 */ 544 static void 545 vge_rxfilter(struct vge_softc *sc) 546 { 547 struct ifnet *ifp; 548 struct ifmultiaddr *ifma; 549 uint32_t h, hashes[2]; 550 uint8_t rxcfg; 551 int error = 0; 552 553 VGE_LOCK_ASSERT(sc); 554 555 /* First, zot all the multicast entries. */ 556 hashes[0] = 0; 557 hashes[1] = 0; 558 559 rxcfg = CSR_READ_1(sc, VGE_RXCTL); 560 rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST | 561 VGE_RXCTL_RX_PROMISC); 562 /* 563 * Always allow VLAN oversized frames and frames for 564 * this host. 565 */ 566 rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST; 567 568 ifp = sc->vge_ifp; 569 if ((ifp->if_flags & IFF_BROADCAST) != 0) 570 rxcfg |= VGE_RXCTL_RX_BCAST; 571 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 572 if ((ifp->if_flags & IFF_PROMISC) != 0) 573 rxcfg |= VGE_RXCTL_RX_PROMISC; 574 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 575 hashes[0] = 0xFFFFFFFF; 576 hashes[1] = 0xFFFFFFFF; 577 } 578 goto done; 579 } 580 581 vge_cam_clear(sc); 582 /* Now program new ones */ 583 if_maddr_rlock(ifp); 584 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 585 if (ifma->ifma_addr->sa_family != AF_LINK) 586 continue; 587 error = vge_cam_set(sc, 588 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 589 if (error) 590 break; 591 } 592 593 /* If there were too many addresses, use the hash filter. */ 594 if (error) { 595 vge_cam_clear(sc); 596 597 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 598 if (ifma->ifma_addr->sa_family != AF_LINK) 599 continue; 600 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 601 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 602 if (h < 32) 603 hashes[0] |= (1 << h); 604 else 605 hashes[1] |= (1 << (h - 32)); 606 } 607 } 608 if_maddr_runlock(ifp); 609 610 done: 611 if (hashes[0] != 0 || hashes[1] != 0) 612 rxcfg |= VGE_RXCTL_RX_MCAST; 613 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 614 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 615 CSR_WRITE_1(sc, VGE_RXCTL, rxcfg); 616 } 617 618 static void 619 vge_reset(struct vge_softc *sc) 620 { 621 int i; 622 623 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 624 625 for (i = 0; i < VGE_TIMEOUT; i++) { 626 DELAY(5); 627 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 628 break; 629 } 630 631 if (i == VGE_TIMEOUT) { 632 device_printf(sc->vge_dev, "soft reset timed out\n"); 633 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 634 DELAY(2000); 635 } 636 637 DELAY(5000); 638 } 639 640 /* 641 * Probe for a VIA gigabit chip. Check the PCI vendor and device 642 * IDs against our list and return a device name if we find a match.
643 */ 644 static int 645 vge_probe(device_t dev) 646 { 647 struct vge_type *t; 648 649 t = vge_devs; 650 651 while (t->vge_name != NULL) { 652 if ((pci_get_vendor(dev) == t->vge_vid) && 653 (pci_get_device(dev) == t->vge_did)) { 654 device_set_desc(dev, t->vge_name); 655 return (BUS_PROBE_DEFAULT); 656 } 657 t++; 658 } 659 660 return (ENXIO); 661 } 662 663 /* 664 * Map a single buffer address. 665 */ 666 667 struct vge_dmamap_arg { 668 bus_addr_t vge_busaddr; 669 }; 670 671 static void 672 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 673 { 674 struct vge_dmamap_arg *ctx; 675 676 if (error != 0) 677 return; 678 679 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 680 681 ctx = (struct vge_dmamap_arg *)arg; 682 ctx->vge_busaddr = segs[0].ds_addr; 683 } 684 685 static int 686 vge_dma_alloc(struct vge_softc *sc) 687 { 688 struct vge_dmamap_arg ctx; 689 struct vge_txdesc *txd; 690 struct vge_rxdesc *rxd; 691 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 692 int error, i; 693 694 lowaddr = BUS_SPACE_MAXADDR; 695 696 again: 697 /* Create parent ring tag. */ 698 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 699 1, 0, /* algnmnt, boundary */ 700 lowaddr, /* lowaddr */ 701 BUS_SPACE_MAXADDR, /* highaddr */ 702 NULL, NULL, /* filter, filterarg */ 703 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 704 0, /* nsegments */ 705 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 706 0, /* flags */ 707 NULL, NULL, /* lockfunc, lockarg */ 708 &sc->vge_cdata.vge_ring_tag); 709 if (error != 0) { 710 device_printf(sc->vge_dev, 711 "could not create parent DMA tag.\n"); 712 goto fail; 713 } 714 715 /* Create tag for Tx ring. */ 716 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 717 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 718 BUS_SPACE_MAXADDR, /* lowaddr */ 719 BUS_SPACE_MAXADDR, /* highaddr */ 720 NULL, NULL, /* filter, filterarg */ 721 VGE_TX_LIST_SZ, /* maxsize */ 722 1, /* nsegments */ 723 VGE_TX_LIST_SZ, /* maxsegsize */ 724 0, /* flags */ 725 NULL, NULL, /* lockfunc, lockarg */ 726 &sc->vge_cdata.vge_tx_ring_tag); 727 if (error != 0) { 728 device_printf(sc->vge_dev, 729 "could not allocate Tx ring DMA tag.\n"); 730 goto fail; 731 } 732 733 /* Create tag for Rx ring. */ 734 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 735 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 736 BUS_SPACE_MAXADDR, /* lowaddr */ 737 BUS_SPACE_MAXADDR, /* highaddr */ 738 NULL, NULL, /* filter, filterarg */ 739 VGE_RX_LIST_SZ, /* maxsize */ 740 1, /* nsegments */ 741 VGE_RX_LIST_SZ, /* maxsegsize */ 742 0, /* flags */ 743 NULL, NULL, /* lockfunc, lockarg */ 744 &sc->vge_cdata.vge_rx_ring_tag); 745 if (error != 0) { 746 device_printf(sc->vge_dev, 747 "could not allocate Rx ring DMA tag.\n"); 748 goto fail; 749 } 750 751 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
*/ 752 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 753 (void **)&sc->vge_rdata.vge_tx_ring, 754 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 755 &sc->vge_cdata.vge_tx_ring_map); 756 if (error != 0) { 757 device_printf(sc->vge_dev, 758 "could not allocate DMA'able memory for Tx ring.\n"); 759 goto fail; 760 } 761 762 ctx.vge_busaddr = 0; 763 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 764 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 765 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 766 if (error != 0 || ctx.vge_busaddr == 0) { 767 device_printf(sc->vge_dev, 768 "could not load DMA'able memory for Tx ring.\n"); 769 goto fail; 770 } 771 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 772 773 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 774 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 775 (void **)&sc->vge_rdata.vge_rx_ring, 776 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 777 &sc->vge_cdata.vge_rx_ring_map); 778 if (error != 0) { 779 device_printf(sc->vge_dev, 780 "could not allocate DMA'able memory for Rx ring.\n"); 781 goto fail; 782 } 783 784 ctx.vge_busaddr = 0; 785 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 786 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 787 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 788 if (error != 0 || ctx.vge_busaddr == 0) { 789 device_printf(sc->vge_dev, 790 "could not load DMA'able memory for Rx ring.\n"); 791 goto fail; 792 } 793 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 794 795 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 796 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 797 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 798 if ((VGE_ADDR_HI(tx_ring_end) != 799 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 800 (VGE_ADDR_HI(rx_ring_end) != 801 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 802 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 803 device_printf(sc->vge_dev, "4GB boundary crossed, " 804 "switching to 32bit DMA address mode.\n"); 805 vge_dma_free(sc); 806 /* Limit DMA address space to 32bit and try again. */ 807 lowaddr = BUS_SPACE_MAXADDR_32BIT; 808 goto again; 809 } 810 811 /* Create parent buffer tag. */ 812 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 813 1, 0, /* algnmnt, boundary */ 814 VGE_BUF_DMA_MAXADDR, /* lowaddr */ 815 BUS_SPACE_MAXADDR, /* highaddr */ 816 NULL, NULL, /* filter, filterarg */ 817 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 818 0, /* nsegments */ 819 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 820 0, /* flags */ 821 NULL, NULL, /* lockfunc, lockarg */ 822 &sc->vge_cdata.vge_buffer_tag); 823 if (error != 0) { 824 device_printf(sc->vge_dev, 825 "could not create parent buffer DMA tag.\n"); 826 goto fail; 827 } 828 829 /* Create tag for Tx buffers. */ 830 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 831 1, 0, /* algnmnt, boundary */ 832 BUS_SPACE_MAXADDR, /* lowaddr */ 833 BUS_SPACE_MAXADDR, /* highaddr */ 834 NULL, NULL, /* filter, filterarg */ 835 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 836 VGE_MAXTXSEGS, /* nsegments */ 837 MCLBYTES, /* maxsegsize */ 838 0, /* flags */ 839 NULL, NULL, /* lockfunc, lockarg */ 840 &sc->vge_cdata.vge_tx_tag); 841 if (error != 0) { 842 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 843 goto fail; 844 } 845 846 /* Create tag for Rx buffers. 
*/ 847 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 848 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 849 BUS_SPACE_MAXADDR, /* lowaddr */ 850 BUS_SPACE_MAXADDR, /* highaddr */ 851 NULL, NULL, /* filter, filterarg */ 852 MCLBYTES, /* maxsize */ 853 1, /* nsegments */ 854 MCLBYTES, /* maxsegsize */ 855 0, /* flags */ 856 NULL, NULL, /* lockfunc, lockarg */ 857 &sc->vge_cdata.vge_rx_tag); 858 if (error != 0) { 859 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 860 goto fail; 861 } 862 863 /* Create DMA maps for Tx buffers. */ 864 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 865 txd = &sc->vge_cdata.vge_txdesc[i]; 866 txd->tx_m = NULL; 867 txd->tx_dmamap = NULL; 868 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 869 &txd->tx_dmamap); 870 if (error != 0) { 871 device_printf(sc->vge_dev, 872 "could not create Tx dmamap.\n"); 873 goto fail; 874 } 875 } 876 /* Create DMA maps for Rx buffers. */ 877 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 878 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 879 device_printf(sc->vge_dev, 880 "could not create spare Rx dmamap.\n"); 881 goto fail; 882 } 883 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 884 rxd = &sc->vge_cdata.vge_rxdesc[i]; 885 rxd->rx_m = NULL; 886 rxd->rx_dmamap = NULL; 887 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 888 &rxd->rx_dmamap); 889 if (error != 0) { 890 device_printf(sc->vge_dev, 891 "could not create Rx dmamap.\n"); 892 goto fail; 893 } 894 } 895 896 fail: 897 return (error); 898 } 899 900 static void 901 vge_dma_free(struct vge_softc *sc) 902 { 903 struct vge_txdesc *txd; 904 struct vge_rxdesc *rxd; 905 int i; 906 907 /* Tx ring. */ 908 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 909 if (sc->vge_cdata.vge_tx_ring_map) 910 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 911 sc->vge_cdata.vge_tx_ring_map); 912 if (sc->vge_cdata.vge_tx_ring_map && 913 sc->vge_rdata.vge_tx_ring) 914 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 915 sc->vge_rdata.vge_tx_ring, 916 sc->vge_cdata.vge_tx_ring_map); 917 sc->vge_rdata.vge_tx_ring = NULL; 918 sc->vge_cdata.vge_tx_ring_map = NULL; 919 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 920 sc->vge_cdata.vge_tx_ring_tag = NULL; 921 } 922 /* Rx ring. */ 923 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 924 if (sc->vge_cdata.vge_rx_ring_map) 925 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 926 sc->vge_cdata.vge_rx_ring_map); 927 if (sc->vge_cdata.vge_rx_ring_map && 928 sc->vge_rdata.vge_rx_ring) 929 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 930 sc->vge_rdata.vge_rx_ring, 931 sc->vge_cdata.vge_rx_ring_map); 932 sc->vge_rdata.vge_rx_ring = NULL; 933 sc->vge_cdata.vge_rx_ring_map = NULL; 934 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 935 sc->vge_cdata.vge_rx_ring_tag = NULL; 936 } 937 /* Tx buffers. */ 938 if (sc->vge_cdata.vge_tx_tag != NULL) { 939 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 940 txd = &sc->vge_cdata.vge_txdesc[i]; 941 if (txd->tx_dmamap != NULL) { 942 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 943 txd->tx_dmamap); 944 txd->tx_dmamap = NULL; 945 } 946 } 947 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 948 sc->vge_cdata.vge_tx_tag = NULL; 949 } 950 /* Rx buffers. 
*/ 951 if (sc->vge_cdata.vge_rx_tag != NULL) { 952 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 953 rxd = &sc->vge_cdata.vge_rxdesc[i]; 954 if (rxd->rx_dmamap != NULL) { 955 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 956 rxd->rx_dmamap); 957 rxd->rx_dmamap = NULL; 958 } 959 } 960 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 961 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 962 sc->vge_cdata.vge_rx_sparemap); 963 sc->vge_cdata.vge_rx_sparemap = NULL; 964 } 965 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 966 sc->vge_cdata.vge_rx_tag = NULL; 967 } 968 969 if (sc->vge_cdata.vge_buffer_tag != NULL) { 970 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 971 sc->vge_cdata.vge_buffer_tag = NULL; 972 } 973 if (sc->vge_cdata.vge_ring_tag != NULL) { 974 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 975 sc->vge_cdata.vge_ring_tag = NULL; 976 } 977 } 978 979 /* 980 * Attach the interface. Allocate softc structures, do ifmedia 981 * setup and ethernet/BPF attach. 982 */ 983 static int 984 vge_attach(device_t dev) 985 { 986 u_char eaddr[ETHER_ADDR_LEN]; 987 struct vge_softc *sc; 988 struct ifnet *ifp; 989 int error = 0, cap, i, msic, rid; 990 991 sc = device_get_softc(dev); 992 sc->vge_dev = dev; 993 994 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 995 MTX_DEF); 996 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 997 998 /* 999 * Map control/status registers. 1000 */ 1001 pci_enable_busmaster(dev); 1002 1003 rid = PCIR_BAR(1); 1004 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1005 RF_ACTIVE); 1006 1007 if (sc->vge_res == NULL) { 1008 device_printf(dev, "couldn't map ports/memory\n"); 1009 error = ENXIO; 1010 goto fail; 1011 } 1012 1013 if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) == 0) { 1014 sc->vge_flags |= VGE_FLAG_PCIE; 1015 sc->vge_expcap = cap; 1016 } else 1017 sc->vge_flags |= VGE_FLAG_JUMBO; 1018 if (pci_find_extcap(dev, PCIY_PMG, &cap) == 0) { 1019 sc->vge_flags |= VGE_FLAG_PMCAP; 1020 sc->vge_pmcap = cap; 1021 } 1022 rid = 0; 1023 msic = pci_msi_count(dev); 1024 if (msi_disable == 0 && msic > 0) { 1025 msic = 1; 1026 if (pci_alloc_msi(dev, &msic) == 0) { 1027 if (msic == 1) { 1028 sc->vge_flags |= VGE_FLAG_MSI; 1029 device_printf(dev, "Using %d MSI message\n", 1030 msic); 1031 rid = 1; 1032 } else 1033 pci_release_msi(dev); 1034 } 1035 } 1036 1037 /* Allocate interrupt */ 1038 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1039 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 1040 if (sc->vge_irq == NULL) { 1041 device_printf(dev, "couldn't map interrupt\n"); 1042 error = ENXIO; 1043 goto fail; 1044 } 1045 1046 /* Reset the adapter. */ 1047 vge_reset(sc); 1048 /* Reload EEPROM. */ 1049 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 1050 for (i = 0; i < VGE_TIMEOUT; i++) { 1051 DELAY(5); 1052 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1053 break; 1054 } 1055 if (i == VGE_TIMEOUT) 1056 device_printf(dev, "EEPROM reload timed out\n"); 1057 /* 1058 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1059 * MAC will receive magic packet which in turn confuses 1060 * controller. 1061 */ 1062 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1063 1064 /* 1065 * Get station address from the EEPROM. 1066 */ 1067 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1068 /* 1069 * Save configured PHY address. 1070 * It seems the PHY address of PCIe controllers just 1071 * reflects media jump strapping status so we assume the 1072 * internal PHY address of PCIe controller is at 1. 
1073 */ 1074 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1075 sc->vge_phyaddr = 1; 1076 else 1077 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1078 VGE_MIICFG_PHYADDR; 1079 /* Clear WOL and take hardware from powerdown. */ 1080 vge_clrwol(sc); 1081 vge_sysctl_node(sc); 1082 error = vge_dma_alloc(sc); 1083 if (error) 1084 goto fail; 1085 1086 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1087 if (ifp == NULL) { 1088 device_printf(dev, "can not if_alloc()\n"); 1089 error = ENOSPC; 1090 goto fail; 1091 } 1092 1093 /* Do MII setup */ 1094 if (mii_phy_probe(dev, &sc->vge_miibus, 1095 vge_ifmedia_upd, vge_ifmedia_sts)) { 1096 device_printf(dev, "MII without any phy!\n"); 1097 error = ENXIO; 1098 goto fail; 1099 } 1100 1101 ifp->if_softc = sc; 1102 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1103 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1104 ifp->if_ioctl = vge_ioctl; 1105 ifp->if_capabilities = IFCAP_VLAN_MTU; 1106 ifp->if_start = vge_start; 1107 ifp->if_hwassist = VGE_CSUM_FEATURES; 1108 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | 1109 IFCAP_VLAN_HWTAGGING; 1110 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) 1111 ifp->if_capabilities |= IFCAP_WOL; 1112 ifp->if_capenable = ifp->if_capabilities; 1113 #ifdef DEVICE_POLLING 1114 ifp->if_capabilities |= IFCAP_POLLING; 1115 #endif 1116 ifp->if_init = vge_init; 1117 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1); 1118 ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1; 1119 IFQ_SET_READY(&ifp->if_snd); 1120 1121 /* 1122 * Call MI attach routine. 1123 */ 1124 ether_ifattach(ifp, eaddr); 1125 1126 /* Tell the upper layer(s) we support long frames. */ 1127 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1128 1129 /* Hook interrupt last to avoid having to lock softc */ 1130 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1131 NULL, vge_intr, sc, &sc->vge_intrhand); 1132 1133 if (error) { 1134 device_printf(dev, "couldn't set up irq\n"); 1135 ether_ifdetach(ifp); 1136 goto fail; 1137 } 1138 1139 fail: 1140 if (error) 1141 vge_detach(dev); 1142 1143 return (error); 1144 } 1145 1146 /* 1147 * Shutdown hardware and free up resources. This can be called any 1148 * time after the mutex has been initialized. It is called in both 1149 * the error case in attach and the normal detach case so it needs 1150 * to be careful about only freeing resources that have actually been 1151 * allocated. 1152 */ 1153 static int 1154 vge_detach(device_t dev) 1155 { 1156 struct vge_softc *sc; 1157 struct ifnet *ifp; 1158 1159 sc = device_get_softc(dev); 1160 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); 1161 ifp = sc->vge_ifp; 1162 1163 #ifdef DEVICE_POLLING 1164 if (ifp->if_capenable & IFCAP_POLLING) 1165 ether_poll_deregister(ifp); 1166 #endif 1167 1168 /* These should only be active if attach succeeded */ 1169 if (device_is_attached(dev)) { 1170 ether_ifdetach(ifp); 1171 VGE_LOCK(sc); 1172 vge_stop(sc); 1173 VGE_UNLOCK(sc); 1174 callout_drain(&sc->vge_watchdog); 1175 } 1176 if (sc->vge_miibus) 1177 device_delete_child(dev, sc->vge_miibus); 1178 bus_generic_detach(dev); 1179 1180 if (sc->vge_intrhand) 1181 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); 1182 if (sc->vge_irq) 1183 bus_release_resource(dev, SYS_RES_IRQ, 1184 sc->vge_flags & VGE_FLAG_MSI ? 
1 : 0, sc->vge_irq); 1185 if (sc->vge_flags & VGE_FLAG_MSI) 1186 pci_release_msi(dev); 1187 if (sc->vge_res) 1188 bus_release_resource(dev, SYS_RES_MEMORY, 1189 PCIR_BAR(1), sc->vge_res); 1190 if (ifp) 1191 if_free(ifp); 1192 1193 vge_dma_free(sc); 1194 mtx_destroy(&sc->vge_mtx); 1195 1196 return (0); 1197 } 1198 1199 static void 1200 vge_discard_rxbuf(struct vge_softc *sc, int prod) 1201 { 1202 struct vge_rxdesc *rxd; 1203 int i; 1204 1205 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1206 rxd->rx_desc->vge_sts = 0; 1207 rxd->rx_desc->vge_ctl = 0; 1208 1209 /* 1210 * Note: the manual fails to document the fact that for 1211 * proper operation, the driver needs to replenish the RX 1212 * DMA ring 4 descriptors at a time (rather than one at a 1213 * time, like most chips). We can allocate the new buffers 1214 * but we should not set the OWN bits until we're ready 1215 * to hand back 4 of them in one shot. 1216 */ 1217 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1218 for (i = VGE_RXCHUNK; i > 0; i--) { 1219 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1220 rxd = rxd->rxd_prev; 1221 } 1222 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1223 } 1224 } 1225 1226 static int 1227 vge_newbuf(struct vge_softc *sc, int prod) 1228 { 1229 struct vge_rxdesc *rxd; 1230 struct mbuf *m; 1231 bus_dma_segment_t segs[1]; 1232 bus_dmamap_t map; 1233 int i, nsegs; 1234 1235 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1236 if (m == NULL) 1237 return (ENOBUFS); 1238 /* 1239 * This is part of an evil trick to deal with strict-alignment 1240 * architectures. The VIA chip requires RX buffers to be aligned 1241 * on 32-bit boundaries, but that will hose strict-alignment 1242 * architectures. To get around this, we leave some empty space 1243 * at the start of each buffer and for non-strict-alignment hosts, 1244 * we copy the buffer back two bytes to achieve word alignment. 1245 * This is slightly more efficient than allocating a new buffer, 1246 * copying the contents, and discarding the old buffer. 1247 */ 1248 m->m_len = m->m_pkthdr.len = MCLBYTES; 1249 m_adj(m, VGE_RX_BUF_ALIGN); 1250 1251 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1252 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1253 m_freem(m); 1254 return (ENOBUFS); 1255 } 1256 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1257 1258 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1259 if (rxd->rx_m != NULL) { 1260 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1261 BUS_DMASYNC_POSTREAD); 1262 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1263 } 1264 map = rxd->rx_dmamap; 1265 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1266 sc->vge_cdata.vge_rx_sparemap = map; 1267 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1268 BUS_DMASYNC_PREREAD); 1269 rxd->rx_m = m; 1270 1271 rxd->rx_desc->vge_sts = 0; 1272 rxd->rx_desc->vge_ctl = 0; 1273 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1274 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1275 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1276 1277 /* 1278 * Note: the manual fails to document the fact that for 1279 * proper operation, the driver needs to replenish the RX 1280 * DMA ring 4 descriptors at a time (rather than one at a 1281 * time, like most chips). We can allocate the new buffers 1282 * but we should not set the OWN bits until we're ready 1283 * to hand back 4 of them in one shot.
1284 */ 1285 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1286 for (i = VGE_RXCHUNK; i > 0; i--) { 1287 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1288 rxd = rxd->rxd_prev; 1289 } 1290 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1291 } 1292 1293 return (0); 1294 } 1295 1296 static int 1297 vge_tx_list_init(struct vge_softc *sc) 1298 { 1299 struct vge_ring_data *rd; 1300 struct vge_txdesc *txd; 1301 int i; 1302 1303 VGE_LOCK_ASSERT(sc); 1304 1305 sc->vge_cdata.vge_tx_prodidx = 0; 1306 sc->vge_cdata.vge_tx_considx = 0; 1307 sc->vge_cdata.vge_tx_cnt = 0; 1308 1309 rd = &sc->vge_rdata; 1310 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1311 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1312 txd = &sc->vge_cdata.vge_txdesc[i]; 1313 txd->tx_m = NULL; 1314 txd->tx_desc = &rd->vge_tx_ring[i]; 1315 } 1316 1317 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1318 sc->vge_cdata.vge_tx_ring_map, 1319 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1320 1321 return (0); 1322 } 1323 1324 static int 1325 vge_rx_list_init(struct vge_softc *sc) 1326 { 1327 struct vge_ring_data *rd; 1328 struct vge_rxdesc *rxd; 1329 int i; 1330 1331 VGE_LOCK_ASSERT(sc); 1332 1333 sc->vge_cdata.vge_rx_prodidx = 0; 1334 sc->vge_cdata.vge_head = NULL; 1335 sc->vge_cdata.vge_tail = NULL; 1336 sc->vge_cdata.vge_rx_commit = 0; 1337 1338 rd = &sc->vge_rdata; 1339 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1340 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1341 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1342 rxd->rx_m = NULL; 1343 rxd->rx_desc = &rd->vge_rx_ring[i]; 1344 if (i == 0) 1345 rxd->rxd_prev = 1346 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1347 else 1348 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1349 if (vge_newbuf(sc, i) != 0) 1350 return (ENOBUFS); 1351 } 1352 1353 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1354 sc->vge_cdata.vge_rx_ring_map, 1355 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1356 1357 sc->vge_cdata.vge_rx_commit = 0; 1358 1359 return (0); 1360 } 1361 1362 static void 1363 vge_freebufs(struct vge_softc *sc) 1364 { 1365 struct vge_txdesc *txd; 1366 struct vge_rxdesc *rxd; 1367 struct ifnet *ifp; 1368 int i; 1369 1370 VGE_LOCK_ASSERT(sc); 1371 1372 ifp = sc->vge_ifp; 1373 /* 1374 * Free RX and TX mbufs still in the queues. 1375 */ 1376 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1377 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1378 if (rxd->rx_m != NULL) { 1379 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, 1380 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 1381 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, 1382 rxd->rx_dmamap); 1383 m_freem(rxd->rx_m); 1384 rxd->rx_m = NULL; 1385 } 1386 } 1387 1388 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1389 txd = &sc->vge_cdata.vge_txdesc[i]; 1390 if (txd->tx_m != NULL) { 1391 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, 1392 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1393 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, 1394 txd->tx_dmamap); 1395 m_freem(txd->tx_m); 1396 txd->tx_m = NULL; 1397 ifp->if_oerrors++; 1398 } 1399 } 1400 } 1401 1402 #ifndef __NO_STRICT_ALIGNMENT 1403 static __inline void 1404 vge_fixup_rx(struct mbuf *m) 1405 { 1406 int i; 1407 uint16_t *src, *dst; 1408 1409 src = mtod(m, uint16_t *); 1410 dst = src - 1; 1411 1412 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1413 *dst++ = *src++; 1414 1415 m->m_data -= ETHER_ALIGN; 1416 } 1417 #endif 1418 1419 /* 1420 * RX handler. We support the reception of jumbo frames that have 1421 * been fragmented across multiple 2K mbuf cluster buffers. 
1422 */ 1423 static int 1424 vge_rxeof(struct vge_softc *sc, int count) 1425 { 1426 struct mbuf *m; 1427 struct ifnet *ifp; 1428 int prod, prog, total_len; 1429 struct vge_rxdesc *rxd; 1430 struct vge_rx_desc *cur_rx; 1431 uint32_t rxstat, rxctl; 1432 1433 VGE_LOCK_ASSERT(sc); 1434 1435 ifp = sc->vge_ifp; 1436 1437 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1438 sc->vge_cdata.vge_rx_ring_map, 1439 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1440 1441 prod = sc->vge_cdata.vge_rx_prodidx; 1442 for (prog = 0; count > 0 && 1443 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1444 VGE_RX_DESC_INC(prod)) { 1445 cur_rx = &sc->vge_rdata.vge_rx_ring[prod]; 1446 rxstat = le32toh(cur_rx->vge_sts); 1447 if ((rxstat & VGE_RDSTS_OWN) != 0) 1448 break; 1449 count--; 1450 prog++; 1451 rxctl = le32toh(cur_rx->vge_ctl); 1452 total_len = VGE_RXBYTES(rxstat); 1453 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1454 m = rxd->rx_m; 1455 1456 /* 1457 * If the 'start of frame' bit is set, this indicates 1458 * either the first fragment in a multi-fragment receive, 1459 * or an intermediate fragment. Either way, we want to 1460 * accumulate the buffers. 1461 */ 1462 if ((rxstat & VGE_RXPKT_SOF) != 0) { 1463 if (vge_newbuf(sc, prod) != 0) { 1464 ifp->if_iqdrops++; 1465 VGE_CHAIN_RESET(sc); 1466 vge_discard_rxbuf(sc, prod); 1467 continue; 1468 } 1469 m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN; 1470 if (sc->vge_cdata.vge_head == NULL) { 1471 sc->vge_cdata.vge_head = m; 1472 sc->vge_cdata.vge_tail = m; 1473 } else { 1474 m->m_flags &= ~M_PKTHDR; 1475 sc->vge_cdata.vge_tail->m_next = m; 1476 sc->vge_cdata.vge_tail = m; 1477 } 1478 continue; 1479 } 1480 1481 /* 1482 * Bad/error frames will have the RXOK bit cleared. 1483 * However, there's one error case we want to allow: 1484 * if a VLAN tagged frame arrives and the chip can't 1485 * match it against the CAM filter, it considers this 1486 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1487 * We don't want to drop the frame though: our VLAN 1488 * filtering is done in software. 1489 * We also want to receive bad-checksummed frames and 1490 * frames with bad length. 1491 */ 1492 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1493 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1494 VGE_RDSTS_CSUMERR)) == 0) { 1495 ifp->if_ierrors++; 1496 /* 1497 * If this is part of a multi-fragment packet, 1498 * discard all the pieces. 1499 */ 1500 VGE_CHAIN_RESET(sc); 1501 vge_discard_rxbuf(sc, prod); 1502 continue; 1503 } 1504 1505 if (vge_newbuf(sc, prod) != 0) { 1506 ifp->if_iqdrops++; 1507 VGE_CHAIN_RESET(sc); 1508 vge_discard_rxbuf(sc, prod); 1509 continue; 1510 } 1511 1512 /* Chain received mbufs. */ 1513 if (sc->vge_cdata.vge_head != NULL) { 1514 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1515 /* 1516 * Special case: if there's 4 bytes or less 1517 * in this buffer, the mbuf can be discarded: 1518 * the last 4 bytes are the CRC, which we don't 1519 * care about anyway.
1520 */ 1521 if (m->m_len <= ETHER_CRC_LEN) { 1522 sc->vge_cdata.vge_tail->m_len -= 1523 (ETHER_CRC_LEN - m->m_len); 1524 m_freem(m); 1525 } else { 1526 m->m_len -= ETHER_CRC_LEN; 1527 m->m_flags &= ~M_PKTHDR; 1528 sc->vge_cdata.vge_tail->m_next = m; 1529 } 1530 m = sc->vge_cdata.vge_head; 1531 m->m_flags |= M_PKTHDR; 1532 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1533 } else { 1534 m->m_flags |= M_PKTHDR; 1535 m->m_pkthdr.len = m->m_len = 1536 (total_len - ETHER_CRC_LEN); 1537 } 1538 1539 #ifndef __NO_STRICT_ALIGNMENT 1540 vge_fixup_rx(m); 1541 #endif 1542 m->m_pkthdr.rcvif = ifp; 1543 1544 /* Do RX checksumming if enabled */ 1545 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1546 (rxctl & VGE_RDCTL_FRAG) == 0) { 1547 /* Check IP header checksum */ 1548 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1549 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1550 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1551 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1552 1553 /* Check TCP/UDP checksum */ 1554 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1555 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1556 m->m_pkthdr.csum_flags |= 1557 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1558 m->m_pkthdr.csum_data = 0xffff; 1559 } 1560 } 1561 1562 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1563 /* 1564 * The 32-bit rxctl register is stored in little-endian. 1565 * However, the 16-bit vlan tag is stored in big-endian, 1566 * so we have to byte swap it. 1567 */ 1568 m->m_pkthdr.ether_vtag = 1569 bswap16(rxctl & VGE_RDCTL_VLANID); 1570 m->m_flags |= M_VLANTAG; 1571 } 1572 1573 VGE_UNLOCK(sc); 1574 (*ifp->if_input)(ifp, m); 1575 VGE_LOCK(sc); 1576 sc->vge_cdata.vge_head = NULL; 1577 sc->vge_cdata.vge_tail = NULL; 1578 } 1579 1580 if (prog > 0) { 1581 sc->vge_cdata.vge_rx_prodidx = prod; 1582 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1583 sc->vge_cdata.vge_rx_ring_map, 1584 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1585 /* Update residue counter. */ 1586 if (sc->vge_cdata.vge_rx_commit != 0) { 1587 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1588 sc->vge_cdata.vge_rx_commit); 1589 sc->vge_cdata.vge_rx_commit = 0; 1590 } 1591 } 1592 return (prog); 1593 } 1594 1595 static void 1596 vge_txeof(struct vge_softc *sc) 1597 { 1598 struct ifnet *ifp; 1599 struct vge_tx_desc *cur_tx; 1600 struct vge_txdesc *txd; 1601 uint32_t txstat; 1602 int cons, prod; 1603 1604 VGE_LOCK_ASSERT(sc); 1605 1606 ifp = sc->vge_ifp; 1607 1608 if (sc->vge_cdata.vge_tx_cnt == 0) 1609 return; 1610 1611 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1612 sc->vge_cdata.vge_tx_ring_map, 1613 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1614 1615 /* 1616 * Go through our tx list and free mbufs for those 1617 * frames that have been transmitted. 
1618 */ 1619 cons = sc->vge_cdata.vge_tx_considx; 1620 prod = sc->vge_cdata.vge_tx_prodidx; 1621 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1622 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1623 txstat = le32toh(cur_tx->vge_sts); 1624 if ((txstat & VGE_TDSTS_OWN) != 0) 1625 break; 1626 sc->vge_cdata.vge_tx_cnt--; 1627 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1628 1629 txd = &sc->vge_cdata.vge_txdesc[cons]; 1630 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1631 BUS_DMASYNC_POSTWRITE); 1632 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1633 1634 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1635 __func__)); 1636 m_freem(txd->tx_m); 1637 txd->tx_m = NULL; 1638 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1639 } 1640 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1641 sc->vge_cdata.vge_tx_ring_map, 1642 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1643 sc->vge_cdata.vge_tx_considx = cons; 1644 if (sc->vge_cdata.vge_tx_cnt == 0) 1645 sc->vge_timer = 0; 1646 } 1647 1648 static void 1649 vge_link_statchg(void *xsc) 1650 { 1651 struct vge_softc *sc; 1652 struct ifnet *ifp; 1653 struct mii_data *mii; 1654 1655 sc = xsc; 1656 ifp = sc->vge_ifp; 1657 VGE_LOCK_ASSERT(sc); 1658 mii = device_get_softc(sc->vge_miibus); 1659 1660 mii_pollstat(mii); 1661 if ((sc->vge_flags & VGE_FLAG_LINK) != 0) { 1662 if (!(mii->mii_media_status & IFM_ACTIVE)) { 1663 sc->vge_flags &= ~VGE_FLAG_LINK; 1664 if_link_state_change(sc->vge_ifp, 1665 LINK_STATE_DOWN); 1666 } 1667 } else { 1668 if (mii->mii_media_status & IFM_ACTIVE && 1669 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1670 sc->vge_flags |= VGE_FLAG_LINK; 1671 if_link_state_change(sc->vge_ifp, 1672 LINK_STATE_UP); 1673 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1674 vge_start_locked(ifp); 1675 } 1676 } 1677 } 1678 1679 #ifdef DEVICE_POLLING 1680 static int 1681 vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 1682 { 1683 struct vge_softc *sc = ifp->if_softc; 1684 int rx_npkts = 0; 1685 1686 VGE_LOCK(sc); 1687 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1688 goto done; 1689 1690 rx_npkts = vge_rxeof(sc, count); 1691 vge_txeof(sc); 1692 1693 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1694 vge_start_locked(ifp); 1695 1696 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1697 uint32_t status; 1698 status = CSR_READ_4(sc, VGE_ISR); 1699 if (status == 0xFFFFFFFF) 1700 goto done; 1701 if (status) 1702 CSR_WRITE_4(sc, VGE_ISR, status); 1703 1704 /* 1705 * XXX check behaviour on receiver stalls. 
1706 */ 1707 1708 if (status & VGE_ISR_TXDMA_STALL || 1709 status & VGE_ISR_RXDMA_STALL) { 1710 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1711 vge_init_locked(sc); 1712 } 1713 1714 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1715 vge_rxeof(sc, count); 1716 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1717 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1718 } 1719 } 1720 done: 1721 VGE_UNLOCK(sc); 1722 return (rx_npkts); 1723 } 1724 #endif /* DEVICE_POLLING */ 1725 1726 static void 1727 vge_intr(void *arg) 1728 { 1729 struct vge_softc *sc; 1730 struct ifnet *ifp; 1731 uint32_t status; 1732 1733 sc = arg; 1734 VGE_LOCK(sc); 1735 1736 ifp = sc->vge_ifp; 1737 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 || 1738 (ifp->if_flags & IFF_UP) == 0) { 1739 VGE_UNLOCK(sc); 1740 return; 1741 } 1742 1743 #ifdef DEVICE_POLLING 1744 if (ifp->if_capenable & IFCAP_POLLING) { 1745 VGE_UNLOCK(sc); 1746 return; 1747 } 1748 #endif 1749 1750 /* Disable interrupts */ 1751 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1752 status = CSR_READ_4(sc, VGE_ISR); 1753 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD); 1754 /* If the card has gone away the read returns 0xffff. */ 1755 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0) 1756 goto done; 1757 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1758 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1759 vge_rxeof(sc, VGE_RX_DESC_CNT); 1760 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1761 vge_rxeof(sc, VGE_RX_DESC_CNT); 1762 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1763 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1764 } 1765 1766 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO)) 1767 vge_txeof(sc); 1768 1769 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1770 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1771 vge_init_locked(sc); 1772 } 1773 1774 if (status & VGE_ISR_LINKSTS) 1775 vge_link_statchg(sc); 1776 } 1777 done: 1778 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1779 /* Re-enable interrupts */ 1780 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1781 1782 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1783 vge_start_locked(ifp); 1784 } 1785 VGE_UNLOCK(sc); 1786 } 1787 1788 static int 1789 vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1790 { 1791 struct vge_txdesc *txd; 1792 struct vge_tx_frag *frag; 1793 struct mbuf *m; 1794 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1795 int error, i, nsegs, padlen; 1796 uint32_t cflags; 1797 1798 VGE_LOCK_ASSERT(sc); 1799 1800 M_ASSERTPKTHDR((*m_head)); 1801 1802 /* Argh. This chip does not autopad short frames. */ 1803 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1804 m = *m_head; 1805 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1806 if (M_WRITABLE(m) == 0) { 1807 /* Get a writable copy. */ 1808 m = m_dup(*m_head, M_DONTWAIT); 1809 m_freem(*m_head); 1810 if (m == NULL) { 1811 *m_head = NULL; 1812 return (ENOBUFS); 1813 } 1814 *m_head = m; 1815 } 1816 if (M_TRAILINGSPACE(m) < padlen) { 1817 m = m_defrag(m, M_DONTWAIT); 1818 if (m == NULL) { 1819 m_freem(*m_head); 1820 *m_head = NULL; 1821 return (ENOBUFS); 1822 } 1823 } 1824 /* 1825 * Manually pad short frames, and zero the pad space 1826 * to avoid leaking data. 
1827 */ 1828 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1829 m->m_pkthdr.len += padlen; 1830 m->m_len = m->m_pkthdr.len; 1831 *m_head = m; 1832 } 1833 1834 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1835 1836 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1837 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1838 if (error == EFBIG) { 1839 m = m_collapse(*m_head, M_DONTWAIT, VGE_MAXTXSEGS); 1840 if (m == NULL) { 1841 m_freem(*m_head); 1842 *m_head = NULL; 1843 return (ENOMEM); 1844 } 1845 *m_head = m; 1846 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1847 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1848 if (error != 0) { 1849 m_freem(*m_head); 1850 *m_head = NULL; 1851 return (error); 1852 } 1853 } else if (error != 0) 1854 return (error); 1855 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1856 BUS_DMASYNC_PREWRITE); 1857 1858 m = *m_head; 1859 cflags = 0; 1860 1861 /* Configure checksum offload. */ 1862 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1863 cflags |= VGE_TDCTL_IPCSUM; 1864 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1865 cflags |= VGE_TDCTL_TCPCSUM; 1866 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1867 cflags |= VGE_TDCTL_UDPCSUM; 1868 1869 /* Configure VLAN. */ 1870 if ((m->m_flags & M_VLANTAG) != 0) 1871 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1872 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1873 /* 1874 * XXX 1875 * Velocity family seems to support TSO but no information 1876 * for MSS configuration is available. Also the number of 1877 * fragments supported by a descriptor is too small to hold 1878 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1879 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1880 * longer chain of buffers but no additional information is 1881 * available. 1882 * 1883 * When telling the chip how many segments there are, we 1884 * must use nsegs + 1 instead of just nsegs. Darned if I 1885 * know why. This also means we can't use the last fragment 1886 * field of Tx descriptor. 1887 */ 1888 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1889 VGE_TD_LS_NORM); 1890 for (i = 0; i < nsegs; i++) { 1891 frag = &txd->tx_desc->vge_frag[i]; 1892 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1893 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1894 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1895 } 1896 1897 sc->vge_cdata.vge_tx_cnt++; 1898 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1899 1900 /* 1901 * Finally request interrupt and give the first descriptor 1902 * ownership to hardware. 1903 */ 1904 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1905 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1906 txd->tx_m = m; 1907 1908 return (0); 1909 } 1910 1911 /* 1912 * Main transmit routine. 
1913 */ 1914 1915 static void 1916 vge_start(struct ifnet *ifp) 1917 { 1918 struct vge_softc *sc; 1919 1920 sc = ifp->if_softc; 1921 VGE_LOCK(sc); 1922 vge_start_locked(ifp); 1923 VGE_UNLOCK(sc); 1924 } 1925 1926 1927 static void 1928 vge_start_locked(struct ifnet *ifp) 1929 { 1930 struct vge_softc *sc; 1931 struct vge_txdesc *txd; 1932 struct mbuf *m_head; 1933 int enq, idx; 1934 1935 sc = ifp->if_softc; 1936 1937 VGE_LOCK_ASSERT(sc); 1938 1939 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1940 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1941 IFF_DRV_RUNNING) 1942 return; 1943 1944 idx = sc->vge_cdata.vge_tx_prodidx; 1945 VGE_TX_DESC_DEC(idx); 1946 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1947 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1948 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1949 if (m_head == NULL) 1950 break; 1951 /* 1952 * Pack the data into the transmit ring. If we 1953 * don't have room, set the OACTIVE flag and wait 1954 * for the NIC to drain the ring. 1955 */ 1956 if (vge_encap(sc, &m_head)) { 1957 if (m_head == NULL) 1958 break; 1959 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1960 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1961 break; 1962 } 1963 1964 txd = &sc->vge_cdata.vge_txdesc[idx]; 1965 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1966 VGE_TX_DESC_INC(idx); 1967 1968 enq++; 1969 /* 1970 * If there's a BPF listener, bounce a copy of this frame 1971 * to him. 1972 */ 1973 ETHER_BPF_MTAP(ifp, m_head); 1974 } 1975 1976 if (enq > 0) { 1977 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1978 sc->vge_cdata.vge_tx_ring_map, 1979 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1980 /* Issue a transmit command. */ 1981 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 1982 /* 1983 * Set a timeout in case the chip goes out to lunch. 1984 */ 1985 sc->vge_timer = 5; 1986 } 1987 } 1988 1989 static void 1990 vge_init(void *xsc) 1991 { 1992 struct vge_softc *sc = xsc; 1993 1994 VGE_LOCK(sc); 1995 vge_init_locked(sc); 1996 VGE_UNLOCK(sc); 1997 } 1998 1999 static void 2000 vge_init_locked(struct vge_softc *sc) 2001 { 2002 struct ifnet *ifp = sc->vge_ifp; 2003 struct mii_data *mii; 2004 int error, i; 2005 2006 VGE_LOCK_ASSERT(sc); 2007 mii = device_get_softc(sc->vge_miibus); 2008 2009 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2010 return; 2011 2012 /* 2013 * Cancel pending I/O and free all RX/TX buffers. 2014 */ 2015 vge_stop(sc); 2016 vge_reset(sc); 2017 2018 /* 2019 * Initialize the RX and TX descriptors and mbufs. 2020 */ 2021 2022 error = vge_rx_list_init(sc); 2023 if (error != 0) { 2024 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2025 return; 2026 } 2027 vge_tx_list_init(sc); 2028 /* Clear MAC statistics. */ 2029 vge_stats_clear(sc); 2030 /* Set our station address */ 2031 for (i = 0; i < ETHER_ADDR_LEN; i++) 2032 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); 2033 2034 /* 2035 * Set receive FIFO threshold. Also allow transmission and 2036 * reception of VLAN tagged frames. 
2037 */ 2038 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 2039 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES); 2040 2041 /* Set DMA burst length */ 2042 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 2043 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 2044 2045 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 2046 2047 /* Set collision backoff algorithm */ 2048 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 2049 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 2050 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 2051 2052 /* Disable LPSEL field in priority resolution */ 2053 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 2054 2055 /* 2056 * Load the addresses of the DMA queues into the chip. 2057 * Note that we only use one transmit queue. 2058 */ 2059 2060 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR, 2061 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)); 2062 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 2063 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr)); 2064 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 2065 2066 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 2067 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr)); 2068 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 2069 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 2070 2071 /* Configure interrupt moderation. */ 2072 vge_intr_holdoff(sc); 2073 2074 /* Enable and wake up the RX descriptor queue */ 2075 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 2076 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 2077 2078 /* Enable the TX descriptor queue */ 2079 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 2080 2081 /* Init the cam filter. */ 2082 vge_cam_clear(sc); 2083 2084 /* Set up receiver filter. */ 2085 vge_rxfilter(sc); 2086 vge_setvlan(sc); 2087 2088 /* Enable flow control */ 2089 2090 CSR_WRITE_1(sc, VGE_CRS2, 0x8B); 2091 2092 /* Enable jumbo frame reception (if desired) */ 2093 2094 /* Start the MAC. */ 2095 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 2096 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 2097 CSR_WRITE_1(sc, VGE_CRS0, 2098 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 2099 2100 #ifdef DEVICE_POLLING 2101 /* 2102 * Disable interrupts if we are polling. 2103 */ 2104 if (ifp->if_capenable & IFCAP_POLLING) { 2105 CSR_WRITE_4(sc, VGE_IMR, 0); 2106 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2107 } else /* otherwise ... */ 2108 #endif 2109 { 2110 /* 2111 * Enable interrupts. 2112 */ 2113 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2114 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2115 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2116 } 2117 2118 sc->vge_flags &= ~VGE_FLAG_LINK; 2119 mii_mediachg(mii); 2120 2121 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2122 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2123 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2124 } 2125 2126 /* 2127 * Set media options. 2128 */ 2129 static int 2130 vge_ifmedia_upd(struct ifnet *ifp) 2131 { 2132 struct vge_softc *sc; 2133 struct mii_data *mii; 2134 int error; 2135 2136 sc = ifp->if_softc; 2137 VGE_LOCK(sc); 2138 mii = device_get_softc(sc->vge_miibus); 2139 error = mii_mediachg(mii); 2140 VGE_UNLOCK(sc); 2141 2142 return (error); 2143 } 2144 2145 /* 2146 * Report current media status. 
2147 */ 2148 static void 2149 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2150 { 2151 struct vge_softc *sc; 2152 struct mii_data *mii; 2153 2154 sc = ifp->if_softc; 2155 mii = device_get_softc(sc->vge_miibus); 2156 2157 VGE_LOCK(sc); 2158 if ((ifp->if_flags & IFF_UP) == 0) { 2159 VGE_UNLOCK(sc); 2160 return; 2161 } 2162 mii_pollstat(mii); 2163 VGE_UNLOCK(sc); 2164 ifmr->ifm_active = mii->mii_media_active; 2165 ifmr->ifm_status = mii->mii_media_status; 2166 } 2167 2168 static void 2169 vge_miibus_statchg(device_t dev) 2170 { 2171 struct vge_softc *sc; 2172 struct mii_data *mii; 2173 struct ifmedia_entry *ife; 2174 2175 sc = device_get_softc(dev); 2176 mii = device_get_softc(sc->vge_miibus); 2177 ife = mii->mii_media.ifm_cur; 2178 2179 /* 2180 * If the user manually selects a media mode, we need to turn 2181 * on the forced MAC mode bit in the DIAGCTL register. If the 2182 * user happens to choose a full duplex mode, we also need to 2183 * set the 'force full duplex' bit. This applies only to 2184 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2185 * mode is disabled, and in 1000baseT mode, full duplex is 2186 * always implied, so we turn on the forced mode bit but leave 2187 * the FDX bit cleared. 2188 */ 2189 2190 switch (IFM_SUBTYPE(ife->ifm_media)) { 2191 case IFM_AUTO: 2192 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2193 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2194 break; 2195 case IFM_1000_T: 2196 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2197 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2198 break; 2199 case IFM_100_TX: 2200 case IFM_10_T: 2201 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2202 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2203 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2204 } else { 2205 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2206 } 2207 break; 2208 default: 2209 device_printf(dev, "unknown media type: %x\n", 2210 IFM_SUBTYPE(ife->ifm_media)); 2211 break; 2212 } 2213 } 2214 2215 static int 2216 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2217 { 2218 struct vge_softc *sc = ifp->if_softc; 2219 struct ifreq *ifr = (struct ifreq *) data; 2220 struct mii_data *mii; 2221 int error = 0, mask; 2222 2223 switch (command) { 2224 case SIOCSIFMTU: 2225 VGE_LOCK(sc); 2226 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU) 2227 error = EINVAL; 2228 else if (ifp->if_mtu != ifr->ifr_mtu) { 2229 if (ifr->ifr_mtu > ETHERMTU && 2230 (sc->vge_flags & VGE_FLAG_JUMBO) == 0) 2231 error = EINVAL; 2232 else 2233 ifp->if_mtu = ifr->ifr_mtu; 2234 } 2235 VGE_UNLOCK(sc); 2236 break; 2237 case SIOCSIFFLAGS: 2238 VGE_LOCK(sc); 2239 if ((ifp->if_flags & IFF_UP) != 0) { 2240 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2241 ((ifp->if_flags ^ sc->vge_if_flags) & 2242 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2243 vge_rxfilter(sc); 2244 else 2245 vge_init_locked(sc); 2246 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2247 vge_stop(sc); 2248 sc->vge_if_flags = ifp->if_flags; 2249 VGE_UNLOCK(sc); 2250 break; 2251 case SIOCADDMULTI: 2252 case SIOCDELMULTI: 2253 VGE_LOCK(sc); 2254 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2255 vge_rxfilter(sc); 2256 VGE_UNLOCK(sc); 2257 break; 2258 case SIOCGIFMEDIA: 2259 case SIOCSIFMEDIA: 2260 mii = device_get_softc(sc->vge_miibus); 2261 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2262 break; 2263 case SIOCSIFCAP: 2264 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2265 #ifdef DEVICE_POLLING 2266 if (mask & IFCAP_POLLING) { 
2267 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2268 error = ether_poll_register(vge_poll, ifp); 2269 if (error) 2270 return (error); 2271 VGE_LOCK(sc); 2272 /* Disable interrupts */ 2273 CSR_WRITE_4(sc, VGE_IMR, 0); 2274 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2275 ifp->if_capenable |= IFCAP_POLLING; 2276 VGE_UNLOCK(sc); 2277 } else { 2278 error = ether_poll_deregister(ifp); 2279 /* Enable interrupts. */ 2280 VGE_LOCK(sc); 2281 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2282 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2283 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2284 ifp->if_capenable &= ~IFCAP_POLLING; 2285 VGE_UNLOCK(sc); 2286 } 2287 } 2288 #endif /* DEVICE_POLLING */ 2289 VGE_LOCK(sc); 2290 if ((mask & IFCAP_TXCSUM) != 0 && 2291 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2292 ifp->if_capenable ^= IFCAP_TXCSUM; 2293 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2294 ifp->if_hwassist |= VGE_CSUM_FEATURES; 2295 else 2296 ifp->if_hwassist &= ~VGE_CSUM_FEATURES; 2297 } 2298 if ((mask & IFCAP_RXCSUM) != 0 && 2299 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 2300 ifp->if_capenable ^= IFCAP_RXCSUM; 2301 if ((mask & IFCAP_WOL_UCAST) != 0 && 2302 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2303 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2304 if ((mask & IFCAP_WOL_MCAST) != 0 && 2305 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2306 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2307 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2308 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2309 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2310 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2311 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2312 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2313 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2314 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 2315 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2316 vge_setvlan(sc); 2317 } 2318 VGE_UNLOCK(sc); 2319 VLAN_CAPABILITIES(ifp); 2320 break; 2321 default: 2322 error = ether_ioctl(ifp, command, data); 2323 break; 2324 } 2325 2326 return (error); 2327 } 2328 2329 static void 2330 vge_watchdog(void *arg) 2331 { 2332 struct vge_softc *sc; 2333 struct ifnet *ifp; 2334 2335 sc = arg; 2336 VGE_LOCK_ASSERT(sc); 2337 vge_stats_update(sc); 2338 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2339 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2340 return; 2341 2342 ifp = sc->vge_ifp; 2343 if_printf(ifp, "watchdog timeout\n"); 2344 ifp->if_oerrors++; 2345 2346 vge_txeof(sc); 2347 vge_rxeof(sc, VGE_RX_DESC_CNT); 2348 2349 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2350 vge_init_locked(sc); 2351 } 2352 2353 /* 2354 * Stop the adapter and free any mbufs allocated to the 2355 * RX and TX lists. 2356 */ 2357 static void 2358 vge_stop(struct vge_softc *sc) 2359 { 2360 struct ifnet *ifp; 2361 2362 VGE_LOCK_ASSERT(sc); 2363 ifp = sc->vge_ifp; 2364 sc->vge_timer = 0; 2365 callout_stop(&sc->vge_watchdog); 2366 2367 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2368 2369 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2370 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2371 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2372 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2373 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2374 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2375 2376 vge_stats_update(sc); 2377 VGE_CHAIN_RESET(sc); 2378 vge_txeof(sc); 2379 vge_freebufs(sc); 2380 } 2381 2382 /* 2383 * Device suspend routine. Stop the interface and save some PCI 2384 * settings in case the BIOS doesn't restore them properly on 2385 * resume. 
2386 */ 2387 static int 2388 vge_suspend(device_t dev) 2389 { 2390 struct vge_softc *sc; 2391 2392 sc = device_get_softc(dev); 2393 2394 VGE_LOCK(sc); 2395 vge_stop(sc); 2396 vge_setwol(sc); 2397 sc->vge_flags |= VGE_FLAG_SUSPENDED; 2398 VGE_UNLOCK(sc); 2399 2400 return (0); 2401 } 2402 2403 /* 2404 * Device resume routine. Restore some PCI settings in case the BIOS 2405 * doesn't, re-enable busmastering, and restart the interface if 2406 * appropriate. 2407 */ 2408 static int 2409 vge_resume(device_t dev) 2410 { 2411 struct vge_softc *sc; 2412 struct ifnet *ifp; 2413 uint16_t pmstat; 2414 2415 sc = device_get_softc(dev); 2416 VGE_LOCK(sc); 2417 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) { 2418 /* Disable PME and clear PME status. */ 2419 pmstat = pci_read_config(sc->vge_dev, 2420 sc->vge_pmcap + PCIR_POWER_STATUS, 2); 2421 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2422 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2423 pci_write_config(sc->vge_dev, 2424 sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2425 } 2426 } 2427 vge_clrwol(sc); 2428 /* Restart MII auto-polling. */ 2429 vge_miipoll_start(sc); 2430 ifp = sc->vge_ifp; 2431 /* Reinitialize interface if necessary. */ 2432 if ((ifp->if_flags & IFF_UP) != 0) { 2433 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2434 vge_init_locked(sc); 2435 } 2436 sc->vge_flags &= ~VGE_FLAG_SUSPENDED; 2437 VGE_UNLOCK(sc); 2438 2439 return (0); 2440 } 2441 2442 /* 2443 * Stop all chip I/O so that the kernel's probe routines don't 2444 * get confused by errant DMAs when rebooting. 2445 */ 2446 static int 2447 vge_shutdown(device_t dev) 2448 { 2449 2450 return (vge_suspend(dev)); 2451 } 2452 2453 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2454 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2455 2456 static void 2457 vge_sysctl_node(struct vge_softc *sc) 2458 { 2459 struct sysctl_ctx_list *ctx; 2460 struct sysctl_oid_list *child, *parent; 2461 struct sysctl_oid *tree; 2462 struct vge_hw_stats *stats; 2463 2464 stats = &sc->vge_stats; 2465 ctx = device_get_sysctl_ctx(sc->vge_dev); 2466 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev)); 2467 2468 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff", 2469 CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff"); 2470 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt", 2471 CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet"); 2472 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt", 2473 CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet"); 2474 2475 /* Pull in device tunables. */ 2476 sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT; 2477 resource_int_value(device_get_name(sc->vge_dev), 2478 device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff); 2479 sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT; 2480 resource_int_value(device_get_name(sc->vge_dev), 2481 device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt); 2482 sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT; 2483 resource_int_value(device_get_name(sc->vge_dev), 2484 device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt); 2485 2486 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2487 NULL, "VGE statistics"); 2488 parent = SYSCTL_CHILDREN(tree); 2489 2490 /* Rx statistics. 
*/
2491 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2492 NULL, "RX MAC statistics");
2493 child = SYSCTL_CHILDREN(tree);
2494 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
2495 &stats->rx_frames, "frames");
2496 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2497 &stats->rx_good_frames, "Good frames");
2498 VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2499 &stats->rx_fifo_oflows, "FIFO overflows");
2500 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
2501 &stats->rx_runts, "Too short frames");
2502 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
2503 &stats->rx_runts_errs, "Too short frames with errors");
2504 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2505 &stats->rx_pkts_64, "64 bytes frames");
2506 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2507 &stats->rx_pkts_65_127, "65 to 127 bytes frames");
2508 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2509 &stats->rx_pkts_128_255, "128 to 255 bytes frames");
2510 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2511 &stats->rx_pkts_256_511, "256 to 511 bytes frames");
2512 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2513 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
2514 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2515 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
2516 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
2517 &stats->rx_pkts_1519_max, "1519 to max frames");
2518 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
2519 &stats->rx_pkts_1519_max_errs, "1519 to max frames with error");
2520 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2521 &stats->rx_jumbos, "Jumbo frames");
2522 VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2523 &stats->rx_crcerrs, "CRC errors");
2524 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2525 &stats->rx_pause_frames, "Pause frames");
2526 VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2527 &stats->rx_alignerrs, "Alignment errors");
2528 VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
2529 &stats->rx_nobufs, "Frames with no buffer event");
2530 VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2531 &stats->rx_symerrs, "Frames with symbol errors");
2532 VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2533 &stats->rx_lenerrs, "Frames with mismatched length");
2534
2535 /* Tx statistics.
*/ 2536 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2537 NULL, "TX MAC statistics"); 2538 child = SYSCTL_CHILDREN(tree); 2539 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2540 &stats->tx_good_frames, "Good frames"); 2541 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2542 &stats->tx_pkts_64, "64 bytes frames"); 2543 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2544 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 2545 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2546 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 2547 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2548 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 2549 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2550 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 2551 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2552 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2553 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2554 &stats->tx_jumbos, "Jumbo frames"); 2555 VGE_SYSCTL_STAT_ADD32(ctx, child, "colls", 2556 &stats->tx_colls, "Collisions"); 2557 VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2558 &stats->tx_latecolls, "Late collisions"); 2559 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2560 &stats->tx_pause, "Pause frames"); 2561 #ifdef VGE_ENABLE_SQEERR 2562 VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs", 2563 &stats->tx_sqeerrs, "SQE errors"); 2564 #endif 2565 /* Clear MAC statistics. */ 2566 vge_stats_clear(sc); 2567 } 2568 2569 #undef VGE_SYSCTL_STAT_ADD32 2570 2571 static void 2572 vge_stats_clear(struct vge_softc *sc) 2573 { 2574 int i; 2575 2576 CSR_WRITE_1(sc, VGE_MIBCSR, 2577 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE); 2578 CSR_WRITE_1(sc, VGE_MIBCSR, 2579 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR); 2580 for (i = VGE_TIMEOUT; i > 0; i--) { 2581 DELAY(1); 2582 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0) 2583 break; 2584 } 2585 if (i == 0) 2586 device_printf(sc->vge_dev, "MIB clear timed out!\n"); 2587 CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) & 2588 ~VGE_MIBCSR_FREEZE); 2589 } 2590 2591 static void 2592 vge_stats_update(struct vge_softc *sc) 2593 { 2594 struct vge_hw_stats *stats; 2595 struct ifnet *ifp; 2596 uint32_t mib[VGE_MIB_CNT], val; 2597 int i; 2598 2599 VGE_LOCK_ASSERT(sc); 2600 2601 stats = &sc->vge_stats; 2602 ifp = sc->vge_ifp; 2603 2604 CSR_WRITE_1(sc, VGE_MIBCSR, 2605 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH); 2606 for (i = VGE_TIMEOUT; i > 0; i--) { 2607 DELAY(1); 2608 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0) 2609 break; 2610 } 2611 if (i == 0) { 2612 device_printf(sc->vge_dev, "MIB counter dump timed out!\n"); 2613 vge_stats_clear(sc); 2614 return; 2615 } 2616 2617 bzero(mib, sizeof(mib)); 2618 reset_idx: 2619 /* Set MIB read index to 0. */ 2620 CSR_WRITE_1(sc, VGE_MIBCSR, 2621 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI); 2622 for (i = 0; i < VGE_MIB_CNT; i++) { 2623 val = CSR_READ_4(sc, VGE_MIBDATA); 2624 if (i != VGE_MIB_DATA_IDX(val)) { 2625 /* Reading interrupted. */ 2626 goto reset_idx; 2627 } 2628 mib[i] = val & VGE_MIB_DATA_MASK; 2629 } 2630 2631 /* Rx stats. 
*/
2632 stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
2633 stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
2634 stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
2635 stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
2636 stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
2637 stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
2638 stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
2639 stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
2640 stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
2641 stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
2642 stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
2643 stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
2644 stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
2645 stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
2646 stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
2647 stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
2648 stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
2649 stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
2650 stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
2651 stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];
2652
2653 /* Tx stats. */
2654 stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
2655 stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
2656 stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
2657 stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
2658 stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
2659 stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
2660 stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
2661 stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
2662 stats->tx_colls += mib[VGE_MIB_TX_COLLS];
2663 stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
2664 #ifdef VGE_ENABLE_SQEERR
2665 stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
2666 #endif
2667 stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];
2668
2669 /* Update counters in ifnet. */
2670 ifp->if_opackets += mib[VGE_MIB_TX_GOOD_FRAMES];
2671
2672 ifp->if_collisions += mib[VGE_MIB_TX_COLLS] +
2673 mib[VGE_MIB_TX_LATECOLLS];
2674
2675 ifp->if_oerrors += mib[VGE_MIB_TX_COLLS] +
2676 mib[VGE_MIB_TX_LATECOLLS];
2677
2678 ifp->if_ipackets += mib[VGE_MIB_RX_GOOD_FRAMES];
2679
2680 ifp->if_ierrors += mib[VGE_MIB_RX_FIFO_OVERRUNS] +
2681 mib[VGE_MIB_RX_RUNTS] +
2682 mib[VGE_MIB_RX_RUNTS_ERRS] +
2683 mib[VGE_MIB_RX_CRCERRS] +
2684 mib[VGE_MIB_RX_ALIGNERRS] +
2685 mib[VGE_MIB_RX_NOBUFS] +
2686 mib[VGE_MIB_RX_SYMERRS] +
2687 mib[VGE_MIB_RX_LENERRS];
2688 }
2689
2690 static void
2691 vge_intr_holdoff(struct vge_softc *sc)
2692 {
2693 uint8_t intctl;
2694
2695 VGE_LOCK_ASSERT(sc);
2696
2697 /*
2698 * Set Tx interrupt suppression threshold.
2699 * It is possible to use the single-shot timer in the VGE_CRS1
2700 * register in the Tx path so that the driver can suppress most
2701 * Tx completion interrupts. However, this requires an additional
2702 * access to the VGE_CRS1 register to reload the timer in addition
2703 * to activating the Tx kick command. Another downside is that we
2704 * don't know in advance what single-shot timer value should be
2705 * used, so reclaiming transmitted mbufs could be delayed
2706 * considerably, which in turn slows down Tx operation.
2707 */
2708 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
2709 CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);
2710
2711 /* Set Rx interrupt suppression threshold.
*/
2712 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2713 CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);
2714
2715 intctl = CSR_READ_1(sc, VGE_INTCTL1);
2716 intctl &= ~VGE_INTCTL_SC_RELOAD;
2717 intctl |= VGE_INTCTL_HC_RELOAD;
2718 if (sc->vge_tx_coal_pkt <= 0)
2719 intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
2720 else
2721 intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
2722 if (sc->vge_rx_coal_pkt <= 0)
2723 intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
2724 else
2725 intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
2726 CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
2727 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
2728 if (sc->vge_int_holdoff > 0) {
2729 /* Set interrupt holdoff timer. */
2730 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2731 CSR_WRITE_1(sc, VGE_INTHOLDOFF,
2732 VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
2733 /* Enable holdoff timer. */
2734 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2735 }
2736 }
2737
2738 static void
2739 vge_setlinkspeed(struct vge_softc *sc)
2740 {
2741 struct mii_data *mii;
2742 int aneg, i;
2743
2744 VGE_LOCK_ASSERT(sc);
2745
2746 mii = device_get_softc(sc->vge_miibus);
2747 mii_pollstat(mii);
2748 aneg = 0;
2749 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2750 (IFM_ACTIVE | IFM_AVALID)) {
2751 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2752 case IFM_10_T:
2753 case IFM_100_TX:
2754 return;
2755 case IFM_1000_T:
2756 aneg++; /* FALLTHROUGH */
2757 default:
2758 break;
2759 }
2760 }
2761 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
2762 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
2763 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
2764 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2765 BMCR_AUTOEN | BMCR_STARTNEG);
2766 DELAY(1000);
2767 if (aneg != 0) {
2768 /* Poll link state until vge(4) gets a 10/100 link. */
2769 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
2770 mii_pollstat(mii);
2771 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
2772 == (IFM_ACTIVE | IFM_AVALID)) {
2773 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2774 case IFM_10_T:
2775 case IFM_100_TX:
2776 return;
2777 default:
2778 break;
2779 }
2780 }
2781 VGE_UNLOCK(sc);
2782 pause("vgelnk", hz);
2783 VGE_LOCK(sc);
2784 }
2785 if (i == MII_ANEGTICKS_GIGE)
2786 device_printf(sc->vge_dev, "establishing link failed, "
2787 "WOL may not work!\n");
2788 }
2789 /*
2790 * No link; force the MAC to a 100Mbps, full-duplex link.
2791 * This is the last resort and may or may not work.
2792 */
2793 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
2794 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
2795 }
2796
2797 static void
2798 vge_setwol(struct vge_softc *sc)
2799 {
2800 struct ifnet *ifp;
2801 uint16_t pmstat;
2802 uint8_t val;
2803
2804 VGE_LOCK_ASSERT(sc);
2805
2806 if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
2807 /* No PME capability, PHY power down. */
2808 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2809 BMCR_PDOWN);
2810 vge_miipoll_stop(sc);
2811 return;
2812 }
2813
2814 ifp = sc->vge_ifp;
2815
2816 /* Clear WOL on pattern match. */
2817 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
2818 /* Disable WOL on magic/unicast packet.
*/ 2819 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2820 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2821 VGE_WOLCFG_PMEOVR); 2822 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2823 vge_setlinkspeed(sc); 2824 val = 0; 2825 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2826 val |= VGE_WOLCR1_UCAST; 2827 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2828 val |= VGE_WOLCR1_MAGIC; 2829 CSR_WRITE_1(sc, VGE_WOLCR1S, val); 2830 val = 0; 2831 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2832 val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB; 2833 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR); 2834 /* Disable MII auto-polling. */ 2835 vge_miipoll_stop(sc); 2836 } 2837 CSR_SETBIT_1(sc, VGE_DIAGCTL, 2838 VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE); 2839 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2840 2841 /* Clear WOL status on pattern match. */ 2842 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2843 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2844 2845 val = CSR_READ_1(sc, VGE_PWRSTAT); 2846 val |= VGE_STICKHW_SWPTAG; 2847 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2848 /* Put hardware into sleep. */ 2849 val = CSR_READ_1(sc, VGE_PWRSTAT); 2850 val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1; 2851 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2852 /* Request PME if WOL is requested. */ 2853 pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap + 2854 PCIR_POWER_STATUS, 2); 2855 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2856 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2857 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2858 pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS, 2859 pmstat, 2); 2860 } 2861 2862 static void 2863 vge_clrwol(struct vge_softc *sc) 2864 { 2865 uint8_t val; 2866 2867 val = CSR_READ_1(sc, VGE_PWRSTAT); 2868 val &= ~VGE_STICKHW_SWPTAG; 2869 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2870 /* Disable WOL and clear power state indicator. */ 2871 val = CSR_READ_1(sc, VGE_PWRSTAT); 2872 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); 2873 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2874 2875 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2876 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2877 2878 /* Clear WOL on pattern match. */ 2879 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2880 /* Disable WOL on magic/unicast packet. */ 2881 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2882 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2883 VGE_WOLCFG_PMEOVR); 2884 /* Clear WOL status on pattern match. */ 2885 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2886 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2887 } 2888