1 /*- 2 * Copyright (c) 2004 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44 /* 45 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that 46 * combines a tri-speed ethernet MAC and PHY, with the following 47 * features: 48 * 49 * o Jumbo frame support up to 16K 50 * o Transmit and receive flow control 51 * o IPv4 checksum offload 52 * o VLAN tag insertion and stripping 53 * o TCP large send 54 * o 64-bit multicast hash table filter 55 * o 64 entry CAM filter 56 * o 16K RX FIFO and 48K TX FIFO memory 57 * o Interrupt moderation 58 * 59 * The VT6122 supports up to four transmit DMA queues. The descriptors 60 * in the transmit ring can address up to 7 data fragments; frames which 61 * span more than 7 data buffers must be coalesced, but in general the 62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 63 * long. The receive descriptors address only a single buffer. 64 * 65 * There are two peculiar design issues with the VT6122. One is that 66 * receive data buffers must be aligned on a 32-bit boundary. This is 67 * not a problem where the VT6122 is used as a LOM device in x86-based 68 * systems, but on architectures that generate unaligned access traps, we 69 * have to do some copying. 70 * 71 * The other issue has to do with the way 64-bit addresses are handled. 72 * The DMA descriptors only allow you to specify 48 bits of addressing 73 * information. The remaining 16 bits are specified using one of the 74 * I/O registers. 
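 * (An illustrative sketch, using the VGE_ADDR_LO()/VGE_ADDR_HI() macros
 * that appear throughout this file: a 64-bit bus address 'a' is
 * programmed roughly as
 *
 *	VGE_ADDR_LO(a)	bits 0-31, the descriptor's vge_addrlo word
 *	VGE_ADDR_HI(a)	bits 32-47, the low half of the vge_addrhi word
 *
 * so each descriptor can only express 48 bits on its own; the upper
 * bits have to be common to all buffers, which is the 'segment'
 * restriction described next.)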
If you only have a 32-bit system, then this isn't
75 * an issue, but if you have a 64-bit system and more than 4GB of
76 * memory, you must make sure your network data buffers reside
77 * in the same 48-bit 'segment.'
78 *
79 * Special thanks to Ryan Fu at VIA Networking for providing documentation
80 * and sample NICs for testing.
81 */
82
83 #ifdef HAVE_KERNEL_OPTION_HEADERS
84 #include "opt_device_polling.h"
85 #endif
86
87 #include <sys/param.h>
88 #include <sys/endian.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/module.h>
94 #include <sys/kernel.h>
95 #include <sys/socket.h>
96 #include <sys/sysctl.h>
97
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/ethernet.h>
101 #include <net/if_dl.h>
102 #include <net/if_var.h>
103 #include <net/if_media.h>
104 #include <net/if_types.h>
105 #include <net/if_vlan_var.h>
106
107 #include <net/bpf.h>
108
109 #include <machine/bus.h>
110 #include <machine/resource.h>
111 #include <sys/bus.h>
112 #include <sys/rman.h>
113
114 #include <dev/mii/mii.h>
115 #include <dev/mii/miivar.h>
116
117 #include <dev/pci/pcireg.h>
118 #include <dev/pci/pcivar.h>
119
120 MODULE_DEPEND(vge, pci, 1, 1, 1);
121 MODULE_DEPEND(vge, ether, 1, 1, 1);
122 MODULE_DEPEND(vge, miibus, 1, 1, 1);
123
124 /* "device miibus" required. See GENERIC if you get errors here. */
125 #include "miibus_if.h"
126
127 #include <dev/vge/if_vgereg.h>
128 #include <dev/vge/if_vgevar.h>
129
130 #define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
131
132 /* Tunables */
133 static int msi_disable = 0;
134 TUNABLE_INT("hw.vge.msi_disable", &msi_disable);
135
136 /*
137 * The SQE error counter of the MIB seems to report bogus values.
138 * The vendor's workaround does not seem to work on PCIe based
139 * controllers. Disable it until we find a better workaround.
140 */
141 #undef VGE_ENABLE_SQEERR
142
143 /*
144 * Various supported device vendors/types and their names.
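 * The table below is terminated by an all-zero entry and is walked by
 * vge_probe(). Purely as a hypothetical illustration, an additional
 * entry (with a made-up device ID) would take the form:
 *
 *	{ VIA_VENDORID, 0x1234, "Hypothetical VT612x variant" },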
145 */ 146 static struct vge_type vge_devs[] = { 147 { VIA_VENDORID, VIA_DEVICEID_61XX, 148 "VIA Networking Velocity Gigabit Ethernet" }, 149 { 0, 0, NULL } 150 }; 151 152 static int vge_attach(device_t); 153 static int vge_detach(device_t); 154 static int vge_probe(device_t); 155 static int vge_resume(device_t); 156 static int vge_shutdown(device_t); 157 static int vge_suspend(device_t); 158 159 static void vge_cam_clear(struct vge_softc *); 160 static int vge_cam_set(struct vge_softc *, uint8_t *); 161 static void vge_clrwol(struct vge_softc *); 162 static void vge_discard_rxbuf(struct vge_softc *, int); 163 static int vge_dma_alloc(struct vge_softc *); 164 static void vge_dma_free(struct vge_softc *); 165 static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 166 #ifdef VGE_EEPROM 167 static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 168 #endif 169 static int vge_encap(struct vge_softc *, struct mbuf **); 170 #ifndef __NO_STRICT_ALIGNMENT 171 static __inline void 172 vge_fixup_rx(struct mbuf *); 173 #endif 174 static void vge_freebufs(struct vge_softc *); 175 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 176 static int vge_ifmedia_upd(struct ifnet *); 177 static int vge_ifmedia_upd_locked(struct vge_softc *); 178 static void vge_init(void *); 179 static void vge_init_locked(struct vge_softc *); 180 static void vge_intr(void *); 181 static void vge_intr_holdoff(struct vge_softc *); 182 static int vge_ioctl(struct ifnet *, u_long, caddr_t); 183 static void vge_link_statchg(void *); 184 static int vge_miibus_readreg(device_t, int, int); 185 static int vge_miibus_writereg(device_t, int, int, int); 186 static void vge_miipoll_start(struct vge_softc *); 187 static void vge_miipoll_stop(struct vge_softc *); 188 static int vge_newbuf(struct vge_softc *, int); 189 static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 190 static void vge_reset(struct vge_softc *); 191 static int vge_rx_list_init(struct vge_softc *); 192 static int vge_rxeof(struct vge_softc *, int); 193 static void vge_rxfilter(struct vge_softc *); 194 static void vge_setmedia(struct vge_softc *); 195 static void vge_setvlan(struct vge_softc *); 196 static void vge_setwol(struct vge_softc *); 197 static void vge_start(struct ifnet *); 198 static void vge_start_locked(struct ifnet *); 199 static void vge_stats_clear(struct vge_softc *); 200 static void vge_stats_update(struct vge_softc *); 201 static void vge_stop(struct vge_softc *); 202 static void vge_sysctl_node(struct vge_softc *); 203 static int vge_tx_list_init(struct vge_softc *); 204 static void vge_txeof(struct vge_softc *); 205 static void vge_watchdog(void *); 206 207 static device_method_t vge_methods[] = { 208 /* Device interface */ 209 DEVMETHOD(device_probe, vge_probe), 210 DEVMETHOD(device_attach, vge_attach), 211 DEVMETHOD(device_detach, vge_detach), 212 DEVMETHOD(device_suspend, vge_suspend), 213 DEVMETHOD(device_resume, vge_resume), 214 DEVMETHOD(device_shutdown, vge_shutdown), 215 216 /* MII interface */ 217 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 218 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 219 220 DEVMETHOD_END 221 }; 222 223 static driver_t vge_driver = { 224 "vge", 225 vge_methods, 226 sizeof(struct vge_softc) 227 }; 228 229 static devclass_t vge_devclass; 230 231 DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 232 DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); 233 234 #ifdef VGE_EEPROM 235 /* 236 * Read a word of data stored in the EEPROM at 
address 'addr.' 237 */ 238 static void 239 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 240 { 241 int i; 242 uint16_t word = 0; 243 244 /* 245 * Enter EEPROM embedded programming mode. In order to 246 * access the EEPROM at all, we first have to set the 247 * EELOAD bit in the CHIPCFG2 register. 248 */ 249 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 250 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 251 252 /* Select the address of the word we want to read */ 253 CSR_WRITE_1(sc, VGE_EEADDR, addr); 254 255 /* Issue read command */ 256 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 257 258 /* Wait for the done bit to be set. */ 259 for (i = 0; i < VGE_TIMEOUT; i++) { 260 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 261 break; 262 } 263 264 if (i == VGE_TIMEOUT) { 265 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 266 *dest = 0; 267 return; 268 } 269 270 /* Read the result */ 271 word = CSR_READ_2(sc, VGE_EERDDAT); 272 273 /* Turn off EEPROM access mode. */ 274 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 275 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 276 277 *dest = word; 278 } 279 #endif 280 281 /* 282 * Read a sequence of words from the EEPROM. 283 */ 284 static void 285 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 286 { 287 int i; 288 #ifdef VGE_EEPROM 289 uint16_t word = 0, *ptr; 290 291 for (i = 0; i < cnt; i++) { 292 vge_eeprom_getword(sc, off + i, &word); 293 ptr = (uint16_t *)(dest + (i * 2)); 294 if (swap) 295 *ptr = ntohs(word); 296 else 297 *ptr = word; 298 } 299 #else 300 for (i = 0; i < ETHER_ADDR_LEN; i++) 301 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 302 #endif 303 } 304 305 static void 306 vge_miipoll_stop(struct vge_softc *sc) 307 { 308 int i; 309 310 CSR_WRITE_1(sc, VGE_MIICMD, 0); 311 312 for (i = 0; i < VGE_TIMEOUT; i++) { 313 DELAY(1); 314 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 315 break; 316 } 317 318 if (i == VGE_TIMEOUT) 319 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 320 } 321 322 static void 323 vge_miipoll_start(struct vge_softc *sc) 324 { 325 int i; 326 327 /* First, make sure we're idle. */ 328 329 CSR_WRITE_1(sc, VGE_MIICMD, 0); 330 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 331 332 for (i = 0; i < VGE_TIMEOUT; i++) { 333 DELAY(1); 334 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 335 break; 336 } 337 338 if (i == VGE_TIMEOUT) { 339 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 340 return; 341 } 342 343 /* Now enable auto poll mode. */ 344 345 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 346 347 /* And make sure it started. */ 348 349 for (i = 0; i < VGE_TIMEOUT; i++) { 350 DELAY(1); 351 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 352 break; 353 } 354 355 if (i == VGE_TIMEOUT) 356 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 357 } 358 359 static int 360 vge_miibus_readreg(device_t dev, int phy, int reg) 361 { 362 struct vge_softc *sc; 363 int i; 364 uint16_t rval = 0; 365 366 sc = device_get_softc(dev); 367 368 vge_miipoll_stop(sc); 369 370 /* Specify the register we want to read. */ 371 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 372 373 /* Issue read command. */ 374 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 375 376 /* Wait for the read command bit to self-clear. 
*/ 377 for (i = 0; i < VGE_TIMEOUT; i++) { 378 DELAY(1); 379 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 380 break; 381 } 382 383 if (i == VGE_TIMEOUT) 384 device_printf(sc->vge_dev, "MII read timed out\n"); 385 else 386 rval = CSR_READ_2(sc, VGE_MIIDATA); 387 388 vge_miipoll_start(sc); 389 390 return (rval); 391 } 392 393 static int 394 vge_miibus_writereg(device_t dev, int phy, int reg, int data) 395 { 396 struct vge_softc *sc; 397 int i, rval = 0; 398 399 sc = device_get_softc(dev); 400 401 vge_miipoll_stop(sc); 402 403 /* Specify the register we want to write. */ 404 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 405 406 /* Specify the data we want to write. */ 407 CSR_WRITE_2(sc, VGE_MIIDATA, data); 408 409 /* Issue write command. */ 410 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 411 412 /* Wait for the write command bit to self-clear. */ 413 for (i = 0; i < VGE_TIMEOUT; i++) { 414 DELAY(1); 415 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 416 break; 417 } 418 419 if (i == VGE_TIMEOUT) { 420 device_printf(sc->vge_dev, "MII write timed out\n"); 421 rval = EIO; 422 } 423 424 vge_miipoll_start(sc); 425 426 return (rval); 427 } 428 429 static void 430 vge_cam_clear(struct vge_softc *sc) 431 { 432 int i; 433 434 /* 435 * Turn off all the mask bits. This tells the chip 436 * that none of the entries in the CAM filter are valid. 437 * desired entries will be enabled as we fill the filter in. 438 */ 439 440 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 441 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 442 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 443 for (i = 0; i < 8; i++) 444 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 445 446 /* Clear the VLAN filter too. */ 447 448 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); 449 for (i = 0; i < 8; i++) 450 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 451 452 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 453 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 454 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 455 456 sc->vge_camidx = 0; 457 } 458 459 static int 460 vge_cam_set(struct vge_softc *sc, uint8_t *addr) 461 { 462 int i, error = 0; 463 464 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 465 return (ENOSPC); 466 467 /* Select the CAM data page. */ 468 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 469 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 470 471 /* Set the filter entry we want to update and enable writing. */ 472 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); 473 474 /* Write the address to the CAM registers */ 475 for (i = 0; i < ETHER_ADDR_LEN; i++) 476 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 477 478 /* Issue a write command. */ 479 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 480 481 /* Wake for it to clear. */ 482 for (i = 0; i < VGE_TIMEOUT; i++) { 483 DELAY(1); 484 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 485 break; 486 } 487 488 if (i == VGE_TIMEOUT) { 489 device_printf(sc->vge_dev, "setting CAM filter failed\n"); 490 error = EIO; 491 goto fail; 492 } 493 494 /* Select the CAM mask page. */ 495 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 496 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 497 498 /* Set the mask bit that enables this filter. */ 499 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 500 1<<(sc->vge_camidx & 7)); 501 502 sc->vge_camidx++; 503 504 fail: 505 /* Turn off access to CAM. 
*/ 506 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 507 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 508 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 509 510 return (error); 511 } 512 513 static void 514 vge_setvlan(struct vge_softc *sc) 515 { 516 struct ifnet *ifp; 517 uint8_t cfg; 518 519 VGE_LOCK_ASSERT(sc); 520 521 ifp = sc->vge_ifp; 522 cfg = CSR_READ_1(sc, VGE_RXCFG); 523 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 524 cfg |= VGE_VTAG_OPT2; 525 else 526 cfg &= ~VGE_VTAG_OPT2; 527 CSR_WRITE_1(sc, VGE_RXCFG, cfg); 528 } 529 530 /* 531 * Program the multicast filter. We use the 64-entry CAM filter 532 * for perfect filtering. If there's more than 64 multicast addresses, 533 * we use the hash filter instead. 534 */ 535 static void 536 vge_rxfilter(struct vge_softc *sc) 537 { 538 struct ifnet *ifp; 539 struct ifmultiaddr *ifma; 540 uint32_t h, hashes[2]; 541 uint8_t rxcfg; 542 int error = 0; 543 544 VGE_LOCK_ASSERT(sc); 545 546 /* First, zot all the multicast entries. */ 547 hashes[0] = 0; 548 hashes[1] = 0; 549 550 rxcfg = CSR_READ_1(sc, VGE_RXCTL); 551 rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST | 552 VGE_RXCTL_RX_PROMISC); 553 /* 554 * Always allow VLAN oversized frames and frames for 555 * this host. 556 */ 557 rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST; 558 559 ifp = sc->vge_ifp; 560 if ((ifp->if_flags & IFF_BROADCAST) != 0) 561 rxcfg |= VGE_RXCTL_RX_BCAST; 562 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 563 if ((ifp->if_flags & IFF_PROMISC) != 0) 564 rxcfg |= VGE_RXCTL_RX_PROMISC; 565 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 566 hashes[0] = 0xFFFFFFFF; 567 hashes[1] = 0xFFFFFFFF; 568 } 569 goto done; 570 } 571 572 vge_cam_clear(sc); 573 /* Now program new ones */ 574 if_maddr_rlock(ifp); 575 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 576 if (ifma->ifma_addr->sa_family != AF_LINK) 577 continue; 578 error = vge_cam_set(sc, 579 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 580 if (error) 581 break; 582 } 583 584 /* If there were too many addresses, use the hash filter. */ 585 if (error) { 586 vge_cam_clear(sc); 587 588 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 589 if (ifma->ifma_addr->sa_family != AF_LINK) 590 continue; 591 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 592 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 593 if (h < 32) 594 hashes[0] |= (1 << h); 595 else 596 hashes[1] |= (1 << (h - 32)); 597 } 598 } 599 if_maddr_runlock(ifp); 600 601 done: 602 if (hashes[0] != 0 || hashes[1] != 0) 603 rxcfg |= VGE_RXCTL_RX_MCAST; 604 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 605 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 606 CSR_WRITE_1(sc, VGE_RXCTL, rxcfg); 607 } 608 609 static void 610 vge_reset(struct vge_softc *sc) 611 { 612 int i; 613 614 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 615 616 for (i = 0; i < VGE_TIMEOUT; i++) { 617 DELAY(5); 618 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 619 break; 620 } 621 622 if (i == VGE_TIMEOUT) { 623 device_printf(sc->vge_dev, "soft reset timed out\n"); 624 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 625 DELAY(2000); 626 } 627 628 DELAY(5000); 629 } 630 631 /* 632 * Probe for a VIA gigabit chip. Check the PCI vendor and device 633 * IDs against our list and return a device name if we find a match. 
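 * On a match we return BUS_PROBE_DEFAULT, which still lets a more
 * specific driver outbid us; returning ENXIO tells newbus the device
 * is not ours.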
634 */ 635 static int 636 vge_probe(device_t dev) 637 { 638 struct vge_type *t; 639 640 t = vge_devs; 641 642 while (t->vge_name != NULL) { 643 if ((pci_get_vendor(dev) == t->vge_vid) && 644 (pci_get_device(dev) == t->vge_did)) { 645 device_set_desc(dev, t->vge_name); 646 return (BUS_PROBE_DEFAULT); 647 } 648 t++; 649 } 650 651 return (ENXIO); 652 } 653 654 /* 655 * Map a single buffer address. 656 */ 657 658 struct vge_dmamap_arg { 659 bus_addr_t vge_busaddr; 660 }; 661 662 static void 663 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 664 { 665 struct vge_dmamap_arg *ctx; 666 667 if (error != 0) 668 return; 669 670 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 671 672 ctx = (struct vge_dmamap_arg *)arg; 673 ctx->vge_busaddr = segs[0].ds_addr; 674 } 675 676 static int 677 vge_dma_alloc(struct vge_softc *sc) 678 { 679 struct vge_dmamap_arg ctx; 680 struct vge_txdesc *txd; 681 struct vge_rxdesc *rxd; 682 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 683 int error, i; 684 685 /* 686 * It seems old PCI controllers do not support DAC. DAC 687 * configuration can be enabled by accessing VGE_CHIPCFG3 688 * register but honor EEPROM configuration instead of 689 * blindly overriding DAC configuration. PCIe based 690 * controllers are supposed to support 64bit DMA so enable 691 * 64bit DMA on these controllers. 692 */ 693 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 694 lowaddr = BUS_SPACE_MAXADDR; 695 else 696 lowaddr = BUS_SPACE_MAXADDR_32BIT; 697 698 again: 699 /* Create parent ring tag. */ 700 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 701 1, 0, /* algnmnt, boundary */ 702 lowaddr, /* lowaddr */ 703 BUS_SPACE_MAXADDR, /* highaddr */ 704 NULL, NULL, /* filter, filterarg */ 705 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 706 0, /* nsegments */ 707 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 708 0, /* flags */ 709 NULL, NULL, /* lockfunc, lockarg */ 710 &sc->vge_cdata.vge_ring_tag); 711 if (error != 0) { 712 device_printf(sc->vge_dev, 713 "could not create parent DMA tag.\n"); 714 goto fail; 715 } 716 717 /* Create tag for Tx ring. */ 718 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 719 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 720 BUS_SPACE_MAXADDR, /* lowaddr */ 721 BUS_SPACE_MAXADDR, /* highaddr */ 722 NULL, NULL, /* filter, filterarg */ 723 VGE_TX_LIST_SZ, /* maxsize */ 724 1, /* nsegments */ 725 VGE_TX_LIST_SZ, /* maxsegsize */ 726 0, /* flags */ 727 NULL, NULL, /* lockfunc, lockarg */ 728 &sc->vge_cdata.vge_tx_ring_tag); 729 if (error != 0) { 730 device_printf(sc->vge_dev, 731 "could not allocate Tx ring DMA tag.\n"); 732 goto fail; 733 } 734 735 /* Create tag for Rx ring. */ 736 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 737 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 738 BUS_SPACE_MAXADDR, /* lowaddr */ 739 BUS_SPACE_MAXADDR, /* highaddr */ 740 NULL, NULL, /* filter, filterarg */ 741 VGE_RX_LIST_SZ, /* maxsize */ 742 1, /* nsegments */ 743 VGE_RX_LIST_SZ, /* maxsegsize */ 744 0, /* flags */ 745 NULL, NULL, /* lockfunc, lockarg */ 746 &sc->vge_cdata.vge_rx_ring_tag); 747 if (error != 0) { 748 device_printf(sc->vge_dev, 749 "could not allocate Rx ring DMA tag.\n"); 750 goto fail; 751 } 752 753 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
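 * (For ring memory obtained from bus_dmamem_alloc(), the
 * bus_dmamap_load() call below is expected to complete synchronously,
 * so vge_dmamap_cb() has already stored the bus address in
 * ctx.vge_busaddr when it returns; that is why a still-zero
 * ctx.vge_busaddr is treated as a failure.)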
*/ 754 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 755 (void **)&sc->vge_rdata.vge_tx_ring, 756 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 757 &sc->vge_cdata.vge_tx_ring_map); 758 if (error != 0) { 759 device_printf(sc->vge_dev, 760 "could not allocate DMA'able memory for Tx ring.\n"); 761 goto fail; 762 } 763 764 ctx.vge_busaddr = 0; 765 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 766 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 767 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 768 if (error != 0 || ctx.vge_busaddr == 0) { 769 device_printf(sc->vge_dev, 770 "could not load DMA'able memory for Tx ring.\n"); 771 goto fail; 772 } 773 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 774 775 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 776 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 777 (void **)&sc->vge_rdata.vge_rx_ring, 778 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 779 &sc->vge_cdata.vge_rx_ring_map); 780 if (error != 0) { 781 device_printf(sc->vge_dev, 782 "could not allocate DMA'able memory for Rx ring.\n"); 783 goto fail; 784 } 785 786 ctx.vge_busaddr = 0; 787 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 788 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 789 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 790 if (error != 0 || ctx.vge_busaddr == 0) { 791 device_printf(sc->vge_dev, 792 "could not load DMA'able memory for Rx ring.\n"); 793 goto fail; 794 } 795 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 796 797 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 798 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 799 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 800 if ((VGE_ADDR_HI(tx_ring_end) != 801 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 802 (VGE_ADDR_HI(rx_ring_end) != 803 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 804 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 805 device_printf(sc->vge_dev, "4GB boundary crossed, " 806 "switching to 32bit DMA address mode.\n"); 807 vge_dma_free(sc); 808 /* Limit DMA address space to 32bit and try again. */ 809 lowaddr = BUS_SPACE_MAXADDR_32BIT; 810 goto again; 811 } 812 813 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 814 lowaddr = VGE_BUF_DMA_MAXADDR; 815 else 816 lowaddr = BUS_SPACE_MAXADDR_32BIT; 817 /* Create parent buffer tag. */ 818 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 819 1, 0, /* algnmnt, boundary */ 820 lowaddr, /* lowaddr */ 821 BUS_SPACE_MAXADDR, /* highaddr */ 822 NULL, NULL, /* filter, filterarg */ 823 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 824 0, /* nsegments */ 825 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 826 0, /* flags */ 827 NULL, NULL, /* lockfunc, lockarg */ 828 &sc->vge_cdata.vge_buffer_tag); 829 if (error != 0) { 830 device_printf(sc->vge_dev, 831 "could not create parent buffer DMA tag.\n"); 832 goto fail; 833 } 834 835 /* Create tag for Tx buffers. 
*/ 836 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 837 1, 0, /* algnmnt, boundary */ 838 BUS_SPACE_MAXADDR, /* lowaddr */ 839 BUS_SPACE_MAXADDR, /* highaddr */ 840 NULL, NULL, /* filter, filterarg */ 841 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 842 VGE_MAXTXSEGS, /* nsegments */ 843 MCLBYTES, /* maxsegsize */ 844 0, /* flags */ 845 NULL, NULL, /* lockfunc, lockarg */ 846 &sc->vge_cdata.vge_tx_tag); 847 if (error != 0) { 848 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 849 goto fail; 850 } 851 852 /* Create tag for Rx buffers. */ 853 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 854 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 855 BUS_SPACE_MAXADDR, /* lowaddr */ 856 BUS_SPACE_MAXADDR, /* highaddr */ 857 NULL, NULL, /* filter, filterarg */ 858 MCLBYTES, /* maxsize */ 859 1, /* nsegments */ 860 MCLBYTES, /* maxsegsize */ 861 0, /* flags */ 862 NULL, NULL, /* lockfunc, lockarg */ 863 &sc->vge_cdata.vge_rx_tag); 864 if (error != 0) { 865 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 866 goto fail; 867 } 868 869 /* Create DMA maps for Tx buffers. */ 870 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 871 txd = &sc->vge_cdata.vge_txdesc[i]; 872 txd->tx_m = NULL; 873 txd->tx_dmamap = NULL; 874 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 875 &txd->tx_dmamap); 876 if (error != 0) { 877 device_printf(sc->vge_dev, 878 "could not create Tx dmamap.\n"); 879 goto fail; 880 } 881 } 882 /* Create DMA maps for Rx buffers. */ 883 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 884 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 885 device_printf(sc->vge_dev, 886 "could not create spare Rx dmamap.\n"); 887 goto fail; 888 } 889 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 890 rxd = &sc->vge_cdata.vge_rxdesc[i]; 891 rxd->rx_m = NULL; 892 rxd->rx_dmamap = NULL; 893 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 894 &rxd->rx_dmamap); 895 if (error != 0) { 896 device_printf(sc->vge_dev, 897 "could not create Rx dmamap.\n"); 898 goto fail; 899 } 900 } 901 902 fail: 903 return (error); 904 } 905 906 static void 907 vge_dma_free(struct vge_softc *sc) 908 { 909 struct vge_txdesc *txd; 910 struct vge_rxdesc *rxd; 911 int i; 912 913 /* Tx ring. */ 914 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 915 if (sc->vge_rdata.vge_tx_ring_paddr) 916 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 917 sc->vge_cdata.vge_tx_ring_map); 918 if (sc->vge_rdata.vge_tx_ring) 919 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 920 sc->vge_rdata.vge_tx_ring, 921 sc->vge_cdata.vge_tx_ring_map); 922 sc->vge_rdata.vge_tx_ring = NULL; 923 sc->vge_rdata.vge_tx_ring_paddr = 0; 924 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 925 sc->vge_cdata.vge_tx_ring_tag = NULL; 926 } 927 /* Rx ring. */ 928 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 929 if (sc->vge_rdata.vge_rx_ring_paddr) 930 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 931 sc->vge_cdata.vge_rx_ring_map); 932 if (sc->vge_rdata.vge_rx_ring) 933 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 934 sc->vge_rdata.vge_rx_ring, 935 sc->vge_cdata.vge_rx_ring_map); 936 sc->vge_rdata.vge_rx_ring = NULL; 937 sc->vge_rdata.vge_rx_ring_paddr = 0; 938 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 939 sc->vge_cdata.vge_rx_ring_tag = NULL; 940 } 941 /* Tx buffers. 
*/ 942 if (sc->vge_cdata.vge_tx_tag != NULL) { 943 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 944 txd = &sc->vge_cdata.vge_txdesc[i]; 945 if (txd->tx_dmamap != NULL) { 946 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 947 txd->tx_dmamap); 948 txd->tx_dmamap = NULL; 949 } 950 } 951 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 952 sc->vge_cdata.vge_tx_tag = NULL; 953 } 954 /* Rx buffers. */ 955 if (sc->vge_cdata.vge_rx_tag != NULL) { 956 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 957 rxd = &sc->vge_cdata.vge_rxdesc[i]; 958 if (rxd->rx_dmamap != NULL) { 959 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 960 rxd->rx_dmamap); 961 rxd->rx_dmamap = NULL; 962 } 963 } 964 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 965 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 966 sc->vge_cdata.vge_rx_sparemap); 967 sc->vge_cdata.vge_rx_sparemap = NULL; 968 } 969 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 970 sc->vge_cdata.vge_rx_tag = NULL; 971 } 972 973 if (sc->vge_cdata.vge_buffer_tag != NULL) { 974 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 975 sc->vge_cdata.vge_buffer_tag = NULL; 976 } 977 if (sc->vge_cdata.vge_ring_tag != NULL) { 978 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 979 sc->vge_cdata.vge_ring_tag = NULL; 980 } 981 } 982 983 /* 984 * Attach the interface. Allocate softc structures, do ifmedia 985 * setup and ethernet/BPF attach. 986 */ 987 static int 988 vge_attach(device_t dev) 989 { 990 u_char eaddr[ETHER_ADDR_LEN]; 991 struct vge_softc *sc; 992 struct ifnet *ifp; 993 int error = 0, cap, i, msic, rid; 994 995 sc = device_get_softc(dev); 996 sc->vge_dev = dev; 997 998 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 999 MTX_DEF); 1000 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 1001 1002 /* 1003 * Map control/status registers. 1004 */ 1005 pci_enable_busmaster(dev); 1006 1007 rid = PCIR_BAR(1); 1008 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1009 RF_ACTIVE); 1010 1011 if (sc->vge_res == NULL) { 1012 device_printf(dev, "couldn't map ports/memory\n"); 1013 error = ENXIO; 1014 goto fail; 1015 } 1016 1017 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 1018 sc->vge_flags |= VGE_FLAG_PCIE; 1019 sc->vge_expcap = cap; 1020 } else 1021 sc->vge_flags |= VGE_FLAG_JUMBO; 1022 if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) { 1023 sc->vge_flags |= VGE_FLAG_PMCAP; 1024 sc->vge_pmcap = cap; 1025 } 1026 rid = 0; 1027 msic = pci_msi_count(dev); 1028 if (msi_disable == 0 && msic > 0) { 1029 msic = 1; 1030 if (pci_alloc_msi(dev, &msic) == 0) { 1031 if (msic == 1) { 1032 sc->vge_flags |= VGE_FLAG_MSI; 1033 device_printf(dev, "Using %d MSI message\n", 1034 msic); 1035 rid = 1; 1036 } else 1037 pci_release_msi(dev); 1038 } 1039 } 1040 1041 /* Allocate interrupt */ 1042 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1043 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 1044 if (sc->vge_irq == NULL) { 1045 device_printf(dev, "couldn't map interrupt\n"); 1046 error = ENXIO; 1047 goto fail; 1048 } 1049 1050 /* Reset the adapter. */ 1051 vge_reset(sc); 1052 /* Reload EEPROM. */ 1053 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 1054 for (i = 0; i < VGE_TIMEOUT; i++) { 1055 DELAY(5); 1056 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1057 break; 1058 } 1059 if (i == VGE_TIMEOUT) 1060 device_printf(dev, "EEPROM reload timed out\n"); 1061 /* 1062 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1063 * MAC will receive magic packet which in turn confuses 1064 * controller. 
1065 */ 1066 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1067 1068 /* 1069 * Get station address from the EEPROM. 1070 */ 1071 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1072 /* 1073 * Save configured PHY address. 1074 * It seems the PHY address of PCIe controllers just 1075 * reflects media jump strapping status so we assume the 1076 * internal PHY address of PCIe controller is at 1. 1077 */ 1078 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1079 sc->vge_phyaddr = 1; 1080 else 1081 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1082 VGE_MIICFG_PHYADDR; 1083 /* Clear WOL and take hardware from powerdown. */ 1084 vge_clrwol(sc); 1085 vge_sysctl_node(sc); 1086 error = vge_dma_alloc(sc); 1087 if (error) 1088 goto fail; 1089 1090 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1091 if (ifp == NULL) { 1092 device_printf(dev, "can not if_alloc()\n"); 1093 error = ENOSPC; 1094 goto fail; 1095 } 1096 1097 vge_miipoll_start(sc); 1098 /* Do MII setup */ 1099 error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd, 1100 vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY, 1101 MIIF_DOPAUSE); 1102 if (error != 0) { 1103 device_printf(dev, "attaching PHYs failed\n"); 1104 goto fail; 1105 } 1106 1107 ifp->if_softc = sc; 1108 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1109 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1110 ifp->if_ioctl = vge_ioctl; 1111 ifp->if_capabilities = IFCAP_VLAN_MTU; 1112 ifp->if_start = vge_start; 1113 ifp->if_hwassist = VGE_CSUM_FEATURES; 1114 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | 1115 IFCAP_VLAN_HWTAGGING; 1116 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) 1117 ifp->if_capabilities |= IFCAP_WOL; 1118 ifp->if_capenable = ifp->if_capabilities; 1119 #ifdef DEVICE_POLLING 1120 ifp->if_capabilities |= IFCAP_POLLING; 1121 #endif 1122 ifp->if_init = vge_init; 1123 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1); 1124 ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1; 1125 IFQ_SET_READY(&ifp->if_snd); 1126 1127 /* 1128 * Call MI attach routine. 1129 */ 1130 ether_ifattach(ifp, eaddr); 1131 1132 /* Tell the upper layer(s) we support long frames. */ 1133 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1134 1135 /* Hook interrupt last to avoid having to lock softc */ 1136 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1137 NULL, vge_intr, sc, &sc->vge_intrhand); 1138 1139 if (error) { 1140 device_printf(dev, "couldn't set up irq\n"); 1141 ether_ifdetach(ifp); 1142 goto fail; 1143 } 1144 1145 fail: 1146 if (error) 1147 vge_detach(dev); 1148 1149 return (error); 1150 } 1151 1152 /* 1153 * Shutdown hardware and free up resources. This can be called any 1154 * time after the mutex has been initialized. It is called in both 1155 * the error case in attach and the normal detach case so it needs 1156 * to be careful about only freeing resources that have actually been 1157 * allocated. 
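 * In particular, the attach error path jumps to its 'fail:' label and
 * calls vge_detach() directly, which is why every release below is
 * guarded by a NULL pointer or flag check.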
1158 */
1159 static int
1160 vge_detach(device_t dev)
1161 {
1162 struct vge_softc *sc;
1163 struct ifnet *ifp;
1164
1165 sc = device_get_softc(dev);
1166 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
1167 ifp = sc->vge_ifp;
1168
1169 #ifdef DEVICE_POLLING
1170 if (ifp->if_capenable & IFCAP_POLLING)
1171 ether_poll_deregister(ifp);
1172 #endif
1173
1174 /* These should only be active if attach succeeded */
1175 if (device_is_attached(dev)) {
1176 ether_ifdetach(ifp);
1177 VGE_LOCK(sc);
1178 vge_stop(sc);
1179 VGE_UNLOCK(sc);
1180 callout_drain(&sc->vge_watchdog);
1181 }
1182 if (sc->vge_miibus)
1183 device_delete_child(dev, sc->vge_miibus);
1184 bus_generic_detach(dev);
1185
1186 if (sc->vge_intrhand)
1187 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1188 if (sc->vge_irq)
1189 bus_release_resource(dev, SYS_RES_IRQ,
1190 sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
1191 if (sc->vge_flags & VGE_FLAG_MSI)
1192 pci_release_msi(dev);
1193 if (sc->vge_res)
1194 bus_release_resource(dev, SYS_RES_MEMORY,
1195 PCIR_BAR(1), sc->vge_res);
1196 if (ifp)
1197 if_free(ifp);
1198
1199 vge_dma_free(sc);
1200 mtx_destroy(&sc->vge_mtx);
1201
1202 return (0);
1203 }
1204
1205 static void
1206 vge_discard_rxbuf(struct vge_softc *sc, int prod)
1207 {
1208 struct vge_rxdesc *rxd;
1209 int i;
1210
1211 rxd = &sc->vge_cdata.vge_rxdesc[prod];
1212 rxd->rx_desc->vge_sts = 0;
1213 rxd->rx_desc->vge_ctl = 0;
1214
1215 /*
1216 * Note: the manual fails to document the fact that for
1217 * proper operation, the driver needs to replenish the RX
1218 * DMA ring 4 descriptors at a time (rather than one at a
1219 * time, like most chips). We can allocate the new buffers
1220 * but we should not set the OWN bits until we're ready
1221 * to hand back 4 of them in one shot.
1222 */
1223 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
1224 for (i = VGE_RXCHUNK; i > 0; i--) {
1225 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
1226 rxd = rxd->rxd_prev;
1227 }
1228 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
1229 }
1230 }
1231
1232 static int
1233 vge_newbuf(struct vge_softc *sc, int prod)
1234 {
1235 struct vge_rxdesc *rxd;
1236 struct mbuf *m;
1237 bus_dma_segment_t segs[1];
1238 bus_dmamap_t map;
1239 int i, nsegs;
1240
1241 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1242 if (m == NULL)
1243 return (ENOBUFS);
1244 /*
1245 * This is part of an evil trick to deal with strict-alignment
1246 * architectures. The VIA chip requires RX buffers to be aligned
1247 * on 32-bit boundaries, but that will hose strict-alignment
1248 * architectures. To get around this, we leave some empty space
1249 * at the start of each buffer and, on strict-alignment hosts,
1250 * we copy the buffer back two bytes to achieve word alignment.
1251 * This is slightly more efficient than allocating a new buffer,
1252 * copying the contents, and discarding the old buffer.
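 * As a rough sketch of the resulting layout (assuming VGE_RX_BUF_ALIGN
 * is 4 and ETHER_ALIGN is 2, their usual values):
 *
 *	cluster + 0: VGE_RX_BUF_ALIGN bytes reserved by m_adj() below
 *	cluster + 4: frame as DMA'ed by the chip (32-bit aligned)
 *
 * On strict-alignment machines vge_fixup_rx() later slides the data
 * back by ETHER_ALIGN bytes, so the IP header that follows the 14-byte
 * Ethernet header ends up on a 32-bit boundary.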
1253 */ 1254 m->m_len = m->m_pkthdr.len = MCLBYTES; 1255 m_adj(m, VGE_RX_BUF_ALIGN); 1256 1257 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1258 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1259 m_freem(m); 1260 return (ENOBUFS); 1261 } 1262 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1263 1264 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1265 if (rxd->rx_m != NULL) { 1266 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1267 BUS_DMASYNC_POSTREAD); 1268 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1269 } 1270 map = rxd->rx_dmamap; 1271 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1272 sc->vge_cdata.vge_rx_sparemap = map; 1273 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1274 BUS_DMASYNC_PREREAD); 1275 rxd->rx_m = m; 1276 1277 rxd->rx_desc->vge_sts = 0; 1278 rxd->rx_desc->vge_ctl = 0; 1279 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1280 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1281 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1282 1283 /* 1284 * Note: the manual fails to document the fact that for 1285 * proper operation, the driver needs to replenish the RX 1286 * DMA ring 4 descriptors at a time (rather than one at a 1287 * time, like most chips). We can allocate the new buffers 1288 * but we should not set the OWN bits until we're ready 1289 * to hand back 4 of them in one shot. 1290 */ 1291 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1292 for (i = VGE_RXCHUNK; i > 0; i--) { 1293 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1294 rxd = rxd->rxd_prev; 1295 } 1296 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1297 } 1298 1299 return (0); 1300 } 1301 1302 static int 1303 vge_tx_list_init(struct vge_softc *sc) 1304 { 1305 struct vge_ring_data *rd; 1306 struct vge_txdesc *txd; 1307 int i; 1308 1309 VGE_LOCK_ASSERT(sc); 1310 1311 sc->vge_cdata.vge_tx_prodidx = 0; 1312 sc->vge_cdata.vge_tx_considx = 0; 1313 sc->vge_cdata.vge_tx_cnt = 0; 1314 1315 rd = &sc->vge_rdata; 1316 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1317 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1318 txd = &sc->vge_cdata.vge_txdesc[i]; 1319 txd->tx_m = NULL; 1320 txd->tx_desc = &rd->vge_tx_ring[i]; 1321 } 1322 1323 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1324 sc->vge_cdata.vge_tx_ring_map, 1325 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1326 1327 return (0); 1328 } 1329 1330 static int 1331 vge_rx_list_init(struct vge_softc *sc) 1332 { 1333 struct vge_ring_data *rd; 1334 struct vge_rxdesc *rxd; 1335 int i; 1336 1337 VGE_LOCK_ASSERT(sc); 1338 1339 sc->vge_cdata.vge_rx_prodidx = 0; 1340 sc->vge_cdata.vge_head = NULL; 1341 sc->vge_cdata.vge_tail = NULL; 1342 sc->vge_cdata.vge_rx_commit = 0; 1343 1344 rd = &sc->vge_rdata; 1345 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1346 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1347 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1348 rxd->rx_m = NULL; 1349 rxd->rx_desc = &rd->vge_rx_ring[i]; 1350 if (i == 0) 1351 rxd->rxd_prev = 1352 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1353 else 1354 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1355 if (vge_newbuf(sc, i) != 0) 1356 return (ENOBUFS); 1357 } 1358 1359 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1360 sc->vge_cdata.vge_rx_ring_map, 1361 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1362 1363 sc->vge_cdata.vge_rx_commit = 0; 1364 1365 return (0); 1366 } 1367 1368 static void 1369 vge_freebufs(struct vge_softc *sc) 1370 { 1371 struct vge_txdesc *txd; 1372 struct vge_rxdesc *rxd; 1373 struct ifnet *ifp; 
1374 int i; 1375 1376 VGE_LOCK_ASSERT(sc); 1377 1378 ifp = sc->vge_ifp; 1379 /* 1380 * Free RX and TX mbufs still in the queues. 1381 */ 1382 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1383 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1384 if (rxd->rx_m != NULL) { 1385 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, 1386 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 1387 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, 1388 rxd->rx_dmamap); 1389 m_freem(rxd->rx_m); 1390 rxd->rx_m = NULL; 1391 } 1392 } 1393 1394 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1395 txd = &sc->vge_cdata.vge_txdesc[i]; 1396 if (txd->tx_m != NULL) { 1397 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, 1398 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1399 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, 1400 txd->tx_dmamap); 1401 m_freem(txd->tx_m); 1402 txd->tx_m = NULL; 1403 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1404 } 1405 } 1406 } 1407 1408 #ifndef __NO_STRICT_ALIGNMENT 1409 static __inline void 1410 vge_fixup_rx(struct mbuf *m) 1411 { 1412 int i; 1413 uint16_t *src, *dst; 1414 1415 src = mtod(m, uint16_t *); 1416 dst = src - 1; 1417 1418 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1419 *dst++ = *src++; 1420 1421 m->m_data -= ETHER_ALIGN; 1422 } 1423 #endif 1424 1425 /* 1426 * RX handler. We support the reception of jumbo frames that have 1427 * been fragmented across multiple 2K mbuf cluster buffers. 1428 */ 1429 static int 1430 vge_rxeof(struct vge_softc *sc, int count) 1431 { 1432 struct mbuf *m; 1433 struct ifnet *ifp; 1434 int prod, prog, total_len; 1435 struct vge_rxdesc *rxd; 1436 struct vge_rx_desc *cur_rx; 1437 uint32_t rxstat, rxctl; 1438 1439 VGE_LOCK_ASSERT(sc); 1440 1441 ifp = sc->vge_ifp; 1442 1443 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1444 sc->vge_cdata.vge_rx_ring_map, 1445 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1446 1447 prod = sc->vge_cdata.vge_rx_prodidx; 1448 for (prog = 0; count > 0 && 1449 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1450 VGE_RX_DESC_INC(prod)) { 1451 cur_rx = &sc->vge_rdata.vge_rx_ring[prod]; 1452 rxstat = le32toh(cur_rx->vge_sts); 1453 if ((rxstat & VGE_RDSTS_OWN) != 0) 1454 break; 1455 count--; 1456 prog++; 1457 rxctl = le32toh(cur_rx->vge_ctl); 1458 total_len = VGE_RXBYTES(rxstat); 1459 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1460 m = rxd->rx_m; 1461 1462 /* 1463 * If the 'start of frame' bit is set, this indicates 1464 * either the first fragment in a multi-fragment receive, 1465 * or an intermediate fragment. Either way, we want to 1466 * accumulate the buffers. 1467 */ 1468 if ((rxstat & VGE_RXPKT_SOF) != 0) { 1469 if (vge_newbuf(sc, prod) != 0) { 1470 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1471 VGE_CHAIN_RESET(sc); 1472 vge_discard_rxbuf(sc, prod); 1473 continue; 1474 } 1475 m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN; 1476 if (sc->vge_cdata.vge_head == NULL) { 1477 sc->vge_cdata.vge_head = m; 1478 sc->vge_cdata.vge_tail = m; 1479 } else { 1480 m->m_flags &= ~M_PKTHDR; 1481 sc->vge_cdata.vge_tail->m_next = m; 1482 sc->vge_cdata.vge_tail = m; 1483 } 1484 continue; 1485 } 1486 1487 /* 1488 * Bad/error frames will have the RXOK bit cleared. 1489 * However, there's one error case we want to allow: 1490 * if a VLAN tagged frame arrives and the chip can't 1491 * match it against the CAM filter, it considers this 1492 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1493 * We don't want to drop the frame though: our VLAN 1494 * filtering is done in software. 1495 * We also want to receive bad-checksummed frames and 1496 * and frames with bad-length. 
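 * Concretely, the test below discards a frame (and bumps the input
 * error counter) only when RXOK is clear and none of VIDM, RLERR or
 * CSUMERR is set; a frame whose only flagged problem is a VLAN CAM
 * miss (VGE_RDSTS_VIDM), for example, falls through and is processed
 * normally.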
1497 */ 1498 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1499 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1500 VGE_RDSTS_CSUMERR)) == 0) { 1501 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1502 /* 1503 * If this is part of a multi-fragment packet, 1504 * discard all the pieces. 1505 */ 1506 VGE_CHAIN_RESET(sc); 1507 vge_discard_rxbuf(sc, prod); 1508 continue; 1509 } 1510 1511 if (vge_newbuf(sc, prod) != 0) { 1512 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1513 VGE_CHAIN_RESET(sc); 1514 vge_discard_rxbuf(sc, prod); 1515 continue; 1516 } 1517 1518 /* Chain received mbufs. */ 1519 if (sc->vge_cdata.vge_head != NULL) { 1520 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1521 /* 1522 * Special case: if there's 4 bytes or less 1523 * in this buffer, the mbuf can be discarded: 1524 * the last 4 bytes is the CRC, which we don't 1525 * care about anyway. 1526 */ 1527 if (m->m_len <= ETHER_CRC_LEN) { 1528 sc->vge_cdata.vge_tail->m_len -= 1529 (ETHER_CRC_LEN - m->m_len); 1530 m_freem(m); 1531 } else { 1532 m->m_len -= ETHER_CRC_LEN; 1533 m->m_flags &= ~M_PKTHDR; 1534 sc->vge_cdata.vge_tail->m_next = m; 1535 } 1536 m = sc->vge_cdata.vge_head; 1537 m->m_flags |= M_PKTHDR; 1538 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1539 } else { 1540 m->m_flags |= M_PKTHDR; 1541 m->m_pkthdr.len = m->m_len = 1542 (total_len - ETHER_CRC_LEN); 1543 } 1544 1545 #ifndef __NO_STRICT_ALIGNMENT 1546 vge_fixup_rx(m); 1547 #endif 1548 m->m_pkthdr.rcvif = ifp; 1549 1550 /* Do RX checksumming if enabled */ 1551 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1552 (rxctl & VGE_RDCTL_FRAG) == 0) { 1553 /* Check IP header checksum */ 1554 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1555 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1556 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1557 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1558 1559 /* Check TCP/UDP checksum */ 1560 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1561 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1562 m->m_pkthdr.csum_flags |= 1563 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1564 m->m_pkthdr.csum_data = 0xffff; 1565 } 1566 } 1567 1568 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1569 /* 1570 * The 32-bit rxctl register is stored in little-endian. 1571 * However, the 16-bit vlan tag is stored in big-endian, 1572 * so we have to byte swap it. 1573 */ 1574 m->m_pkthdr.ether_vtag = 1575 bswap16(rxctl & VGE_RDCTL_VLANID); 1576 m->m_flags |= M_VLANTAG; 1577 } 1578 1579 VGE_UNLOCK(sc); 1580 (*ifp->if_input)(ifp, m); 1581 VGE_LOCK(sc); 1582 sc->vge_cdata.vge_head = NULL; 1583 sc->vge_cdata.vge_tail = NULL; 1584 } 1585 1586 if (prog > 0) { 1587 sc->vge_cdata.vge_rx_prodidx = prod; 1588 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1589 sc->vge_cdata.vge_rx_ring_map, 1590 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1591 /* Update residue counter. */ 1592 if (sc->vge_cdata.vge_rx_commit != 0) { 1593 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1594 sc->vge_cdata.vge_rx_commit); 1595 sc->vge_cdata.vge_rx_commit = 0; 1596 } 1597 } 1598 return (prog); 1599 } 1600 1601 static void 1602 vge_txeof(struct vge_softc *sc) 1603 { 1604 struct ifnet *ifp; 1605 struct vge_tx_desc *cur_tx; 1606 struct vge_txdesc *txd; 1607 uint32_t txstat; 1608 int cons, prod; 1609 1610 VGE_LOCK_ASSERT(sc); 1611 1612 ifp = sc->vge_ifp; 1613 1614 if (sc->vge_cdata.vge_tx_cnt == 0) 1615 return; 1616 1617 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1618 sc->vge_cdata.vge_tx_ring_map, 1619 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1620 1621 /* 1622 * Go through our tx list and free mbufs for those 1623 * frames that have been transmitted. 
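 * We walk from the consumer index towards the producer index and stop
 * at the first descriptor the chip still owns (VGE_TDSTS_OWN set).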
1624 */ 1625 cons = sc->vge_cdata.vge_tx_considx; 1626 prod = sc->vge_cdata.vge_tx_prodidx; 1627 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1628 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1629 txstat = le32toh(cur_tx->vge_sts); 1630 if ((txstat & VGE_TDSTS_OWN) != 0) 1631 break; 1632 sc->vge_cdata.vge_tx_cnt--; 1633 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1634 1635 txd = &sc->vge_cdata.vge_txdesc[cons]; 1636 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1637 BUS_DMASYNC_POSTWRITE); 1638 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1639 1640 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1641 __func__)); 1642 m_freem(txd->tx_m); 1643 txd->tx_m = NULL; 1644 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1645 } 1646 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1647 sc->vge_cdata.vge_tx_ring_map, 1648 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1649 sc->vge_cdata.vge_tx_considx = cons; 1650 if (sc->vge_cdata.vge_tx_cnt == 0) 1651 sc->vge_timer = 0; 1652 } 1653 1654 static void 1655 vge_link_statchg(void *xsc) 1656 { 1657 struct vge_softc *sc; 1658 struct ifnet *ifp; 1659 uint8_t physts; 1660 1661 sc = xsc; 1662 ifp = sc->vge_ifp; 1663 VGE_LOCK_ASSERT(sc); 1664 1665 physts = CSR_READ_1(sc, VGE_PHYSTS0); 1666 if ((physts & VGE_PHYSTS_RESETSTS) == 0) { 1667 if ((physts & VGE_PHYSTS_LINK) == 0) { 1668 sc->vge_flags &= ~VGE_FLAG_LINK; 1669 if_link_state_change(sc->vge_ifp, 1670 LINK_STATE_DOWN); 1671 } else { 1672 sc->vge_flags |= VGE_FLAG_LINK; 1673 if_link_state_change(sc->vge_ifp, 1674 LINK_STATE_UP); 1675 CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE | 1676 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1677 if ((physts & VGE_PHYSTS_FDX) != 0) { 1678 if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0) 1679 CSR_WRITE_1(sc, VGE_CRS2, 1680 VGE_CR2_FDX_TXFLOWCTL_ENABLE); 1681 if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0) 1682 CSR_WRITE_1(sc, VGE_CRS2, 1683 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1684 } 1685 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1686 vge_start_locked(ifp); 1687 } 1688 } 1689 /* 1690 * Restart MII auto-polling because link state change interrupt 1691 * will disable it. 1692 */ 1693 vge_miipoll_start(sc); 1694 } 1695 1696 #ifdef DEVICE_POLLING 1697 static int 1698 vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 1699 { 1700 struct vge_softc *sc = ifp->if_softc; 1701 int rx_npkts = 0; 1702 1703 VGE_LOCK(sc); 1704 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1705 goto done; 1706 1707 rx_npkts = vge_rxeof(sc, count); 1708 vge_txeof(sc); 1709 1710 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1711 vge_start_locked(ifp); 1712 1713 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1714 uint32_t status; 1715 status = CSR_READ_4(sc, VGE_ISR); 1716 if (status == 0xFFFFFFFF) 1717 goto done; 1718 if (status) 1719 CSR_WRITE_4(sc, VGE_ISR, status); 1720 1721 /* 1722 * XXX check behaviour on receiver stalls. 
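 * The recovery below mirrors vge_intr(): a TX/RX DMA stall forces a
 * full reinit via vge_init_locked(), while RX overflow or descriptor
 * exhaustion just drains the ring again and rewakes the RX queue.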
1723 */ 1724 1725 if (status & VGE_ISR_TXDMA_STALL || 1726 status & VGE_ISR_RXDMA_STALL) { 1727 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1728 vge_init_locked(sc); 1729 } 1730 1731 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1732 vge_rxeof(sc, count); 1733 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1734 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1735 } 1736 } 1737 done: 1738 VGE_UNLOCK(sc); 1739 return (rx_npkts); 1740 } 1741 #endif /* DEVICE_POLLING */ 1742 1743 static void 1744 vge_intr(void *arg) 1745 { 1746 struct vge_softc *sc; 1747 struct ifnet *ifp; 1748 uint32_t status; 1749 1750 sc = arg; 1751 VGE_LOCK(sc); 1752 1753 ifp = sc->vge_ifp; 1754 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 || 1755 (ifp->if_flags & IFF_UP) == 0) { 1756 VGE_UNLOCK(sc); 1757 return; 1758 } 1759 1760 #ifdef DEVICE_POLLING 1761 if (ifp->if_capenable & IFCAP_POLLING) { 1762 status = CSR_READ_4(sc, VGE_ISR); 1763 CSR_WRITE_4(sc, VGE_ISR, status); 1764 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0) 1765 vge_link_statchg(sc); 1766 VGE_UNLOCK(sc); 1767 return; 1768 } 1769 #endif 1770 1771 /* Disable interrupts */ 1772 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1773 status = CSR_READ_4(sc, VGE_ISR); 1774 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD); 1775 /* If the card has gone away the read returns 0xffff. */ 1776 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0) 1777 goto done; 1778 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1779 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1780 vge_rxeof(sc, VGE_RX_DESC_CNT); 1781 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1782 vge_rxeof(sc, VGE_RX_DESC_CNT); 1783 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1784 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1785 } 1786 1787 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO)) 1788 vge_txeof(sc); 1789 1790 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1791 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1792 vge_init_locked(sc); 1793 } 1794 1795 if (status & VGE_ISR_LINKSTS) 1796 vge_link_statchg(sc); 1797 } 1798 done: 1799 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1800 /* Re-enable interrupts */ 1801 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1802 1803 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1804 vge_start_locked(ifp); 1805 } 1806 VGE_UNLOCK(sc); 1807 } 1808 1809 static int 1810 vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1811 { 1812 struct vge_txdesc *txd; 1813 struct vge_tx_frag *frag; 1814 struct mbuf *m; 1815 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1816 int error, i, nsegs, padlen; 1817 uint32_t cflags; 1818 1819 VGE_LOCK_ASSERT(sc); 1820 1821 M_ASSERTPKTHDR((*m_head)); 1822 1823 /* Argh. This chip does not autopad short frames. */ 1824 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1825 m = *m_head; 1826 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1827 if (M_WRITABLE(m) == 0) { 1828 /* Get a writable copy. */ 1829 m = m_dup(*m_head, M_NOWAIT); 1830 m_freem(*m_head); 1831 if (m == NULL) { 1832 *m_head = NULL; 1833 return (ENOBUFS); 1834 } 1835 *m_head = m; 1836 } 1837 if (M_TRAILINGSPACE(m) < padlen) { 1838 m = m_defrag(m, M_NOWAIT); 1839 if (m == NULL) { 1840 m_freem(*m_head); 1841 *m_head = NULL; 1842 return (ENOBUFS); 1843 } 1844 } 1845 /* 1846 * Manually pad short frames, and zero the pad space 1847 * to avoid leaking data. 
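 * For example, assuming VGE_MIN_FRAMELEN is the usual 60-byte Ethernet
 * minimum (without the CRC), a 42-byte ARP request picks up 18 bytes
 * of zeroed padding here before the mbuf is handed to
 * bus_dmamap_load_mbuf_sg().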
1848 */ 1849 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1850 m->m_pkthdr.len += padlen; 1851 m->m_len = m->m_pkthdr.len; 1852 *m_head = m; 1853 } 1854 1855 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1856 1857 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1858 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1859 if (error == EFBIG) { 1860 m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS); 1861 if (m == NULL) { 1862 m_freem(*m_head); 1863 *m_head = NULL; 1864 return (ENOMEM); 1865 } 1866 *m_head = m; 1867 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1868 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1869 if (error != 0) { 1870 m_freem(*m_head); 1871 *m_head = NULL; 1872 return (error); 1873 } 1874 } else if (error != 0) 1875 return (error); 1876 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1877 BUS_DMASYNC_PREWRITE); 1878 1879 m = *m_head; 1880 cflags = 0; 1881 1882 /* Configure checksum offload. */ 1883 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1884 cflags |= VGE_TDCTL_IPCSUM; 1885 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1886 cflags |= VGE_TDCTL_TCPCSUM; 1887 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1888 cflags |= VGE_TDCTL_UDPCSUM; 1889 1890 /* Configure VLAN. */ 1891 if ((m->m_flags & M_VLANTAG) != 0) 1892 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1893 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1894 /* 1895 * XXX 1896 * Velocity family seems to support TSO but no information 1897 * for MSS configuration is available. Also the number of 1898 * fragments supported by a descriptor is too small to hold 1899 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1900 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1901 * longer chain of buffers but no additional information is 1902 * available. 1903 * 1904 * When telling the chip how many segments there are, we 1905 * must use nsegs + 1 instead of just nsegs. Darned if I 1906 * know why. This also means we can't use the last fragment 1907 * field of Tx descriptor. 1908 */ 1909 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1910 VGE_TD_LS_NORM); 1911 for (i = 0; i < nsegs; i++) { 1912 frag = &txd->tx_desc->vge_frag[i]; 1913 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1914 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1915 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1916 } 1917 1918 sc->vge_cdata.vge_tx_cnt++; 1919 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1920 1921 /* 1922 * Finally request interrupt and give the first descriptor 1923 * ownership to hardware. 1924 */ 1925 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1926 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1927 txd->tx_m = m; 1928 1929 return (0); 1930 } 1931 1932 /* 1933 * Main transmit routine. 
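 * vge_start() is the if_start entry point; it only takes the softc
 * lock and defers to vge_start_locked(), which dequeues packets, maps
 * them with vge_encap() and finally kicks TX queue 0 by writing
 * VGE_TXQCSR_WAK0.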
1934 */ 1935 1936 static void 1937 vge_start(struct ifnet *ifp) 1938 { 1939 struct vge_softc *sc; 1940 1941 sc = ifp->if_softc; 1942 VGE_LOCK(sc); 1943 vge_start_locked(ifp); 1944 VGE_UNLOCK(sc); 1945 } 1946 1947 1948 static void 1949 vge_start_locked(struct ifnet *ifp) 1950 { 1951 struct vge_softc *sc; 1952 struct vge_txdesc *txd; 1953 struct mbuf *m_head; 1954 int enq, idx; 1955 1956 sc = ifp->if_softc; 1957 1958 VGE_LOCK_ASSERT(sc); 1959 1960 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1961 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1962 IFF_DRV_RUNNING) 1963 return; 1964 1965 idx = sc->vge_cdata.vge_tx_prodidx; 1966 VGE_TX_DESC_DEC(idx); 1967 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1968 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1969 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1970 if (m_head == NULL) 1971 break; 1972 /* 1973 * Pack the data into the transmit ring. If we 1974 * don't have room, set the OACTIVE flag and wait 1975 * for the NIC to drain the ring. 1976 */ 1977 if (vge_encap(sc, &m_head)) { 1978 if (m_head == NULL) 1979 break; 1980 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1981 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1982 break; 1983 } 1984 1985 txd = &sc->vge_cdata.vge_txdesc[idx]; 1986 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1987 VGE_TX_DESC_INC(idx); 1988 1989 enq++; 1990 /* 1991 * If there's a BPF listener, bounce a copy of this frame 1992 * to him. 1993 */ 1994 ETHER_BPF_MTAP(ifp, m_head); 1995 } 1996 1997 if (enq > 0) { 1998 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1999 sc->vge_cdata.vge_tx_ring_map, 2000 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2001 /* Issue a transmit command. */ 2002 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 2003 /* 2004 * Set a timeout in case the chip goes out to lunch. 2005 */ 2006 sc->vge_timer = 5; 2007 } 2008 } 2009 2010 static void 2011 vge_init(void *xsc) 2012 { 2013 struct vge_softc *sc = xsc; 2014 2015 VGE_LOCK(sc); 2016 vge_init_locked(sc); 2017 VGE_UNLOCK(sc); 2018 } 2019 2020 static void 2021 vge_init_locked(struct vge_softc *sc) 2022 { 2023 struct ifnet *ifp = sc->vge_ifp; 2024 int error, i; 2025 2026 VGE_LOCK_ASSERT(sc); 2027 2028 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2029 return; 2030 2031 /* 2032 * Cancel pending I/O and free all RX/TX buffers. 2033 */ 2034 vge_stop(sc); 2035 vge_reset(sc); 2036 vge_miipoll_start(sc); 2037 2038 /* 2039 * Initialize the RX and TX descriptors and mbufs. 2040 */ 2041 2042 error = vge_rx_list_init(sc); 2043 if (error != 0) { 2044 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2045 return; 2046 } 2047 vge_tx_list_init(sc); 2048 /* Clear MAC statistics. */ 2049 vge_stats_clear(sc); 2050 /* Set our station address */ 2051 for (i = 0; i < ETHER_ADDR_LEN; i++) 2052 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); 2053 2054 /* 2055 * Set receive FIFO threshold. Also allow transmission and 2056 * reception of VLAN tagged frames. 
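* The threshold is set to 128 bytes just below; the VLAN tagging option itself is programmed later through vge_setvlan().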
2057 */ 2058 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 2059 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES); 2060 2061 /* Set DMA burst length */ 2062 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 2063 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 2064 2065 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 2066 2067 /* Set collision backoff algorithm */ 2068 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 2069 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 2070 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 2071 2072 /* Disable LPSEL field in priority resolution */ 2073 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 2074 2075 /* 2076 * Load the addresses of the DMA queues into the chip. 2077 * Note that we only use one transmit queue. 2078 */ 2079 2080 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR, 2081 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)); 2082 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 2083 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr)); 2084 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 2085 2086 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 2087 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr)); 2088 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 2089 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 2090 2091 /* Configure interrupt moderation. */ 2092 vge_intr_holdoff(sc); 2093 2094 /* Enable and wake up the RX descriptor queue */ 2095 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 2096 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 2097 2098 /* Enable the TX descriptor queue */ 2099 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 2100 2101 /* Init the CAM filter. */ 2102 vge_cam_clear(sc); 2103 2104 /* Set up receiver filter. */ 2105 vge_rxfilter(sc); 2106 vge_setvlan(sc); 2107 2108 /* Initialize pause timer. */ 2109 CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF); 2110 /* 2111 * Initialize flow control parameters. 2112 * TX XON high threshold : 48 2113 * TX pause low threshold : 24 2114 * Disable half-duplex flow control 2115 */ 2116 CSR_WRITE_1(sc, VGE_CRC2, 0xFF); 2117 CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B); 2118 2119 /* Enable jumbo frame reception (if desired) */ 2120 2121 /* Start the MAC. */ 2122 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 2123 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 2124 CSR_WRITE_1(sc, VGE_CRS0, 2125 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 2126 2127 #ifdef DEVICE_POLLING 2128 /* 2129 * Disable interrupts except link state change if we are polling. 2130 */ 2131 if (ifp->if_capenable & IFCAP_POLLING) { 2132 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2133 } else /* otherwise ... */ 2134 #endif 2135 { 2136 /* 2137 * Enable interrupts. 2138 */ 2139 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2140 } 2141 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2142 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2143 2144 sc->vge_flags &= ~VGE_FLAG_LINK; 2145 vge_ifmedia_upd_locked(sc); 2146 2147 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2148 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2149 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2150 } 2151 2152 /* 2153 * Set media options.
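* vge_ifmedia_upd() is the ifmedia 'change media' callback: it resets the PHYs, programs the forced MAC mode bits through vge_setmedia() and then lets mii_mediachg() restart negotiation.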
2154 */ 2155 static int 2156 vge_ifmedia_upd(struct ifnet *ifp) 2157 { 2158 struct vge_softc *sc; 2159 int error; 2160 2161 sc = ifp->if_softc; 2162 VGE_LOCK(sc); 2163 error = vge_ifmedia_upd_locked(sc); 2164 VGE_UNLOCK(sc); 2165 2166 return (error); 2167 } 2168 2169 static int 2170 vge_ifmedia_upd_locked(struct vge_softc *sc) 2171 { 2172 struct mii_data *mii; 2173 struct mii_softc *miisc; 2174 int error; 2175 2176 mii = device_get_softc(sc->vge_miibus); 2177 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2178 PHY_RESET(miisc); 2179 vge_setmedia(sc); 2180 error = mii_mediachg(mii); 2181 2182 return (error); 2183 } 2184 2185 /* 2186 * Report current media status. 2187 */ 2188 static void 2189 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2190 { 2191 struct vge_softc *sc; 2192 struct mii_data *mii; 2193 2194 sc = ifp->if_softc; 2195 mii = device_get_softc(sc->vge_miibus); 2196 2197 VGE_LOCK(sc); 2198 if ((ifp->if_flags & IFF_UP) == 0) { 2199 VGE_UNLOCK(sc); 2200 return; 2201 } 2202 mii_pollstat(mii); 2203 ifmr->ifm_active = mii->mii_media_active; 2204 ifmr->ifm_status = mii->mii_media_status; 2205 VGE_UNLOCK(sc); 2206 } 2207 2208 static void 2209 vge_setmedia(struct vge_softc *sc) 2210 { 2211 struct mii_data *mii; 2212 struct ifmedia_entry *ife; 2213 2214 mii = device_get_softc(sc->vge_miibus); 2215 ife = mii->mii_media.ifm_cur; 2216 2217 /* 2218 * If the user manually selects a media mode, we need to turn 2219 * on the forced MAC mode bit in the DIAGCTL register. If the 2220 * user happens to choose a full duplex mode, we also need to 2221 * set the 'force full duplex' bit. This applies only to 2222 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2223 * mode is disabled, and in 1000baseT mode, full duplex is 2224 * always implied, so we turn on the forced mode bit but leave 2225 * the FDX bit cleared. 
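* For example, "ifconfig vge0 media 100baseTX mediaopt full-duplex" sets both MACFORCE and FDXFORCE below, "media 1000baseT" sets only MACFORCE, and "media autoselect" clears both.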
2226 */ 2227 2228 switch (IFM_SUBTYPE(ife->ifm_media)) { 2229 case IFM_AUTO: 2230 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2231 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2232 break; 2233 case IFM_1000_T: 2234 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2235 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2236 break; 2237 case IFM_100_TX: 2238 case IFM_10_T: 2239 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2240 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2241 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2242 } else { 2243 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2244 } 2245 break; 2246 default: 2247 device_printf(sc->vge_dev, "unknown media type: %x\n", 2248 IFM_SUBTYPE(ife->ifm_media)); 2249 break; 2250 } 2251 } 2252 2253 static int 2254 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2255 { 2256 struct vge_softc *sc = ifp->if_softc; 2257 struct ifreq *ifr = (struct ifreq *) data; 2258 struct mii_data *mii; 2259 int error = 0, mask; 2260 2261 switch (command) { 2262 case SIOCSIFMTU: 2263 VGE_LOCK(sc); 2264 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU) 2265 error = EINVAL; 2266 else if (ifp->if_mtu != ifr->ifr_mtu) { 2267 if (ifr->ifr_mtu > ETHERMTU && 2268 (sc->vge_flags & VGE_FLAG_JUMBO) == 0) 2269 error = EINVAL; 2270 else 2271 ifp->if_mtu = ifr->ifr_mtu; 2272 } 2273 VGE_UNLOCK(sc); 2274 break; 2275 case SIOCSIFFLAGS: 2276 VGE_LOCK(sc); 2277 if ((ifp->if_flags & IFF_UP) != 0) { 2278 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2279 ((ifp->if_flags ^ sc->vge_if_flags) & 2280 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2281 vge_rxfilter(sc); 2282 else 2283 vge_init_locked(sc); 2284 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2285 vge_stop(sc); 2286 sc->vge_if_flags = ifp->if_flags; 2287 VGE_UNLOCK(sc); 2288 break; 2289 case SIOCADDMULTI: 2290 case SIOCDELMULTI: 2291 VGE_LOCK(sc); 2292 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2293 vge_rxfilter(sc); 2294 VGE_UNLOCK(sc); 2295 break; 2296 case SIOCGIFMEDIA: 2297 case SIOCSIFMEDIA: 2298 mii = device_get_softc(sc->vge_miibus); 2299 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2300 break; 2301 case SIOCSIFCAP: 2302 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2303 #ifdef DEVICE_POLLING 2304 if (mask & IFCAP_POLLING) { 2305 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2306 error = ether_poll_register(vge_poll, ifp); 2307 if (error) 2308 return (error); 2309 VGE_LOCK(sc); 2310 /* Disable interrupts */ 2311 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2312 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2313 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2314 ifp->if_capenable |= IFCAP_POLLING; 2315 VGE_UNLOCK(sc); 2316 } else { 2317 error = ether_poll_deregister(ifp); 2318 /* Enable interrupts. 
*/ 2319 VGE_LOCK(sc); 2320 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2321 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2322 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2323 ifp->if_capenable &= ~IFCAP_POLLING; 2324 VGE_UNLOCK(sc); 2325 } 2326 } 2327 #endif /* DEVICE_POLLING */ 2328 VGE_LOCK(sc); 2329 if ((mask & IFCAP_TXCSUM) != 0 && 2330 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2331 ifp->if_capenable ^= IFCAP_TXCSUM; 2332 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2333 ifp->if_hwassist |= VGE_CSUM_FEATURES; 2334 else 2335 ifp->if_hwassist &= ~VGE_CSUM_FEATURES; 2336 } 2337 if ((mask & IFCAP_RXCSUM) != 0 && 2338 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 2339 ifp->if_capenable ^= IFCAP_RXCSUM; 2340 if ((mask & IFCAP_WOL_UCAST) != 0 && 2341 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2342 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2343 if ((mask & IFCAP_WOL_MCAST) != 0 && 2344 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2345 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2346 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2347 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2348 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2349 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2350 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2351 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2352 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2353 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 2354 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2355 vge_setvlan(sc); 2356 } 2357 VGE_UNLOCK(sc); 2358 VLAN_CAPABILITIES(ifp); 2359 break; 2360 default: 2361 error = ether_ioctl(ifp, command, data); 2362 break; 2363 } 2364 2365 return (error); 2366 } 2367 2368 static void 2369 vge_watchdog(void *arg) 2370 { 2371 struct vge_softc *sc; 2372 struct ifnet *ifp; 2373 2374 sc = arg; 2375 VGE_LOCK_ASSERT(sc); 2376 vge_stats_update(sc); 2377 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2378 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2379 return; 2380 2381 ifp = sc->vge_ifp; 2382 if_printf(ifp, "watchdog timeout\n"); 2383 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2384 2385 vge_txeof(sc); 2386 vge_rxeof(sc, VGE_RX_DESC_CNT); 2387 2388 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2389 vge_init_locked(sc); 2390 } 2391 2392 /* 2393 * Stop the adapter and free any mbufs allocated to the 2394 * RX and TX lists. 2395 */ 2396 static void 2397 vge_stop(struct vge_softc *sc) 2398 { 2399 struct ifnet *ifp; 2400 2401 VGE_LOCK_ASSERT(sc); 2402 ifp = sc->vge_ifp; 2403 sc->vge_timer = 0; 2404 callout_stop(&sc->vge_watchdog); 2405 2406 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2407 2408 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2409 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2410 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2411 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2412 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2413 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2414 2415 vge_stats_update(sc); 2416 VGE_CHAIN_RESET(sc); 2417 vge_txeof(sc); 2418 vge_freebufs(sc); 2419 } 2420 2421 /* 2422 * Device suspend routine. Stop the interface and save some PCI 2423 * settings in case the BIOS doesn't restore them properly on 2424 * resume. 2425 */ 2426 static int 2427 vge_suspend(device_t dev) 2428 { 2429 struct vge_softc *sc; 2430 2431 sc = device_get_softc(dev); 2432 2433 VGE_LOCK(sc); 2434 vge_stop(sc); 2435 vge_setwol(sc); 2436 sc->vge_flags |= VGE_FLAG_SUSPENDED; 2437 VGE_UNLOCK(sc); 2438 2439 return (0); 2440 } 2441 2442 /* 2443 * Device resume routine. 
Restore some PCI settings in case the BIOS 2444 * doesn't, re-enable busmastering, and restart the interface if 2445 * appropriate. 2446 */ 2447 static int 2448 vge_resume(device_t dev) 2449 { 2450 struct vge_softc *sc; 2451 struct ifnet *ifp; 2452 uint16_t pmstat; 2453 2454 sc = device_get_softc(dev); 2455 VGE_LOCK(sc); 2456 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) { 2457 /* Disable PME and clear PME status. */ 2458 pmstat = pci_read_config(sc->vge_dev, 2459 sc->vge_pmcap + PCIR_POWER_STATUS, 2); 2460 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2461 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2462 pci_write_config(sc->vge_dev, 2463 sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2464 } 2465 } 2466 vge_clrwol(sc); 2467 /* Restart MII auto-polling. */ 2468 vge_miipoll_start(sc); 2469 ifp = sc->vge_ifp; 2470 /* Reinitialize interface if necessary. */ 2471 if ((ifp->if_flags & IFF_UP) != 0) { 2472 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2473 vge_init_locked(sc); 2474 } 2475 sc->vge_flags &= ~VGE_FLAG_SUSPENDED; 2476 VGE_UNLOCK(sc); 2477 2478 return (0); 2479 } 2480 2481 /* 2482 * Stop all chip I/O so that the kernel's probe routines don't 2483 * get confused by errant DMAs when rebooting. 2484 */ 2485 static int 2486 vge_shutdown(device_t dev) 2487 { 2488 2489 return (vge_suspend(dev)); 2490 } 2491 2492 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2493 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2494 2495 static void 2496 vge_sysctl_node(struct vge_softc *sc) 2497 { 2498 struct sysctl_ctx_list *ctx; 2499 struct sysctl_oid_list *child, *parent; 2500 struct sysctl_oid *tree; 2501 struct vge_hw_stats *stats; 2502 2503 stats = &sc->vge_stats; 2504 ctx = device_get_sysctl_ctx(sc->vge_dev); 2505 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev)); 2506 2507 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff", 2508 CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff"); 2509 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt", 2510 CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet"); 2511 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt", 2512 CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet"); 2513 2514 /* Pull in device tunables. */ 2515 sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT; 2516 resource_int_value(device_get_name(sc->vge_dev), 2517 device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff); 2518 sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT; 2519 resource_int_value(device_get_name(sc->vge_dev), 2520 device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt); 2521 sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT; 2522 resource_int_value(device_get_name(sc->vge_dev), 2523 device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt); 2524 2525 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2526 NULL, "VGE statistics"); 2527 parent = SYSCTL_CHILDREN(tree); 2528 2529 /* Rx statistics. 
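* The MAC counters below are exported read-only under the per-device sysctl tree (e.g. "sysctl dev.vge.0.stats.rx.good_frames"), while the int_holdoff, rx_coal_pkt and tx_coal_pkt knobs above are read-write and, being fetched with resource_int_value(), can also be preset from device hints such as hint.vge.0.int_holdoff.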
*/ 2530 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 2531 NULL, "RX MAC statistics"); 2532 child = SYSCTL_CHILDREN(tree); 2533 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames", 2534 &stats->rx_frames, "frames"); 2535 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2536 &stats->rx_good_frames, "Good frames"); 2537 VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 2538 &stats->rx_fifo_oflows, "FIFO overflows"); 2539 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts", 2540 &stats->rx_runts, "Too short frames"); 2541 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs", 2542 &stats->rx_runts_errs, "Too short frames with errors"); 2543 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2544 &stats->rx_pkts_64, "64 bytes frames"); 2545 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2546 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 2547 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2548 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 2549 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2550 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 2551 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2552 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 2553 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2554 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2555 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 2556 &stats->rx_pkts_1519_max, "1519 to max frames"); 2557 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs", 2558 &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors"); 2559 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2560 &stats->rx_jumbos, "Jumbo frames"); 2561 VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs", 2562 &stats->rx_crcerrs, "CRC errors"); 2563 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2564 &stats->rx_pause_frames, "Pause frames"); 2565 VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 2566 &stats->rx_alignerrs, "Alignment errors"); 2567 VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs", 2568 &stats->rx_nobufs, "Frames with no buffer event"); 2569 VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs", 2570 &stats->rx_symerrs, "Frames with symbol errors"); 2571 VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 2572 &stats->rx_lenerrs, "Frames with mismatched length"); 2573 2574 /* Tx statistics.
*/ 2575 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2576 NULL, "TX MAC statistics"); 2577 child = SYSCTL_CHILDREN(tree); 2578 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2579 &stats->tx_good_frames, "Good frames"); 2580 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2581 &stats->tx_pkts_64, "64 bytes frames"); 2582 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2583 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 2584 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2585 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 2586 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2587 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 2588 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2589 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 2590 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2591 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2592 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2593 &stats->tx_jumbos, "Jumbo frames"); 2594 VGE_SYSCTL_STAT_ADD32(ctx, child, "colls", 2595 &stats->tx_colls, "Collisions"); 2596 VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2597 &stats->tx_latecolls, "Late collisions"); 2598 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2599 &stats->tx_pause, "Pause frames"); 2600 #ifdef VGE_ENABLE_SQEERR 2601 VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs", 2602 &stats->tx_sqeerrs, "SQE errors"); 2603 #endif 2604 /* Clear MAC statistics. */ 2605 vge_stats_clear(sc); 2606 } 2607 2608 #undef VGE_SYSCTL_STAT_ADD32 2609 2610 static void 2611 vge_stats_clear(struct vge_softc *sc) 2612 { 2613 int i; 2614 2615 CSR_WRITE_1(sc, VGE_MIBCSR, 2616 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE); 2617 CSR_WRITE_1(sc, VGE_MIBCSR, 2618 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR); 2619 for (i = VGE_TIMEOUT; i > 0; i--) { 2620 DELAY(1); 2621 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0) 2622 break; 2623 } 2624 if (i == 0) 2625 device_printf(sc->vge_dev, "MIB clear timed out!\n"); 2626 CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) & 2627 ~VGE_MIBCSR_FREEZE); 2628 } 2629 2630 static void 2631 vge_stats_update(struct vge_softc *sc) 2632 { 2633 struct vge_hw_stats *stats; 2634 struct ifnet *ifp; 2635 uint32_t mib[VGE_MIB_CNT], val; 2636 int i; 2637 2638 VGE_LOCK_ASSERT(sc); 2639 2640 stats = &sc->vge_stats; 2641 ifp = sc->vge_ifp; 2642 2643 CSR_WRITE_1(sc, VGE_MIBCSR, 2644 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH); 2645 for (i = VGE_TIMEOUT; i > 0; i--) { 2646 DELAY(1); 2647 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0) 2648 break; 2649 } 2650 if (i == 0) { 2651 device_printf(sc->vge_dev, "MIB counter dump timed out!\n"); 2652 vge_stats_clear(sc); 2653 return; 2654 } 2655 2656 bzero(mib, sizeof(mib)); 2657 reset_idx: 2658 /* Set MIB read index to 0. */ 2659 CSR_WRITE_1(sc, VGE_MIBCSR, 2660 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI); 2661 for (i = 0; i < VGE_MIB_CNT; i++) { 2662 val = CSR_READ_4(sc, VGE_MIBDATA); 2663 if (i != VGE_MIB_DATA_IDX(val)) { 2664 /* Reading interrupted. */ 2665 goto reset_idx; 2666 } 2667 mib[i] = val & VGE_MIB_DATA_MASK; 2668 } 2669 2670 /* Rx stats. 
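* Accumulate this MIB snapshot into the running totals kept in the softc so the values reported through sysctl are preserved across hardware MIB clears.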
*/ 2671 stats->rx_frames += mib[VGE_MIB_RX_FRAMES]; 2672 stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES]; 2673 stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS]; 2674 stats->rx_runts += mib[VGE_MIB_RX_RUNTS]; 2675 stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS]; 2676 stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64]; 2677 stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127]; 2678 stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255]; 2679 stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511]; 2680 stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023]; 2681 stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518]; 2682 stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX]; 2683 stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS]; 2684 stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS]; 2685 stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS]; 2686 stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE]; 2687 stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS]; 2688 stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS]; 2689 stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS]; 2690 stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS]; 2691 2692 /* Tx stats. */ 2693 stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES]; 2694 stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64]; 2695 stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127]; 2696 stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255]; 2697 stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511]; 2698 stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023]; 2699 stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518]; 2700 stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS]; 2701 stats->tx_colls += mib[VGE_MIB_TX_COLLS]; 2702 stats->tx_pause += mib[VGE_MIB_TX_PAUSE]; 2703 #ifdef VGE_ENABLE_SQEERR 2704 stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS]; 2705 #endif 2706 stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS]; 2707 2708 /* Update counters in ifnet. */ 2709 if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]); 2710 2711 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 2712 mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]); 2713 2714 if_inc_counter(ifp, IFCOUNTER_OERRORS, 2715 mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]); 2716 2717 if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]); 2718 2719 if_inc_counter(ifp, IFCOUNTER_IERRORS, 2720 mib[VGE_MIB_RX_FIFO_OVERRUNS] + 2721 mib[VGE_MIB_RX_RUNTS] + 2722 mib[VGE_MIB_RX_RUNTS_ERRS] + 2723 mib[VGE_MIB_RX_CRCERRS] + 2724 mib[VGE_MIB_RX_ALIGNERRS] + 2725 mib[VGE_MIB_RX_NOBUFS] + 2726 mib[VGE_MIB_RX_SYMERRS] + 2727 mib[VGE_MIB_RX_LENERRS]); 2728 } 2729 2730 static void 2731 vge_intr_holdoff(struct vge_softc *sc) 2732 { 2733 uint8_t intctl; 2734 2735 VGE_LOCK_ASSERT(sc); 2736 2737 /* 2738 * Set Tx interrupt suppression threshold. 2739 * It's possible to use the single-shot timer in the VGE_CRS1 2740 * register in the Tx path so that the driver can suppress most 2741 * Tx completion interrupts. However, this requires an additional 2742 * access to the VGE_CRS1 register to reload the timer in addition 2743 * to issuing the Tx kick command. Another downside is that we 2744 * don't know in advance what single-shot timer value should be 2745 * used, so reclaiming transmitted mbufs could be delayed 2746 * considerably, which in turn slows down Tx operation. 2747 */ 2748 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR); 2749 CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt); 2750 2751 /* Set Rx interrupt suppression threshold.
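* As with the Tx threshold above, a value of zero (or less) disables suppression entirely via the VGE_INTCTL_RXINTSUP_DISABLE bit set below.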
*/ 2752 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 2753 CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt); 2754 2755 intctl = CSR_READ_1(sc, VGE_INTCTL1); 2756 intctl &= ~VGE_INTCTL_SC_RELOAD; 2757 intctl |= VGE_INTCTL_HC_RELOAD; 2758 if (sc->vge_tx_coal_pkt <= 0) 2759 intctl |= VGE_INTCTL_TXINTSUP_DISABLE; 2760 else 2761 intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE; 2762 if (sc->vge_rx_coal_pkt <= 0) 2763 intctl |= VGE_INTCTL_RXINTSUP_DISABLE; 2764 else 2765 intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE; 2766 CSR_WRITE_1(sc, VGE_INTCTL1, intctl); 2767 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF); 2768 if (sc->vge_int_holdoff > 0) { 2769 /* Set interrupt holdoff timer. */ 2770 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 2771 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 2772 VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff)); 2773 /* Enable holdoff timer. */ 2774 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 2775 } 2776 } 2777 2778 static void 2779 vge_setlinkspeed(struct vge_softc *sc) 2780 { 2781 struct mii_data *mii; 2782 int aneg, i; 2783 2784 VGE_LOCK_ASSERT(sc); 2785 2786 mii = device_get_softc(sc->vge_miibus); 2787 mii_pollstat(mii); 2788 aneg = 0; 2789 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 2790 (IFM_ACTIVE | IFM_AVALID)) { 2791 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2792 case IFM_10_T: 2793 case IFM_100_TX: 2794 return; 2795 case IFM_1000_T: 2796 aneg++; 2797 default: 2798 break; 2799 } 2800 } 2801 /* Clear forced MAC speed/duplex configuration. */ 2802 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2803 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2804 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0); 2805 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR, 2806 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 2807 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR, 2808 BMCR_AUTOEN | BMCR_STARTNEG); 2809 DELAY(1000); 2810 if (aneg != 0) { 2811 /* Poll link state until vge(4) gets a 10/100 link. */ 2812 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 2813 mii_pollstat(mii); 2814 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 2815 == (IFM_ACTIVE | IFM_AVALID)) { 2816 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2817 case IFM_10_T: 2818 case IFM_100_TX: 2819 return; 2820 default: 2821 break; 2822 } 2823 } 2824 VGE_UNLOCK(sc); 2825 pause("vgelnk", hz); 2826 VGE_LOCK(sc); 2827 } 2828 if (i == MII_ANEGTICKS_GIGE) 2829 device_printf(sc->vge_dev, "establishing link failed, " 2830 "WOL may not work!\n"); 2831 } 2832 /* 2833 * No link; force the MAC to a 100Mbps, full-duplex link. 2834 * This is a last resort and may or may not work. 2835 */ 2836 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 2837 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 2838 } 2839 2840 static void 2841 vge_setwol(struct vge_softc *sc) 2842 { 2843 struct ifnet *ifp; 2844 uint16_t pmstat; 2845 uint8_t val; 2846 2847 VGE_LOCK_ASSERT(sc); 2848 2849 if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) { 2850 /* No PME capability, PHY power down. */ 2851 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR, 2852 BMCR_PDOWN); 2853 vge_miipoll_stop(sc); 2854 return; 2855 } 2856 2857 ifp = sc->vge_ifp; 2858 2859 /* Clear WOL on pattern match. */ 2860 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2861 /* Disable WOL on magic/unicast packet.
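* All WOLCR1 wake-up sources are cleared here first; the sources the user actually enabled (unicast, magic, multicast) are selectively re-armed through VGE_WOLCR1S and VGE_WOLCFGS further down.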
*/ 2862 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2863 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2864 VGE_WOLCFG_PMEOVR); 2865 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2866 vge_setlinkspeed(sc); 2867 val = 0; 2868 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2869 val |= VGE_WOLCR1_UCAST; 2870 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2871 val |= VGE_WOLCR1_MAGIC; 2872 CSR_WRITE_1(sc, VGE_WOLCR1S, val); 2873 val = 0; 2874 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2875 val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB; 2876 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR); 2877 /* Disable MII auto-polling. */ 2878 vge_miipoll_stop(sc); 2879 } 2880 CSR_SETBIT_1(sc, VGE_DIAGCTL, 2881 VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE); 2882 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2883 2884 /* Clear WOL status on pattern match. */ 2885 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2886 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2887 2888 val = CSR_READ_1(sc, VGE_PWRSTAT); 2889 val |= VGE_STICKHW_SWPTAG; 2890 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2891 /* Put hardware into sleep. */ 2892 val = CSR_READ_1(sc, VGE_PWRSTAT); 2893 val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1; 2894 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2895 /* Request PME if WOL is requested. */ 2896 pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap + 2897 PCIR_POWER_STATUS, 2); 2898 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2899 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2900 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2901 pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS, 2902 pmstat, 2); 2903 } 2904 2905 static void 2906 vge_clrwol(struct vge_softc *sc) 2907 { 2908 uint8_t val; 2909 2910 val = CSR_READ_1(sc, VGE_PWRSTAT); 2911 val &= ~VGE_STICKHW_SWPTAG; 2912 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2913 /* Disable WOL and clear power state indicator. */ 2914 val = CSR_READ_1(sc, VGE_PWRSTAT); 2915 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); 2916 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2917 2918 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2919 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2920 2921 /* Clear WOL on pattern match. */ 2922 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2923 /* Disable WOL on magic/unicast packet. */ 2924 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2925 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2926 VGE_WOLCFG_PMEOVR); 2927 /* Clear WOL status on pattern match. */ 2928 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2929 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2930 } 2931