1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2004 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 /* 39 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 40 * 41 * Written by Bill Paul <wpaul@windriver.com> 42 * Senior Networking Software Engineer 43 * Wind River Systems 44 */ 45 46 /* 47 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that 48 * combines a tri-speed ethernet MAC and PHY, with the following 49 * features: 50 * 51 * o Jumbo frame support up to 16K 52 * o Transmit and receive flow control 53 * o IPv4 checksum offload 54 * o VLAN tag insertion and stripping 55 * o TCP large send 56 * o 64-bit multicast hash table filter 57 * o 64 entry CAM filter 58 * o 16K RX FIFO and 48K TX FIFO memory 59 * o Interrupt moderation 60 * 61 * The VT6122 supports up to four transmit DMA queues. The descriptors 62 * in the transmit ring can address up to 7 data fragments; frames which 63 * span more than 7 data buffers must be coalesced, but in general the 64 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 65 * long. The receive descriptors address only a single buffer. 66 * 67 * There are two peculiar design issues with the VT6122. One is that 68 * receive data buffers must be aligned on a 32-bit boundary. This is 69 * not a problem where the VT6122 is used as a LOM device in x86-based 70 * systems, but on architectures that generate unaligned access traps, we 71 * have to do some copying. 72 * 73 * The other issue has to do with the way 64-bit addresses are handled. 74 * The DMA descriptors only allow you to specify 48 bits of addressing 75 * information. The remaining 16 bits are specified using one of the 76 * I/O registers. 
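 * (Throughout this driver the VGE_ADDR_LO() and VGE_ADDR_HI() macros are
 * what perform that split whenever descriptor and ring base addresses are
 * programmed into the hardware.)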
If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);

/*
 * The SQE error counter of the MIB seems to report a bogus value.
 * The vendor's workaround does not seem to work on PCIe based
 * controllers.  Disable it until we find a better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
147 */ 148 static struct vge_type vge_devs[] = { 149 { VIA_VENDORID, VIA_DEVICEID_61XX, 150 "VIA Networking Velocity Gigabit Ethernet" }, 151 { 0, 0, NULL } 152 }; 153 154 static int vge_attach(device_t); 155 static int vge_detach(device_t); 156 static int vge_probe(device_t); 157 static int vge_resume(device_t); 158 static int vge_shutdown(device_t); 159 static int vge_suspend(device_t); 160 161 static void vge_cam_clear(struct vge_softc *); 162 static int vge_cam_set(struct vge_softc *, uint8_t *); 163 static void vge_clrwol(struct vge_softc *); 164 static void vge_discard_rxbuf(struct vge_softc *, int); 165 static int vge_dma_alloc(struct vge_softc *); 166 static void vge_dma_free(struct vge_softc *); 167 static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 168 #ifdef VGE_EEPROM 169 static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 170 #endif 171 static int vge_encap(struct vge_softc *, struct mbuf **); 172 #ifndef __NO_STRICT_ALIGNMENT 173 static __inline void 174 vge_fixup_rx(struct mbuf *); 175 #endif 176 static void vge_freebufs(struct vge_softc *); 177 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 178 static int vge_ifmedia_upd(struct ifnet *); 179 static int vge_ifmedia_upd_locked(struct vge_softc *); 180 static void vge_init(void *); 181 static void vge_init_locked(struct vge_softc *); 182 static void vge_intr(void *); 183 static void vge_intr_holdoff(struct vge_softc *); 184 static int vge_ioctl(struct ifnet *, u_long, caddr_t); 185 static void vge_link_statchg(void *); 186 static int vge_miibus_readreg(device_t, int, int); 187 static int vge_miibus_writereg(device_t, int, int, int); 188 static void vge_miipoll_start(struct vge_softc *); 189 static void vge_miipoll_stop(struct vge_softc *); 190 static int vge_newbuf(struct vge_softc *, int); 191 static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 192 static void vge_reset(struct vge_softc *); 193 static int vge_rx_list_init(struct vge_softc *); 194 static int vge_rxeof(struct vge_softc *, int); 195 static void vge_rxfilter(struct vge_softc *); 196 static void vge_setmedia(struct vge_softc *); 197 static void vge_setvlan(struct vge_softc *); 198 static void vge_setwol(struct vge_softc *); 199 static void vge_start(struct ifnet *); 200 static void vge_start_locked(struct ifnet *); 201 static void vge_stats_clear(struct vge_softc *); 202 static void vge_stats_update(struct vge_softc *); 203 static void vge_stop(struct vge_softc *); 204 static void vge_sysctl_node(struct vge_softc *); 205 static int vge_tx_list_init(struct vge_softc *); 206 static void vge_txeof(struct vge_softc *); 207 static void vge_watchdog(void *); 208 209 static device_method_t vge_methods[] = { 210 /* Device interface */ 211 DEVMETHOD(device_probe, vge_probe), 212 DEVMETHOD(device_attach, vge_attach), 213 DEVMETHOD(device_detach, vge_detach), 214 DEVMETHOD(device_suspend, vge_suspend), 215 DEVMETHOD(device_resume, vge_resume), 216 DEVMETHOD(device_shutdown, vge_shutdown), 217 218 /* MII interface */ 219 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 220 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 221 222 DEVMETHOD_END 223 }; 224 225 static driver_t vge_driver = { 226 "vge", 227 vge_methods, 228 sizeof(struct vge_softc) 229 }; 230 231 static devclass_t vge_devclass; 232 233 DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 234 DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); 235 236 #ifdef VGE_EEPROM 237 /* 238 * Read a word of data stored in the EEPROM at 
address 'addr.' 239 */ 240 static void 241 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 242 { 243 int i; 244 uint16_t word = 0; 245 246 /* 247 * Enter EEPROM embedded programming mode. In order to 248 * access the EEPROM at all, we first have to set the 249 * EELOAD bit in the CHIPCFG2 register. 250 */ 251 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 252 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 253 254 /* Select the address of the word we want to read */ 255 CSR_WRITE_1(sc, VGE_EEADDR, addr); 256 257 /* Issue read command */ 258 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 259 260 /* Wait for the done bit to be set. */ 261 for (i = 0; i < VGE_TIMEOUT; i++) { 262 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 263 break; 264 } 265 266 if (i == VGE_TIMEOUT) { 267 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 268 *dest = 0; 269 return; 270 } 271 272 /* Read the result */ 273 word = CSR_READ_2(sc, VGE_EERDDAT); 274 275 /* Turn off EEPROM access mode. */ 276 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 277 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 278 279 *dest = word; 280 } 281 #endif 282 283 /* 284 * Read a sequence of words from the EEPROM. 285 */ 286 static void 287 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 288 { 289 int i; 290 #ifdef VGE_EEPROM 291 uint16_t word = 0, *ptr; 292 293 for (i = 0; i < cnt; i++) { 294 vge_eeprom_getword(sc, off + i, &word); 295 ptr = (uint16_t *)(dest + (i * 2)); 296 if (swap) 297 *ptr = ntohs(word); 298 else 299 *ptr = word; 300 } 301 #else 302 for (i = 0; i < ETHER_ADDR_LEN; i++) 303 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 304 #endif 305 } 306 307 static void 308 vge_miipoll_stop(struct vge_softc *sc) 309 { 310 int i; 311 312 CSR_WRITE_1(sc, VGE_MIICMD, 0); 313 314 for (i = 0; i < VGE_TIMEOUT; i++) { 315 DELAY(1); 316 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 317 break; 318 } 319 320 if (i == VGE_TIMEOUT) 321 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 322 } 323 324 static void 325 vge_miipoll_start(struct vge_softc *sc) 326 { 327 int i; 328 329 /* First, make sure we're idle. */ 330 331 CSR_WRITE_1(sc, VGE_MIICMD, 0); 332 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 333 334 for (i = 0; i < VGE_TIMEOUT; i++) { 335 DELAY(1); 336 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 337 break; 338 } 339 340 if (i == VGE_TIMEOUT) { 341 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 342 return; 343 } 344 345 /* Now enable auto poll mode. */ 346 347 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 348 349 /* And make sure it started. */ 350 351 for (i = 0; i < VGE_TIMEOUT; i++) { 352 DELAY(1); 353 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 354 break; 355 } 356 357 if (i == VGE_TIMEOUT) 358 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 359 } 360 361 static int 362 vge_miibus_readreg(device_t dev, int phy, int reg) 363 { 364 struct vge_softc *sc; 365 int i; 366 uint16_t rval = 0; 367 368 sc = device_get_softc(dev); 369 370 vge_miipoll_stop(sc); 371 372 /* Specify the register we want to read. */ 373 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 374 375 /* Issue read command. */ 376 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 377 378 /* Wait for the read command bit to self-clear. 
 */
    for (i = 0; i < VGE_TIMEOUT; i++) {
        DELAY(1);
        if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
            break;
    }

    if (i == VGE_TIMEOUT)
        device_printf(sc->vge_dev, "MII read timed out\n");
    else
        rval = CSR_READ_2(sc, VGE_MIIDATA);

    vge_miipoll_start(sc);

    return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
    struct vge_softc *sc;
    int i, rval = 0;

    sc = device_get_softc(dev);

    vge_miipoll_stop(sc);

    /* Specify the register we want to write. */
    CSR_WRITE_1(sc, VGE_MIIADDR, reg);

    /* Specify the data we want to write. */
    CSR_WRITE_2(sc, VGE_MIIDATA, data);

    /* Issue write command. */
    CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

    /* Wait for the write command bit to self-clear. */
    for (i = 0; i < VGE_TIMEOUT; i++) {
        DELAY(1);
        if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
            break;
    }

    if (i == VGE_TIMEOUT) {
        device_printf(sc->vge_dev, "MII write timed out\n");
        rval = EIO;
    }

    vge_miipoll_start(sc);

    return (rval);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
    int i;

    /*
     * Turn off all the mask bits. This tells the chip
     * that none of the entries in the CAM filter are valid.
     * Desired entries will be enabled as we fill the filter in.
     */

    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
    CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
    for (i = 0; i < 8; i++)
        CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

    /* Clear the VLAN filter too. */

    CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
    for (i = 0; i < 8; i++)
        CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

    CSR_WRITE_1(sc, VGE_CAMADDR, 0);
    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

    sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
    int i, error = 0;

    if (sc->vge_camidx == VGE_CAM_MAXADDRS)
        return (ENOSPC);

    /* Select the CAM data page. */
    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

    /* Set the filter entry we want to update and enable writing. */
    CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

    /* Write the address to the CAM registers */
    for (i = 0; i < ETHER_ADDR_LEN; i++)
        CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

    /* Issue a write command. */
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

    /* Wait for it to clear. */
    for (i = 0; i < VGE_TIMEOUT; i++) {
        DELAY(1);
        if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
            break;
    }

    if (i == VGE_TIMEOUT) {
        device_printf(sc->vge_dev, "setting CAM filter failed\n");
        error = EIO;
        goto fail;
    }

    /* Select the CAM mask page. */
    CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
    CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

    /* Set the mask bit that enables this filter. */
    CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
        1<<(sc->vge_camidx & 7));

    sc->vge_camidx++;

fail:
    /* Turn off access to CAM.
*/ 508 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 509 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 510 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 511 512 return (error); 513 } 514 515 static void 516 vge_setvlan(struct vge_softc *sc) 517 { 518 struct ifnet *ifp; 519 uint8_t cfg; 520 521 VGE_LOCK_ASSERT(sc); 522 523 ifp = sc->vge_ifp; 524 cfg = CSR_READ_1(sc, VGE_RXCFG); 525 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 526 cfg |= VGE_VTAG_OPT2; 527 else 528 cfg &= ~VGE_VTAG_OPT2; 529 CSR_WRITE_1(sc, VGE_RXCFG, cfg); 530 } 531 532 /* 533 * Program the multicast filter. We use the 64-entry CAM filter 534 * for perfect filtering. If there's more than 64 multicast addresses, 535 * we use the hash filter instead. 536 */ 537 static void 538 vge_rxfilter(struct vge_softc *sc) 539 { 540 struct ifnet *ifp; 541 struct ifmultiaddr *ifma; 542 uint32_t h, hashes[2]; 543 uint8_t rxcfg; 544 int error = 0; 545 546 VGE_LOCK_ASSERT(sc); 547 548 /* First, zot all the multicast entries. */ 549 hashes[0] = 0; 550 hashes[1] = 0; 551 552 rxcfg = CSR_READ_1(sc, VGE_RXCTL); 553 rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST | 554 VGE_RXCTL_RX_PROMISC); 555 /* 556 * Always allow VLAN oversized frames and frames for 557 * this host. 558 */ 559 rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST; 560 561 ifp = sc->vge_ifp; 562 if ((ifp->if_flags & IFF_BROADCAST) != 0) 563 rxcfg |= VGE_RXCTL_RX_BCAST; 564 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 565 if ((ifp->if_flags & IFF_PROMISC) != 0) 566 rxcfg |= VGE_RXCTL_RX_PROMISC; 567 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 568 hashes[0] = 0xFFFFFFFF; 569 hashes[1] = 0xFFFFFFFF; 570 } 571 goto done; 572 } 573 574 vge_cam_clear(sc); 575 /* Now program new ones */ 576 if_maddr_rlock(ifp); 577 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 578 if (ifma->ifma_addr->sa_family != AF_LINK) 579 continue; 580 error = vge_cam_set(sc, 581 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 582 if (error) 583 break; 584 } 585 586 /* If there were too many addresses, use the hash filter. */ 587 if (error) { 588 vge_cam_clear(sc); 589 590 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 591 if (ifma->ifma_addr->sa_family != AF_LINK) 592 continue; 593 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 594 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 595 if (h < 32) 596 hashes[0] |= (1 << h); 597 else 598 hashes[1] |= (1 << (h - 32)); 599 } 600 } 601 if_maddr_runlock(ifp); 602 603 done: 604 if (hashes[0] != 0 || hashes[1] != 0) 605 rxcfg |= VGE_RXCTL_RX_MCAST; 606 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 607 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 608 CSR_WRITE_1(sc, VGE_RXCTL, rxcfg); 609 } 610 611 static void 612 vge_reset(struct vge_softc *sc) 613 { 614 int i; 615 616 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 617 618 for (i = 0; i < VGE_TIMEOUT; i++) { 619 DELAY(5); 620 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 621 break; 622 } 623 624 if (i == VGE_TIMEOUT) { 625 device_printf(sc->vge_dev, "soft reset timed out\n"); 626 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 627 DELAY(2000); 628 } 629 630 DELAY(5000); 631 } 632 633 /* 634 * Probe for a VIA gigabit chip. Check the PCI vendor and device 635 * IDs against our list and return a device name if we find a match. 
636 */ 637 static int 638 vge_probe(device_t dev) 639 { 640 struct vge_type *t; 641 642 t = vge_devs; 643 644 while (t->vge_name != NULL) { 645 if ((pci_get_vendor(dev) == t->vge_vid) && 646 (pci_get_device(dev) == t->vge_did)) { 647 device_set_desc(dev, t->vge_name); 648 return (BUS_PROBE_DEFAULT); 649 } 650 t++; 651 } 652 653 return (ENXIO); 654 } 655 656 /* 657 * Map a single buffer address. 658 */ 659 660 struct vge_dmamap_arg { 661 bus_addr_t vge_busaddr; 662 }; 663 664 static void 665 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 666 { 667 struct vge_dmamap_arg *ctx; 668 669 if (error != 0) 670 return; 671 672 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 673 674 ctx = (struct vge_dmamap_arg *)arg; 675 ctx->vge_busaddr = segs[0].ds_addr; 676 } 677 678 static int 679 vge_dma_alloc(struct vge_softc *sc) 680 { 681 struct vge_dmamap_arg ctx; 682 struct vge_txdesc *txd; 683 struct vge_rxdesc *rxd; 684 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 685 int error, i; 686 687 /* 688 * It seems old PCI controllers do not support DAC. DAC 689 * configuration can be enabled by accessing VGE_CHIPCFG3 690 * register but honor EEPROM configuration instead of 691 * blindly overriding DAC configuration. PCIe based 692 * controllers are supposed to support 64bit DMA so enable 693 * 64bit DMA on these controllers. 694 */ 695 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 696 lowaddr = BUS_SPACE_MAXADDR; 697 else 698 lowaddr = BUS_SPACE_MAXADDR_32BIT; 699 700 again: 701 /* Create parent ring tag. */ 702 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 703 1, 0, /* algnmnt, boundary */ 704 lowaddr, /* lowaddr */ 705 BUS_SPACE_MAXADDR, /* highaddr */ 706 NULL, NULL, /* filter, filterarg */ 707 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 708 0, /* nsegments */ 709 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 710 0, /* flags */ 711 NULL, NULL, /* lockfunc, lockarg */ 712 &sc->vge_cdata.vge_ring_tag); 713 if (error != 0) { 714 device_printf(sc->vge_dev, 715 "could not create parent DMA tag.\n"); 716 goto fail; 717 } 718 719 /* Create tag for Tx ring. */ 720 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 721 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 722 BUS_SPACE_MAXADDR, /* lowaddr */ 723 BUS_SPACE_MAXADDR, /* highaddr */ 724 NULL, NULL, /* filter, filterarg */ 725 VGE_TX_LIST_SZ, /* maxsize */ 726 1, /* nsegments */ 727 VGE_TX_LIST_SZ, /* maxsegsize */ 728 0, /* flags */ 729 NULL, NULL, /* lockfunc, lockarg */ 730 &sc->vge_cdata.vge_tx_ring_tag); 731 if (error != 0) { 732 device_printf(sc->vge_dev, 733 "could not allocate Tx ring DMA tag.\n"); 734 goto fail; 735 } 736 737 /* Create tag for Rx ring. */ 738 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 739 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 740 BUS_SPACE_MAXADDR, /* lowaddr */ 741 BUS_SPACE_MAXADDR, /* highaddr */ 742 NULL, NULL, /* filter, filterarg */ 743 VGE_RX_LIST_SZ, /* maxsize */ 744 1, /* nsegments */ 745 VGE_RX_LIST_SZ, /* maxsegsize */ 746 0, /* flags */ 747 NULL, NULL, /* lockfunc, lockarg */ 748 &sc->vge_cdata.vge_rx_ring_tag); 749 if (error != 0) { 750 device_printf(sc->vge_dev, 751 "could not allocate Rx ring DMA tag.\n"); 752 goto fail; 753 } 754 755 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
*/ 756 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 757 (void **)&sc->vge_rdata.vge_tx_ring, 758 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 759 &sc->vge_cdata.vge_tx_ring_map); 760 if (error != 0) { 761 device_printf(sc->vge_dev, 762 "could not allocate DMA'able memory for Tx ring.\n"); 763 goto fail; 764 } 765 766 ctx.vge_busaddr = 0; 767 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 768 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 769 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 770 if (error != 0 || ctx.vge_busaddr == 0) { 771 device_printf(sc->vge_dev, 772 "could not load DMA'able memory for Tx ring.\n"); 773 goto fail; 774 } 775 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 776 777 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 778 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 779 (void **)&sc->vge_rdata.vge_rx_ring, 780 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 781 &sc->vge_cdata.vge_rx_ring_map); 782 if (error != 0) { 783 device_printf(sc->vge_dev, 784 "could not allocate DMA'able memory for Rx ring.\n"); 785 goto fail; 786 } 787 788 ctx.vge_busaddr = 0; 789 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 790 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 791 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 792 if (error != 0 || ctx.vge_busaddr == 0) { 793 device_printf(sc->vge_dev, 794 "could not load DMA'able memory for Rx ring.\n"); 795 goto fail; 796 } 797 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 798 799 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 800 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 801 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 802 if ((VGE_ADDR_HI(tx_ring_end) != 803 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 804 (VGE_ADDR_HI(rx_ring_end) != 805 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 806 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 807 device_printf(sc->vge_dev, "4GB boundary crossed, " 808 "switching to 32bit DMA address mode.\n"); 809 vge_dma_free(sc); 810 /* Limit DMA address space to 32bit and try again. */ 811 lowaddr = BUS_SPACE_MAXADDR_32BIT; 812 goto again; 813 } 814 815 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 816 lowaddr = VGE_BUF_DMA_MAXADDR; 817 else 818 lowaddr = BUS_SPACE_MAXADDR_32BIT; 819 /* Create parent buffer tag. */ 820 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 821 1, 0, /* algnmnt, boundary */ 822 lowaddr, /* lowaddr */ 823 BUS_SPACE_MAXADDR, /* highaddr */ 824 NULL, NULL, /* filter, filterarg */ 825 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 826 0, /* nsegments */ 827 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 828 0, /* flags */ 829 NULL, NULL, /* lockfunc, lockarg */ 830 &sc->vge_cdata.vge_buffer_tag); 831 if (error != 0) { 832 device_printf(sc->vge_dev, 833 "could not create parent buffer DMA tag.\n"); 834 goto fail; 835 } 836 837 /* Create tag for Tx buffers. 
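     * Each map created from this tag can carry up to VGE_MAXTXSEGS
     * segments of at most one mbuf cluster (MCLBYTES) each.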
*/ 838 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 839 1, 0, /* algnmnt, boundary */ 840 BUS_SPACE_MAXADDR, /* lowaddr */ 841 BUS_SPACE_MAXADDR, /* highaddr */ 842 NULL, NULL, /* filter, filterarg */ 843 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 844 VGE_MAXTXSEGS, /* nsegments */ 845 MCLBYTES, /* maxsegsize */ 846 0, /* flags */ 847 NULL, NULL, /* lockfunc, lockarg */ 848 &sc->vge_cdata.vge_tx_tag); 849 if (error != 0) { 850 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 851 goto fail; 852 } 853 854 /* Create tag for Rx buffers. */ 855 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 856 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 857 BUS_SPACE_MAXADDR, /* lowaddr */ 858 BUS_SPACE_MAXADDR, /* highaddr */ 859 NULL, NULL, /* filter, filterarg */ 860 MCLBYTES, /* maxsize */ 861 1, /* nsegments */ 862 MCLBYTES, /* maxsegsize */ 863 0, /* flags */ 864 NULL, NULL, /* lockfunc, lockarg */ 865 &sc->vge_cdata.vge_rx_tag); 866 if (error != 0) { 867 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 868 goto fail; 869 } 870 871 /* Create DMA maps for Tx buffers. */ 872 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 873 txd = &sc->vge_cdata.vge_txdesc[i]; 874 txd->tx_m = NULL; 875 txd->tx_dmamap = NULL; 876 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 877 &txd->tx_dmamap); 878 if (error != 0) { 879 device_printf(sc->vge_dev, 880 "could not create Tx dmamap.\n"); 881 goto fail; 882 } 883 } 884 /* Create DMA maps for Rx buffers. */ 885 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 886 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 887 device_printf(sc->vge_dev, 888 "could not create spare Rx dmamap.\n"); 889 goto fail; 890 } 891 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 892 rxd = &sc->vge_cdata.vge_rxdesc[i]; 893 rxd->rx_m = NULL; 894 rxd->rx_dmamap = NULL; 895 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 896 &rxd->rx_dmamap); 897 if (error != 0) { 898 device_printf(sc->vge_dev, 899 "could not create Rx dmamap.\n"); 900 goto fail; 901 } 902 } 903 904 fail: 905 return (error); 906 } 907 908 static void 909 vge_dma_free(struct vge_softc *sc) 910 { 911 struct vge_txdesc *txd; 912 struct vge_rxdesc *rxd; 913 int i; 914 915 /* Tx ring. */ 916 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 917 if (sc->vge_rdata.vge_tx_ring_paddr) 918 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 919 sc->vge_cdata.vge_tx_ring_map); 920 if (sc->vge_rdata.vge_tx_ring) 921 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 922 sc->vge_rdata.vge_tx_ring, 923 sc->vge_cdata.vge_tx_ring_map); 924 sc->vge_rdata.vge_tx_ring = NULL; 925 sc->vge_rdata.vge_tx_ring_paddr = 0; 926 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 927 sc->vge_cdata.vge_tx_ring_tag = NULL; 928 } 929 /* Rx ring. */ 930 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 931 if (sc->vge_rdata.vge_rx_ring_paddr) 932 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 933 sc->vge_cdata.vge_rx_ring_map); 934 if (sc->vge_rdata.vge_rx_ring) 935 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 936 sc->vge_rdata.vge_rx_ring, 937 sc->vge_cdata.vge_rx_ring_map); 938 sc->vge_rdata.vge_rx_ring = NULL; 939 sc->vge_rdata.vge_rx_ring_paddr = 0; 940 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 941 sc->vge_cdata.vge_rx_ring_tag = NULL; 942 } 943 /* Tx buffers. 
*/ 944 if (sc->vge_cdata.vge_tx_tag != NULL) { 945 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 946 txd = &sc->vge_cdata.vge_txdesc[i]; 947 if (txd->tx_dmamap != NULL) { 948 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 949 txd->tx_dmamap); 950 txd->tx_dmamap = NULL; 951 } 952 } 953 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 954 sc->vge_cdata.vge_tx_tag = NULL; 955 } 956 /* Rx buffers. */ 957 if (sc->vge_cdata.vge_rx_tag != NULL) { 958 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 959 rxd = &sc->vge_cdata.vge_rxdesc[i]; 960 if (rxd->rx_dmamap != NULL) { 961 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 962 rxd->rx_dmamap); 963 rxd->rx_dmamap = NULL; 964 } 965 } 966 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 967 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 968 sc->vge_cdata.vge_rx_sparemap); 969 sc->vge_cdata.vge_rx_sparemap = NULL; 970 } 971 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 972 sc->vge_cdata.vge_rx_tag = NULL; 973 } 974 975 if (sc->vge_cdata.vge_buffer_tag != NULL) { 976 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 977 sc->vge_cdata.vge_buffer_tag = NULL; 978 } 979 if (sc->vge_cdata.vge_ring_tag != NULL) { 980 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 981 sc->vge_cdata.vge_ring_tag = NULL; 982 } 983 } 984 985 /* 986 * Attach the interface. Allocate softc structures, do ifmedia 987 * setup and ethernet/BPF attach. 988 */ 989 static int 990 vge_attach(device_t dev) 991 { 992 u_char eaddr[ETHER_ADDR_LEN]; 993 struct vge_softc *sc; 994 struct ifnet *ifp; 995 int error = 0, cap, i, msic, rid; 996 997 sc = device_get_softc(dev); 998 sc->vge_dev = dev; 999 1000 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1001 MTX_DEF); 1002 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 1003 1004 /* 1005 * Map control/status registers. 1006 */ 1007 pci_enable_busmaster(dev); 1008 1009 rid = PCIR_BAR(1); 1010 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1011 RF_ACTIVE); 1012 1013 if (sc->vge_res == NULL) { 1014 device_printf(dev, "couldn't map ports/memory\n"); 1015 error = ENXIO; 1016 goto fail; 1017 } 1018 1019 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 1020 sc->vge_flags |= VGE_FLAG_PCIE; 1021 sc->vge_expcap = cap; 1022 } else 1023 sc->vge_flags |= VGE_FLAG_JUMBO; 1024 if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) { 1025 sc->vge_flags |= VGE_FLAG_PMCAP; 1026 sc->vge_pmcap = cap; 1027 } 1028 rid = 0; 1029 msic = pci_msi_count(dev); 1030 if (msi_disable == 0 && msic > 0) { 1031 msic = 1; 1032 if (pci_alloc_msi(dev, &msic) == 0) { 1033 if (msic == 1) { 1034 sc->vge_flags |= VGE_FLAG_MSI; 1035 device_printf(dev, "Using %d MSI message\n", 1036 msic); 1037 rid = 1; 1038 } else 1039 pci_release_msi(dev); 1040 } 1041 } 1042 1043 /* Allocate interrupt */ 1044 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1045 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 1046 if (sc->vge_irq == NULL) { 1047 device_printf(dev, "couldn't map interrupt\n"); 1048 error = ENXIO; 1049 goto fail; 1050 } 1051 1052 /* Reset the adapter. */ 1053 vge_reset(sc); 1054 /* Reload EEPROM. */ 1055 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 1056 for (i = 0; i < VGE_TIMEOUT; i++) { 1057 DELAY(5); 1058 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1059 break; 1060 } 1061 if (i == VGE_TIMEOUT) 1062 device_printf(dev, "EEPROM reload timed out\n"); 1063 /* 1064 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1065 * MAC will receive magic packet which in turn confuses 1066 * controller. 
1067 */ 1068 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1069 1070 /* 1071 * Get station address from the EEPROM. 1072 */ 1073 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1074 /* 1075 * Save configured PHY address. 1076 * It seems the PHY address of PCIe controllers just 1077 * reflects media jump strapping status so we assume the 1078 * internal PHY address of PCIe controller is at 1. 1079 */ 1080 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1081 sc->vge_phyaddr = 1; 1082 else 1083 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1084 VGE_MIICFG_PHYADDR; 1085 /* Clear WOL and take hardware from powerdown. */ 1086 vge_clrwol(sc); 1087 vge_sysctl_node(sc); 1088 error = vge_dma_alloc(sc); 1089 if (error) 1090 goto fail; 1091 1092 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1093 if (ifp == NULL) { 1094 device_printf(dev, "can not if_alloc()\n"); 1095 error = ENOSPC; 1096 goto fail; 1097 } 1098 1099 vge_miipoll_start(sc); 1100 /* Do MII setup */ 1101 error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd, 1102 vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY, 1103 MIIF_DOPAUSE); 1104 if (error != 0) { 1105 device_printf(dev, "attaching PHYs failed\n"); 1106 goto fail; 1107 } 1108 1109 ifp->if_softc = sc; 1110 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1111 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1112 ifp->if_ioctl = vge_ioctl; 1113 ifp->if_capabilities = IFCAP_VLAN_MTU; 1114 ifp->if_start = vge_start; 1115 ifp->if_hwassist = VGE_CSUM_FEATURES; 1116 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | 1117 IFCAP_VLAN_HWTAGGING; 1118 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) 1119 ifp->if_capabilities |= IFCAP_WOL; 1120 ifp->if_capenable = ifp->if_capabilities; 1121 #ifdef DEVICE_POLLING 1122 ifp->if_capabilities |= IFCAP_POLLING; 1123 #endif 1124 ifp->if_init = vge_init; 1125 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1); 1126 ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1; 1127 IFQ_SET_READY(&ifp->if_snd); 1128 1129 /* 1130 * Call MI attach routine. 1131 */ 1132 ether_ifattach(ifp, eaddr); 1133 1134 /* Tell the upper layer(s) we support long frames. */ 1135 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1136 1137 /* Hook interrupt last to avoid having to lock softc */ 1138 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1139 NULL, vge_intr, sc, &sc->vge_intrhand); 1140 1141 if (error) { 1142 device_printf(dev, "couldn't set up irq\n"); 1143 ether_ifdetach(ifp); 1144 goto fail; 1145 } 1146 1147 fail: 1148 if (error) 1149 vge_detach(dev); 1150 1151 return (error); 1152 } 1153 1154 /* 1155 * Shutdown hardware and free up resources. This can be called any 1156 * time after the mutex has been initialized. It is called in both 1157 * the error case in attach and the normal detach case so it needs 1158 * to be careful about only freeing resources that have actually been 1159 * allocated. 
 */
static int
vge_detach(device_t dev)
{
    struct vge_softc *sc;
    struct ifnet *ifp;

    sc = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
    ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
    if (ifp->if_capenable & IFCAP_POLLING)
        ether_poll_deregister(ifp);
#endif

    /* These should only be active if attach succeeded */
    if (device_is_attached(dev)) {
        ether_ifdetach(ifp);
        VGE_LOCK(sc);
        vge_stop(sc);
        VGE_UNLOCK(sc);
        callout_drain(&sc->vge_watchdog);
    }
    if (sc->vge_miibus)
        device_delete_child(dev, sc->vge_miibus);
    bus_generic_detach(dev);

    if (sc->vge_intrhand)
        bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
    if (sc->vge_irq)
        bus_release_resource(dev, SYS_RES_IRQ,
            sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
    if (sc->vge_flags & VGE_FLAG_MSI)
        pci_release_msi(dev);
    if (sc->vge_res)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(1), sc->vge_res);
    if (ifp)
        if_free(ifp);

    vge_dma_free(sc);
    mtx_destroy(&sc->vge_mtx);

    return (0);
}

static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
    struct vge_rxdesc *rxd;
    int i;

    rxd = &sc->vge_cdata.vge_rxdesc[prod];
    rxd->rx_desc->vge_sts = 0;
    rxd->rx_desc->vge_ctl = 0;

    /*
     * Note: the manual fails to document the fact that for
     * proper operation, the driver needs to replenish the RX
     * DMA ring 4 descriptors at a time (rather than one at a
     * time, like most chips). We can allocate the new buffers
     * but we should not set the OWN bits until we're ready
     * to hand back 4 of them in one shot.
     */
    if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
        for (i = VGE_RXCHUNK; i > 0; i--) {
            rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
            rxd = rxd->rxd_prev;
        }
        sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
    }
}

static int
vge_newbuf(struct vge_softc *sc, int prod)
{
    struct vge_rxdesc *rxd;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int i, nsegs;

    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);
    /*
     * This is part of an evil trick to deal with strict-alignment
     * architectures. The VIA chip requires RX buffers to be aligned
     * on 32-bit boundaries, but that will hose strict-alignment
     * architectures. To get around this, we leave some empty space
     * at the start of each buffer and, for strict-alignment hosts,
     * we copy the received data back two bytes to achieve word alignment.
     * This is slightly more efficient than allocating a new buffer,
     * copying the contents, and discarding the old buffer.
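     * The copy-back is done by vge_fixup_rx(), which is only compiled
     * in on strict-alignment machines (#ifndef __NO_STRICT_ALIGNMENT).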
1255 */ 1256 m->m_len = m->m_pkthdr.len = MCLBYTES; 1257 m_adj(m, VGE_RX_BUF_ALIGN); 1258 1259 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1260 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1261 m_freem(m); 1262 return (ENOBUFS); 1263 } 1264 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1265 1266 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1267 if (rxd->rx_m != NULL) { 1268 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1269 BUS_DMASYNC_POSTREAD); 1270 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1271 } 1272 map = rxd->rx_dmamap; 1273 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1274 sc->vge_cdata.vge_rx_sparemap = map; 1275 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1276 BUS_DMASYNC_PREREAD); 1277 rxd->rx_m = m; 1278 1279 rxd->rx_desc->vge_sts = 0; 1280 rxd->rx_desc->vge_ctl = 0; 1281 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1282 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1283 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1284 1285 /* 1286 * Note: the manual fails to document the fact that for 1287 * proper operation, the driver needs to replenish the RX 1288 * DMA ring 4 descriptors at a time (rather than one at a 1289 * time, like most chips). We can allocate the new buffers 1290 * but we should not set the OWN bits until we're ready 1291 * to hand back 4 of them in one shot. 1292 */ 1293 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1294 for (i = VGE_RXCHUNK; i > 0; i--) { 1295 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1296 rxd = rxd->rxd_prev; 1297 } 1298 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1299 } 1300 1301 return (0); 1302 } 1303 1304 static int 1305 vge_tx_list_init(struct vge_softc *sc) 1306 { 1307 struct vge_ring_data *rd; 1308 struct vge_txdesc *txd; 1309 int i; 1310 1311 VGE_LOCK_ASSERT(sc); 1312 1313 sc->vge_cdata.vge_tx_prodidx = 0; 1314 sc->vge_cdata.vge_tx_considx = 0; 1315 sc->vge_cdata.vge_tx_cnt = 0; 1316 1317 rd = &sc->vge_rdata; 1318 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1319 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1320 txd = &sc->vge_cdata.vge_txdesc[i]; 1321 txd->tx_m = NULL; 1322 txd->tx_desc = &rd->vge_tx_ring[i]; 1323 } 1324 1325 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1326 sc->vge_cdata.vge_tx_ring_map, 1327 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1328 1329 return (0); 1330 } 1331 1332 static int 1333 vge_rx_list_init(struct vge_softc *sc) 1334 { 1335 struct vge_ring_data *rd; 1336 struct vge_rxdesc *rxd; 1337 int i; 1338 1339 VGE_LOCK_ASSERT(sc); 1340 1341 sc->vge_cdata.vge_rx_prodidx = 0; 1342 sc->vge_cdata.vge_head = NULL; 1343 sc->vge_cdata.vge_tail = NULL; 1344 sc->vge_cdata.vge_rx_commit = 0; 1345 1346 rd = &sc->vge_rdata; 1347 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1348 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1349 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1350 rxd->rx_m = NULL; 1351 rxd->rx_desc = &rd->vge_rx_ring[i]; 1352 if (i == 0) 1353 rxd->rxd_prev = 1354 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1355 else 1356 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1357 if (vge_newbuf(sc, i) != 0) 1358 return (ENOBUFS); 1359 } 1360 1361 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1362 sc->vge_cdata.vge_rx_ring_map, 1363 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1364 1365 sc->vge_cdata.vge_rx_commit = 0; 1366 1367 return (0); 1368 } 1369 1370 static void 1371 vge_freebufs(struct vge_softc *sc) 1372 { 1373 struct vge_txdesc *txd; 1374 struct vge_rxdesc *rxd; 1375 struct ifnet *ifp; 
    int i;

    VGE_LOCK_ASSERT(sc);

    ifp = sc->vge_ifp;
    /*
     * Free RX and TX mbufs still in the queues.
     */
    for (i = 0; i < VGE_RX_DESC_CNT; i++) {
        rxd = &sc->vge_cdata.vge_rxdesc[i];
        if (rxd->rx_m != NULL) {
            bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
                rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
                rxd->rx_dmamap);
            m_freem(rxd->rx_m);
            rxd->rx_m = NULL;
        }
    }

    for (i = 0; i < VGE_TX_DESC_CNT; i++) {
        txd = &sc->vge_cdata.vge_txdesc[i];
        if (txd->tx_m != NULL) {
            bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
                txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
                txd->tx_dmamap);
            m_freem(txd->tx_m);
            txd->tx_m = NULL;
            if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
        }
    }
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vge_fixup_rx(struct mbuf *m)
{
    int i;
    uint16_t *src, *dst;

    src = mtod(m, uint16_t *);
    dst = src - 1;

    for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
        *dst++ = *src++;

    m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static int
vge_rxeof(struct vge_softc *sc, int count)
{
    struct mbuf *m;
    struct ifnet *ifp;
    int prod, prog, total_len;
    struct vge_rxdesc *rxd;
    struct vge_rx_desc *cur_rx;
    uint32_t rxstat, rxctl;

    VGE_LOCK_ASSERT(sc);

    ifp = sc->vge_ifp;

    bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
        sc->vge_cdata.vge_rx_ring_map,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    prod = sc->vge_cdata.vge_rx_prodidx;
    for (prog = 0; count > 0 &&
        (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
        VGE_RX_DESC_INC(prod)) {
        cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
        rxstat = le32toh(cur_rx->vge_sts);
        if ((rxstat & VGE_RDSTS_OWN) != 0)
            break;
        count--;
        prog++;
        rxctl = le32toh(cur_rx->vge_ctl);
        total_len = VGE_RXBYTES(rxstat);
        rxd = &sc->vge_cdata.vge_rxdesc[prod];
        m = rxd->rx_m;

        /*
         * If the 'start of frame' bit is set, this indicates
         * either the first fragment in a multi-fragment receive,
         * or an intermediate fragment. Either way, we want to
         * accumulate the buffers.
         */
        if ((rxstat & VGE_RXPKT_SOF) != 0) {
            if (vge_newbuf(sc, prod) != 0) {
                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
                VGE_CHAIN_RESET(sc);
                vge_discard_rxbuf(sc, prod);
                continue;
            }
            m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
            if (sc->vge_cdata.vge_head == NULL) {
                sc->vge_cdata.vge_head = m;
                sc->vge_cdata.vge_tail = m;
            } else {
                m->m_flags &= ~M_PKTHDR;
                sc->vge_cdata.vge_tail->m_next = m;
                sc->vge_cdata.vge_tail = m;
            }
            continue;
        }

        /*
         * Bad/error frames will have the RXOK bit cleared.
         * However, there's one error case we want to allow:
         * if a VLAN tagged frame arrives and the chip can't
         * match it against the CAM filter, it considers this
         * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
         * We don't want to drop the frame though: our VLAN
         * filtering is done in software.
         * We also want to receive frames with bad checksums
         * and frames with bad lengths.
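         * A frame is therefore only counted as an input error and
         * discarded below when RXOK is clear and none of the VIDM,
         * RLERR or CSUMERR bits account for it.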
1499 */ 1500 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1501 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1502 VGE_RDSTS_CSUMERR)) == 0) { 1503 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1504 /* 1505 * If this is part of a multi-fragment packet, 1506 * discard all the pieces. 1507 */ 1508 VGE_CHAIN_RESET(sc); 1509 vge_discard_rxbuf(sc, prod); 1510 continue; 1511 } 1512 1513 if (vge_newbuf(sc, prod) != 0) { 1514 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1515 VGE_CHAIN_RESET(sc); 1516 vge_discard_rxbuf(sc, prod); 1517 continue; 1518 } 1519 1520 /* Chain received mbufs. */ 1521 if (sc->vge_cdata.vge_head != NULL) { 1522 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1523 /* 1524 * Special case: if there's 4 bytes or less 1525 * in this buffer, the mbuf can be discarded: 1526 * the last 4 bytes is the CRC, which we don't 1527 * care about anyway. 1528 */ 1529 if (m->m_len <= ETHER_CRC_LEN) { 1530 sc->vge_cdata.vge_tail->m_len -= 1531 (ETHER_CRC_LEN - m->m_len); 1532 m_freem(m); 1533 } else { 1534 m->m_len -= ETHER_CRC_LEN; 1535 m->m_flags &= ~M_PKTHDR; 1536 sc->vge_cdata.vge_tail->m_next = m; 1537 } 1538 m = sc->vge_cdata.vge_head; 1539 m->m_flags |= M_PKTHDR; 1540 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1541 } else { 1542 m->m_flags |= M_PKTHDR; 1543 m->m_pkthdr.len = m->m_len = 1544 (total_len - ETHER_CRC_LEN); 1545 } 1546 1547 #ifndef __NO_STRICT_ALIGNMENT 1548 vge_fixup_rx(m); 1549 #endif 1550 m->m_pkthdr.rcvif = ifp; 1551 1552 /* Do RX checksumming if enabled */ 1553 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1554 (rxctl & VGE_RDCTL_FRAG) == 0) { 1555 /* Check IP header checksum */ 1556 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1557 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1558 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1559 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1560 1561 /* Check TCP/UDP checksum */ 1562 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1563 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1564 m->m_pkthdr.csum_flags |= 1565 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1566 m->m_pkthdr.csum_data = 0xffff; 1567 } 1568 } 1569 1570 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1571 /* 1572 * The 32-bit rxctl register is stored in little-endian. 1573 * However, the 16-bit vlan tag is stored in big-endian, 1574 * so we have to byte swap it. 1575 */ 1576 m->m_pkthdr.ether_vtag = 1577 bswap16(rxctl & VGE_RDCTL_VLANID); 1578 m->m_flags |= M_VLANTAG; 1579 } 1580 1581 VGE_UNLOCK(sc); 1582 (*ifp->if_input)(ifp, m); 1583 VGE_LOCK(sc); 1584 sc->vge_cdata.vge_head = NULL; 1585 sc->vge_cdata.vge_tail = NULL; 1586 } 1587 1588 if (prog > 0) { 1589 sc->vge_cdata.vge_rx_prodidx = prod; 1590 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1591 sc->vge_cdata.vge_rx_ring_map, 1592 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1593 /* Update residue counter. */ 1594 if (sc->vge_cdata.vge_rx_commit != 0) { 1595 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1596 sc->vge_cdata.vge_rx_commit); 1597 sc->vge_cdata.vge_rx_commit = 0; 1598 } 1599 } 1600 return (prog); 1601 } 1602 1603 static void 1604 vge_txeof(struct vge_softc *sc) 1605 { 1606 struct ifnet *ifp; 1607 struct vge_tx_desc *cur_tx; 1608 struct vge_txdesc *txd; 1609 uint32_t txstat; 1610 int cons, prod; 1611 1612 VGE_LOCK_ASSERT(sc); 1613 1614 ifp = sc->vge_ifp; 1615 1616 if (sc->vge_cdata.vge_tx_cnt == 0) 1617 return; 1618 1619 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1620 sc->vge_cdata.vge_tx_ring_map, 1621 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1622 1623 /* 1624 * Go through our tx list and free mbufs for those 1625 * frames that have been transmitted. 
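     * A descriptor that still has VGE_TDSTS_OWN set is still owned
     * by the hardware and ends the walk.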
1626 */ 1627 cons = sc->vge_cdata.vge_tx_considx; 1628 prod = sc->vge_cdata.vge_tx_prodidx; 1629 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1630 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1631 txstat = le32toh(cur_tx->vge_sts); 1632 if ((txstat & VGE_TDSTS_OWN) != 0) 1633 break; 1634 sc->vge_cdata.vge_tx_cnt--; 1635 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1636 1637 txd = &sc->vge_cdata.vge_txdesc[cons]; 1638 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1639 BUS_DMASYNC_POSTWRITE); 1640 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1641 1642 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1643 __func__)); 1644 m_freem(txd->tx_m); 1645 txd->tx_m = NULL; 1646 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1647 } 1648 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1649 sc->vge_cdata.vge_tx_ring_map, 1650 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1651 sc->vge_cdata.vge_tx_considx = cons; 1652 if (sc->vge_cdata.vge_tx_cnt == 0) 1653 sc->vge_timer = 0; 1654 } 1655 1656 static void 1657 vge_link_statchg(void *xsc) 1658 { 1659 struct vge_softc *sc; 1660 struct ifnet *ifp; 1661 uint8_t physts; 1662 1663 sc = xsc; 1664 ifp = sc->vge_ifp; 1665 VGE_LOCK_ASSERT(sc); 1666 1667 physts = CSR_READ_1(sc, VGE_PHYSTS0); 1668 if ((physts & VGE_PHYSTS_RESETSTS) == 0) { 1669 if ((physts & VGE_PHYSTS_LINK) == 0) { 1670 sc->vge_flags &= ~VGE_FLAG_LINK; 1671 if_link_state_change(sc->vge_ifp, 1672 LINK_STATE_DOWN); 1673 } else { 1674 sc->vge_flags |= VGE_FLAG_LINK; 1675 if_link_state_change(sc->vge_ifp, 1676 LINK_STATE_UP); 1677 CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE | 1678 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1679 if ((physts & VGE_PHYSTS_FDX) != 0) { 1680 if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0) 1681 CSR_WRITE_1(sc, VGE_CRS2, 1682 VGE_CR2_FDX_TXFLOWCTL_ENABLE); 1683 if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0) 1684 CSR_WRITE_1(sc, VGE_CRS2, 1685 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1686 } 1687 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1688 vge_start_locked(ifp); 1689 } 1690 } 1691 /* 1692 * Restart MII auto-polling because link state change interrupt 1693 * will disable it. 1694 */ 1695 vge_miipoll_start(sc); 1696 } 1697 1698 #ifdef DEVICE_POLLING 1699 static int 1700 vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 1701 { 1702 struct vge_softc *sc = ifp->if_softc; 1703 int rx_npkts = 0; 1704 1705 VGE_LOCK(sc); 1706 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1707 goto done; 1708 1709 rx_npkts = vge_rxeof(sc, count); 1710 vge_txeof(sc); 1711 1712 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1713 vge_start_locked(ifp); 1714 1715 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1716 uint32_t status; 1717 status = CSR_READ_4(sc, VGE_ISR); 1718 if (status == 0xFFFFFFFF) 1719 goto done; 1720 if (status) 1721 CSR_WRITE_4(sc, VGE_ISR, status); 1722 1723 /* 1724 * XXX check behaviour on receiver stalls. 
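         * For now a TX or RX DMA stall is recovered by a full
         * reinitialization, while an RX FIFO overflow or descriptor
         * shortage only has the RX queue drained and woken up again.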
1725 */ 1726 1727 if (status & VGE_ISR_TXDMA_STALL || 1728 status & VGE_ISR_RXDMA_STALL) { 1729 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1730 vge_init_locked(sc); 1731 } 1732 1733 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1734 vge_rxeof(sc, count); 1735 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1736 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1737 } 1738 } 1739 done: 1740 VGE_UNLOCK(sc); 1741 return (rx_npkts); 1742 } 1743 #endif /* DEVICE_POLLING */ 1744 1745 static void 1746 vge_intr(void *arg) 1747 { 1748 struct vge_softc *sc; 1749 struct ifnet *ifp; 1750 uint32_t status; 1751 1752 sc = arg; 1753 VGE_LOCK(sc); 1754 1755 ifp = sc->vge_ifp; 1756 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 || 1757 (ifp->if_flags & IFF_UP) == 0) { 1758 VGE_UNLOCK(sc); 1759 return; 1760 } 1761 1762 #ifdef DEVICE_POLLING 1763 if (ifp->if_capenable & IFCAP_POLLING) { 1764 status = CSR_READ_4(sc, VGE_ISR); 1765 CSR_WRITE_4(sc, VGE_ISR, status); 1766 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0) 1767 vge_link_statchg(sc); 1768 VGE_UNLOCK(sc); 1769 return; 1770 } 1771 #endif 1772 1773 /* Disable interrupts */ 1774 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1775 status = CSR_READ_4(sc, VGE_ISR); 1776 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD); 1777 /* If the card has gone away the read returns 0xffff. */ 1778 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0) 1779 goto done; 1780 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1781 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1782 vge_rxeof(sc, VGE_RX_DESC_CNT); 1783 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1784 vge_rxeof(sc, VGE_RX_DESC_CNT); 1785 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1786 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1787 } 1788 1789 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO)) 1790 vge_txeof(sc); 1791 1792 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1793 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1794 vge_init_locked(sc); 1795 } 1796 1797 if (status & VGE_ISR_LINKSTS) 1798 vge_link_statchg(sc); 1799 } 1800 done: 1801 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1802 /* Re-enable interrupts */ 1803 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1804 1805 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1806 vge_start_locked(ifp); 1807 } 1808 VGE_UNLOCK(sc); 1809 } 1810 1811 static int 1812 vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1813 { 1814 struct vge_txdesc *txd; 1815 struct vge_tx_frag *frag; 1816 struct mbuf *m; 1817 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1818 int error, i, nsegs, padlen; 1819 uint32_t cflags; 1820 1821 VGE_LOCK_ASSERT(sc); 1822 1823 M_ASSERTPKTHDR((*m_head)); 1824 1825 /* Argh. This chip does not autopad short frames. */ 1826 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1827 m = *m_head; 1828 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1829 if (M_WRITABLE(m) == 0) { 1830 /* Get a writable copy. */ 1831 m = m_dup(*m_head, M_NOWAIT); 1832 m_freem(*m_head); 1833 if (m == NULL) { 1834 *m_head = NULL; 1835 return (ENOBUFS); 1836 } 1837 *m_head = m; 1838 } 1839 if (M_TRAILINGSPACE(m) < padlen) { 1840 m = m_defrag(m, M_NOWAIT); 1841 if (m == NULL) { 1842 m_freem(*m_head); 1843 *m_head = NULL; 1844 return (ENOBUFS); 1845 } 1846 } 1847 /* 1848 * Manually pad short frames, and zero the pad space 1849 * to avoid leaking data. 
1850 */ 1851 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1852 m->m_pkthdr.len += padlen; 1853 m->m_len = m->m_pkthdr.len; 1854 *m_head = m; 1855 } 1856 1857 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1858 1859 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1860 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1861 if (error == EFBIG) { 1862 m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS); 1863 if (m == NULL) { 1864 m_freem(*m_head); 1865 *m_head = NULL; 1866 return (ENOMEM); 1867 } 1868 *m_head = m; 1869 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1870 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1871 if (error != 0) { 1872 m_freem(*m_head); 1873 *m_head = NULL; 1874 return (error); 1875 } 1876 } else if (error != 0) 1877 return (error); 1878 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1879 BUS_DMASYNC_PREWRITE); 1880 1881 m = *m_head; 1882 cflags = 0; 1883 1884 /* Configure checksum offload. */ 1885 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1886 cflags |= VGE_TDCTL_IPCSUM; 1887 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1888 cflags |= VGE_TDCTL_TCPCSUM; 1889 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1890 cflags |= VGE_TDCTL_UDPCSUM; 1891 1892 /* Configure VLAN. */ 1893 if ((m->m_flags & M_VLANTAG) != 0) 1894 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1895 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1896 /* 1897 * XXX 1898 * Velocity family seems to support TSO but no information 1899 * for MSS configuration is available. Also the number of 1900 * fragments supported by a descriptor is too small to hold 1901 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1902 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1903 * longer chain of buffers but no additional information is 1904 * available. 1905 * 1906 * When telling the chip how many segments there are, we 1907 * must use nsegs + 1 instead of just nsegs. Darned if I 1908 * know why. This also means we can't use the last fragment 1909 * field of Tx descriptor. 1910 */ 1911 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1912 VGE_TD_LS_NORM); 1913 for (i = 0; i < nsegs; i++) { 1914 frag = &txd->tx_desc->vge_frag[i]; 1915 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1916 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1917 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1918 } 1919 1920 sc->vge_cdata.vge_tx_cnt++; 1921 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1922 1923 /* 1924 * Finally request interrupt and give the first descriptor 1925 * ownership to hardware. 1926 */ 1927 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1928 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1929 txd->tx_m = m; 1930 1931 return (0); 1932 } 1933 1934 /* 1935 * Main transmit routine. 
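 *
 * vge_start() is the ifnet if_start entry point; it only takes the
 * softc lock and defers the real work to vge_start_locked().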
1936 */ 1937 1938 static void 1939 vge_start(struct ifnet *ifp) 1940 { 1941 struct vge_softc *sc; 1942 1943 sc = ifp->if_softc; 1944 VGE_LOCK(sc); 1945 vge_start_locked(ifp); 1946 VGE_UNLOCK(sc); 1947 } 1948 1949 1950 static void 1951 vge_start_locked(struct ifnet *ifp) 1952 { 1953 struct vge_softc *sc; 1954 struct vge_txdesc *txd; 1955 struct mbuf *m_head; 1956 int enq, idx; 1957 1958 sc = ifp->if_softc; 1959 1960 VGE_LOCK_ASSERT(sc); 1961 1962 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1963 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1964 IFF_DRV_RUNNING) 1965 return; 1966 1967 idx = sc->vge_cdata.vge_tx_prodidx; 1968 VGE_TX_DESC_DEC(idx); 1969 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1970 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1971 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1972 if (m_head == NULL) 1973 break; 1974 /* 1975 * Pack the data into the transmit ring. If we 1976 * don't have room, set the OACTIVE flag and wait 1977 * for the NIC to drain the ring. 1978 */ 1979 if (vge_encap(sc, &m_head)) { 1980 if (m_head == NULL) 1981 break; 1982 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1983 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1984 break; 1985 } 1986 1987 txd = &sc->vge_cdata.vge_txdesc[idx]; 1988 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1989 VGE_TX_DESC_INC(idx); 1990 1991 enq++; 1992 /* 1993 * If there's a BPF listener, bounce a copy of this frame 1994 * to him. 1995 */ 1996 ETHER_BPF_MTAP(ifp, m_head); 1997 } 1998 1999 if (enq > 0) { 2000 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 2001 sc->vge_cdata.vge_tx_ring_map, 2002 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2003 /* Issue a transmit command. */ 2004 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 2005 /* 2006 * Set a timeout in case the chip goes out to lunch. 2007 */ 2008 sc->vge_timer = 5; 2009 } 2010 } 2011 2012 static void 2013 vge_init(void *xsc) 2014 { 2015 struct vge_softc *sc = xsc; 2016 2017 VGE_LOCK(sc); 2018 vge_init_locked(sc); 2019 VGE_UNLOCK(sc); 2020 } 2021 2022 static void 2023 vge_init_locked(struct vge_softc *sc) 2024 { 2025 struct ifnet *ifp = sc->vge_ifp; 2026 int error, i; 2027 2028 VGE_LOCK_ASSERT(sc); 2029 2030 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2031 return; 2032 2033 /* 2034 * Cancel pending I/O and free all RX/TX buffers. 2035 */ 2036 vge_stop(sc); 2037 vge_reset(sc); 2038 vge_miipoll_start(sc); 2039 2040 /* 2041 * Initialize the RX and TX descriptors and mbufs. 2042 */ 2043 2044 error = vge_rx_list_init(sc); 2045 if (error != 0) { 2046 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2047 return; 2048 } 2049 vge_tx_list_init(sc); 2050 /* Clear MAC statistics. */ 2051 vge_stats_clear(sc); 2052 /* Set our station address */ 2053 for (i = 0; i < ETHER_ADDR_LEN; i++) 2054 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); 2055 2056 /* 2057 * Set receive FIFO threshold. Also allow transmission and 2058 * reception of VLAN tagged frames. 
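 * The threshold field is cleared and then set to 128 bytes, which is
 * presumably the amount of data the RX MAC buffers before it starts
 * forwarding a frame to host memory.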
2059 */
2060 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2061 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);
2062
2063 /* Set DMA burst length */
2064 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2065 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2066
2067 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2068
2069 /* Set collision backoff algorithm */
2070 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2071 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2072 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2073
2074 /* Disable LPSEL field in priority resolution */
2075 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2076
2077 /*
2078 * Load the addresses of the DMA queues into the chip.
2079 * Note that we only use one transmit queue.
2080 */
2081
2082 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2083 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2084 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2085 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2086 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2087
2088 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2089 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2090 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
2091 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
2092
2093 /* Configure interrupt moderation. */
2094 vge_intr_holdoff(sc);
2095
2096 /* Enable and wake up the RX descriptor queue */
2097 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2098 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2099
2100 /* Enable the TX descriptor queue */
2101 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2102
2103 /* Init the cam filter. */
2104 vge_cam_clear(sc);
2105
2106 /* Set up receiver filter. */
2107 vge_rxfilter(sc);
2108 vge_setvlan(sc);
2109
2110 /* Initialize pause timer. */
2111 CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
2112 /*
2113 * Initialize flow control parameters.
2114 * TX XON high threshold : 48
2115 * TX pause low threshold : 24
2116 * Disable half-duplex flow control
2117 */
2118 CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
2119 CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);
2120
2121 /* Enable jumbo frame reception (if desired) */
2122
2123 /* Start the MAC. */
2124 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2125 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2126 CSR_WRITE_1(sc, VGE_CRS0,
2127 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2128
2129 #ifdef DEVICE_POLLING
2130 /*
2131 * Disable interrupts except link state change if we are polling.
2132 */
2133 if (ifp->if_capenable & IFCAP_POLLING) {
2134 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
2135 } else /* otherwise ... */
2136 #endif
2137 {
2138 /*
2139 * Enable interrupts.
2140 */
2141 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2142 }
2143 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2144 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2145
2146 sc->vge_flags &= ~VGE_FLAG_LINK;
2147 vge_ifmedia_upd_locked(sc);
2148
2149 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2150 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2151 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2152 }
2153
2154 /*
2155 * Set media options.
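 * Invoked when the interface media setting changes (e.g. via
 * ifconfig(8)). Resets the PHYs, reprograms the forced MAC mode bits
 * through vge_setmedia() and then lets mii_mediachg() do the rest.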
2156 */ 2157 static int 2158 vge_ifmedia_upd(struct ifnet *ifp) 2159 { 2160 struct vge_softc *sc; 2161 int error; 2162 2163 sc = ifp->if_softc; 2164 VGE_LOCK(sc); 2165 error = vge_ifmedia_upd_locked(sc); 2166 VGE_UNLOCK(sc); 2167 2168 return (error); 2169 } 2170 2171 static int 2172 vge_ifmedia_upd_locked(struct vge_softc *sc) 2173 { 2174 struct mii_data *mii; 2175 struct mii_softc *miisc; 2176 int error; 2177 2178 mii = device_get_softc(sc->vge_miibus); 2179 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2180 PHY_RESET(miisc); 2181 vge_setmedia(sc); 2182 error = mii_mediachg(mii); 2183 2184 return (error); 2185 } 2186 2187 /* 2188 * Report current media status. 2189 */ 2190 static void 2191 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2192 { 2193 struct vge_softc *sc; 2194 struct mii_data *mii; 2195 2196 sc = ifp->if_softc; 2197 mii = device_get_softc(sc->vge_miibus); 2198 2199 VGE_LOCK(sc); 2200 if ((ifp->if_flags & IFF_UP) == 0) { 2201 VGE_UNLOCK(sc); 2202 return; 2203 } 2204 mii_pollstat(mii); 2205 ifmr->ifm_active = mii->mii_media_active; 2206 ifmr->ifm_status = mii->mii_media_status; 2207 VGE_UNLOCK(sc); 2208 } 2209 2210 static void 2211 vge_setmedia(struct vge_softc *sc) 2212 { 2213 struct mii_data *mii; 2214 struct ifmedia_entry *ife; 2215 2216 mii = device_get_softc(sc->vge_miibus); 2217 ife = mii->mii_media.ifm_cur; 2218 2219 /* 2220 * If the user manually selects a media mode, we need to turn 2221 * on the forced MAC mode bit in the DIAGCTL register. If the 2222 * user happens to choose a full duplex mode, we also need to 2223 * set the 'force full duplex' bit. This applies only to 2224 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2225 * mode is disabled, and in 1000baseT mode, full duplex is 2226 * always implied, so we turn on the forced mode bit but leave 2227 * the FDX bit cleared. 
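 * The switch statement below implements this: MACFORCE is set for any
 * manually selected speed, and FDXFORCE only when a 10/100 full-duplex
 * mode was chosen.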
2228 */ 2229 2230 switch (IFM_SUBTYPE(ife->ifm_media)) { 2231 case IFM_AUTO: 2232 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2233 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2234 break; 2235 case IFM_1000_T: 2236 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2237 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2238 break; 2239 case IFM_100_TX: 2240 case IFM_10_T: 2241 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2242 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2243 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2244 } else { 2245 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2246 } 2247 break; 2248 default: 2249 device_printf(sc->vge_dev, "unknown media type: %x\n", 2250 IFM_SUBTYPE(ife->ifm_media)); 2251 break; 2252 } 2253 } 2254 2255 static int 2256 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2257 { 2258 struct vge_softc *sc = ifp->if_softc; 2259 struct ifreq *ifr = (struct ifreq *) data; 2260 struct mii_data *mii; 2261 int error = 0, mask; 2262 2263 switch (command) { 2264 case SIOCSIFMTU: 2265 VGE_LOCK(sc); 2266 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU) 2267 error = EINVAL; 2268 else if (ifp->if_mtu != ifr->ifr_mtu) { 2269 if (ifr->ifr_mtu > ETHERMTU && 2270 (sc->vge_flags & VGE_FLAG_JUMBO) == 0) 2271 error = EINVAL; 2272 else 2273 ifp->if_mtu = ifr->ifr_mtu; 2274 } 2275 VGE_UNLOCK(sc); 2276 break; 2277 case SIOCSIFFLAGS: 2278 VGE_LOCK(sc); 2279 if ((ifp->if_flags & IFF_UP) != 0) { 2280 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2281 ((ifp->if_flags ^ sc->vge_if_flags) & 2282 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2283 vge_rxfilter(sc); 2284 else 2285 vge_init_locked(sc); 2286 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2287 vge_stop(sc); 2288 sc->vge_if_flags = ifp->if_flags; 2289 VGE_UNLOCK(sc); 2290 break; 2291 case SIOCADDMULTI: 2292 case SIOCDELMULTI: 2293 VGE_LOCK(sc); 2294 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2295 vge_rxfilter(sc); 2296 VGE_UNLOCK(sc); 2297 break; 2298 case SIOCGIFMEDIA: 2299 case SIOCSIFMEDIA: 2300 mii = device_get_softc(sc->vge_miibus); 2301 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2302 break; 2303 case SIOCSIFCAP: 2304 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2305 #ifdef DEVICE_POLLING 2306 if (mask & IFCAP_POLLING) { 2307 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2308 error = ether_poll_register(vge_poll, ifp); 2309 if (error) 2310 return (error); 2311 VGE_LOCK(sc); 2312 /* Disable interrupts */ 2313 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2314 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2315 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2316 ifp->if_capenable |= IFCAP_POLLING; 2317 VGE_UNLOCK(sc); 2318 } else { 2319 error = ether_poll_deregister(ifp); 2320 /* Enable interrupts. 
*/ 2321 VGE_LOCK(sc); 2322 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2323 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2324 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2325 ifp->if_capenable &= ~IFCAP_POLLING; 2326 VGE_UNLOCK(sc); 2327 } 2328 } 2329 #endif /* DEVICE_POLLING */ 2330 VGE_LOCK(sc); 2331 if ((mask & IFCAP_TXCSUM) != 0 && 2332 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2333 ifp->if_capenable ^= IFCAP_TXCSUM; 2334 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2335 ifp->if_hwassist |= VGE_CSUM_FEATURES; 2336 else 2337 ifp->if_hwassist &= ~VGE_CSUM_FEATURES; 2338 } 2339 if ((mask & IFCAP_RXCSUM) != 0 && 2340 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 2341 ifp->if_capenable ^= IFCAP_RXCSUM; 2342 if ((mask & IFCAP_WOL_UCAST) != 0 && 2343 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2344 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2345 if ((mask & IFCAP_WOL_MCAST) != 0 && 2346 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2347 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2348 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2349 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2350 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2351 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2352 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2353 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2354 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2355 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 2356 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2357 vge_setvlan(sc); 2358 } 2359 VGE_UNLOCK(sc); 2360 VLAN_CAPABILITIES(ifp); 2361 break; 2362 default: 2363 error = ether_ioctl(ifp, command, data); 2364 break; 2365 } 2366 2367 return (error); 2368 } 2369 2370 static void 2371 vge_watchdog(void *arg) 2372 { 2373 struct vge_softc *sc; 2374 struct ifnet *ifp; 2375 2376 sc = arg; 2377 VGE_LOCK_ASSERT(sc); 2378 vge_stats_update(sc); 2379 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2380 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2381 return; 2382 2383 ifp = sc->vge_ifp; 2384 if_printf(ifp, "watchdog timeout\n"); 2385 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2386 2387 vge_txeof(sc); 2388 vge_rxeof(sc, VGE_RX_DESC_CNT); 2389 2390 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2391 vge_init_locked(sc); 2392 } 2393 2394 /* 2395 * Stop the adapter and free any mbufs allocated to the 2396 * RX and TX lists. 2397 */ 2398 static void 2399 vge_stop(struct vge_softc *sc) 2400 { 2401 struct ifnet *ifp; 2402 2403 VGE_LOCK_ASSERT(sc); 2404 ifp = sc->vge_ifp; 2405 sc->vge_timer = 0; 2406 callout_stop(&sc->vge_watchdog); 2407 2408 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2409 2410 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2411 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2412 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2413 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2414 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2415 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2416 2417 vge_stats_update(sc); 2418 VGE_CHAIN_RESET(sc); 2419 vge_txeof(sc); 2420 vge_freebufs(sc); 2421 } 2422 2423 /* 2424 * Device suspend routine. Stop the interface and save some PCI 2425 * settings in case the BIOS doesn't restore them properly on 2426 * resume. 2427 */ 2428 static int 2429 vge_suspend(device_t dev) 2430 { 2431 struct vge_softc *sc; 2432 2433 sc = device_get_softc(dev); 2434 2435 VGE_LOCK(sc); 2436 vge_stop(sc); 2437 vge_setwol(sc); 2438 sc->vge_flags |= VGE_FLAG_SUSPENDED; 2439 VGE_UNLOCK(sc); 2440 2441 return (0); 2442 } 2443 2444 /* 2445 * Device resume routine. 
Restore some PCI settings in case the BIOS 2446 * doesn't, re-enable busmastering, and restart the interface if 2447 * appropriate. 2448 */ 2449 static int 2450 vge_resume(device_t dev) 2451 { 2452 struct vge_softc *sc; 2453 struct ifnet *ifp; 2454 uint16_t pmstat; 2455 2456 sc = device_get_softc(dev); 2457 VGE_LOCK(sc); 2458 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) { 2459 /* Disable PME and clear PME status. */ 2460 pmstat = pci_read_config(sc->vge_dev, 2461 sc->vge_pmcap + PCIR_POWER_STATUS, 2); 2462 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2463 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2464 pci_write_config(sc->vge_dev, 2465 sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2466 } 2467 } 2468 vge_clrwol(sc); 2469 /* Restart MII auto-polling. */ 2470 vge_miipoll_start(sc); 2471 ifp = sc->vge_ifp; 2472 /* Reinitialize interface if necessary. */ 2473 if ((ifp->if_flags & IFF_UP) != 0) { 2474 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2475 vge_init_locked(sc); 2476 } 2477 sc->vge_flags &= ~VGE_FLAG_SUSPENDED; 2478 VGE_UNLOCK(sc); 2479 2480 return (0); 2481 } 2482 2483 /* 2484 * Stop all chip I/O so that the kernel's probe routines don't 2485 * get confused by errant DMAs when rebooting. 2486 */ 2487 static int 2488 vge_shutdown(device_t dev) 2489 { 2490 2491 return (vge_suspend(dev)); 2492 } 2493 2494 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2495 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2496 2497 static void 2498 vge_sysctl_node(struct vge_softc *sc) 2499 { 2500 struct sysctl_ctx_list *ctx; 2501 struct sysctl_oid_list *child, *parent; 2502 struct sysctl_oid *tree; 2503 struct vge_hw_stats *stats; 2504 2505 stats = &sc->vge_stats; 2506 ctx = device_get_sysctl_ctx(sc->vge_dev); 2507 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev)); 2508 2509 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff", 2510 CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff"); 2511 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt", 2512 CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet"); 2513 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt", 2514 CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet"); 2515 2516 /* Pull in device tunables. */ 2517 sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT; 2518 resource_int_value(device_get_name(sc->vge_dev), 2519 device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff); 2520 sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT; 2521 resource_int_value(device_get_name(sc->vge_dev), 2522 device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt); 2523 sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT; 2524 resource_int_value(device_get_name(sc->vge_dev), 2525 device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt); 2526 2527 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2528 NULL, "VGE statistics"); 2529 parent = SYSCTL_CHILDREN(tree); 2530 2531 /* Rx statistics. 
*/
2532 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2533 NULL, "RX MAC statistics");
2534 child = SYSCTL_CHILDREN(tree);
2535 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
2536 &stats->rx_frames, "frames");
2537 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2538 &stats->rx_good_frames, "Good frames");
2539 VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2540 &stats->rx_fifo_oflows, "FIFO overflows");
2541 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
2542 &stats->rx_runts, "Too short frames");
2543 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
2544 &stats->rx_runts_errs, "Too short frames with errors");
2545 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2546 &stats->rx_pkts_64, "64 bytes frames");
2547 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2548 &stats->rx_pkts_65_127, "65 to 127 bytes frames");
2549 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2550 &stats->rx_pkts_128_255, "128 to 255 bytes frames");
2551 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2552 &stats->rx_pkts_256_511, "256 to 511 bytes frames");
2553 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2554 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
2555 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2556 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
2557 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
2558 &stats->rx_pkts_1519_max, "1519 to max frames");
2559 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
2560 &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors");
2561 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2562 &stats->rx_jumbos, "Jumbo frames");
2563 VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2564 &stats->rx_crcerrs, "CRC errors");
2565 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2566 &stats->rx_pause_frames, "Pause frames");
2567 VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2568 &stats->rx_alignerrs, "Alignment errors");
2569 VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
2570 &stats->rx_nobufs, "Frames with no buffer event");
2571 VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2572 &stats->rx_symerrs, "Frames with symbol errors");
2573 VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2574 &stats->rx_lenerrs, "Frames with mismatched length");
2575
2576 /* Tx statistics.
*/ 2577 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2578 NULL, "TX MAC statistics"); 2579 child = SYSCTL_CHILDREN(tree); 2580 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2581 &stats->tx_good_frames, "Good frames"); 2582 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2583 &stats->tx_pkts_64, "64 bytes frames"); 2584 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2585 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 2586 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2587 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 2588 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2589 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 2590 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2591 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 2592 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2593 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2594 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2595 &stats->tx_jumbos, "Jumbo frames"); 2596 VGE_SYSCTL_STAT_ADD32(ctx, child, "colls", 2597 &stats->tx_colls, "Collisions"); 2598 VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2599 &stats->tx_latecolls, "Late collisions"); 2600 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2601 &stats->tx_pause, "Pause frames"); 2602 #ifdef VGE_ENABLE_SQEERR 2603 VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs", 2604 &stats->tx_sqeerrs, "SQE errors"); 2605 #endif 2606 /* Clear MAC statistics. */ 2607 vge_stats_clear(sc); 2608 } 2609 2610 #undef VGE_SYSCTL_STAT_ADD32 2611 2612 static void 2613 vge_stats_clear(struct vge_softc *sc) 2614 { 2615 int i; 2616 2617 CSR_WRITE_1(sc, VGE_MIBCSR, 2618 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE); 2619 CSR_WRITE_1(sc, VGE_MIBCSR, 2620 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR); 2621 for (i = VGE_TIMEOUT; i > 0; i--) { 2622 DELAY(1); 2623 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0) 2624 break; 2625 } 2626 if (i == 0) 2627 device_printf(sc->vge_dev, "MIB clear timed out!\n"); 2628 CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) & 2629 ~VGE_MIBCSR_FREEZE); 2630 } 2631 2632 static void 2633 vge_stats_update(struct vge_softc *sc) 2634 { 2635 struct vge_hw_stats *stats; 2636 struct ifnet *ifp; 2637 uint32_t mib[VGE_MIB_CNT], val; 2638 int i; 2639 2640 VGE_LOCK_ASSERT(sc); 2641 2642 stats = &sc->vge_stats; 2643 ifp = sc->vge_ifp; 2644 2645 CSR_WRITE_1(sc, VGE_MIBCSR, 2646 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH); 2647 for (i = VGE_TIMEOUT; i > 0; i--) { 2648 DELAY(1); 2649 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0) 2650 break; 2651 } 2652 if (i == 0) { 2653 device_printf(sc->vge_dev, "MIB counter dump timed out!\n"); 2654 vge_stats_clear(sc); 2655 return; 2656 } 2657 2658 bzero(mib, sizeof(mib)); 2659 reset_idx: 2660 /* Set MIB read index to 0. */ 2661 CSR_WRITE_1(sc, VGE_MIBCSR, 2662 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI); 2663 for (i = 0; i < VGE_MIB_CNT; i++) { 2664 val = CSR_READ_4(sc, VGE_MIBDATA); 2665 if (i != VGE_MIB_DATA_IDX(val)) { 2666 /* Reading interrupted. */ 2667 goto reset_idx; 2668 } 2669 mib[i] = val & VGE_MIB_DATA_MASK; 2670 } 2671 2672 /* Rx stats. 
*/
2673 stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
2674 stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
2675 stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
2676 stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
2677 stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
2678 stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
2679 stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
2680 stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
2681 stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
2682 stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
2683 stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
2684 stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
2685 stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
2686 stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
2687 stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
2688 stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
2689 stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
2690 stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
2691 stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
2692 stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];
2693
2694 /* Tx stats. */
2695 stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
2696 stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
2697 stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
2698 stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
2699 stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
2700 stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
2701 stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
2702 stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
2703 stats->tx_colls += mib[VGE_MIB_TX_COLLS];
2704 stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
2705 #ifdef VGE_ENABLE_SQEERR
2706 stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
2707 #endif
2708 stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];
2709
2710 /* Update counters in ifnet. */
2711 if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]);
2712
2713 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2714 mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);
2715
2716 if_inc_counter(ifp, IFCOUNTER_OERRORS,
2717 mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);
2718
2719 if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]);
2720
2721 if_inc_counter(ifp, IFCOUNTER_IERRORS,
2722 mib[VGE_MIB_RX_FIFO_OVERRUNS] +
2723 mib[VGE_MIB_RX_RUNTS] +
2724 mib[VGE_MIB_RX_RUNTS_ERRS] +
2725 mib[VGE_MIB_RX_CRCERRS] +
2726 mib[VGE_MIB_RX_ALIGNERRS] +
2727 mib[VGE_MIB_RX_NOBUFS] +
2728 mib[VGE_MIB_RX_SYMERRS] +
2729 mib[VGE_MIB_RX_LENERRS]);
2730 }
2731
2732 static void
2733 vge_intr_holdoff(struct vge_softc *sc)
2734 {
2735 uint8_t intctl;
2736
2737 VGE_LOCK_ASSERT(sc);
2738
2739 /*
2740 * Set Tx interrupt suppression threshold.
2741 * It's possible to use a single-shot timer in the VGE_CRS1 register
2742 * in the Tx path such that the driver can remove most of the Tx
2743 * completion interrupts. However, this requires additional access to
2744 * the VGE_CRS1 register to reload the timer in addition to
2745 * activating the Tx kick command. Another downside is that we don't
2746 * know what single-shot timer value should be used in advance, so
2747 * reclaiming transmitted mbufs could be delayed a lot, which in
2748 * turn slows down Tx operation.
2749 */
2750 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
2751 CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);
2752
2753 /* Set Rx interrupt suppression threshold.
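 * A vge_rx_coal_pkt value of zero (or less) is treated below as a
 * request to disable Rx interrupt suppression altogether via
 * VGE_INTCTL_RXINTSUP_DISABLE.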
*/
2754 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2755 CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);
2756
2757 intctl = CSR_READ_1(sc, VGE_INTCTL1);
2758 intctl &= ~VGE_INTCTL_SC_RELOAD;
2759 intctl |= VGE_INTCTL_HC_RELOAD;
2760 if (sc->vge_tx_coal_pkt <= 0)
2761 intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
2762 else
2763 intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
2764 if (sc->vge_rx_coal_pkt <= 0)
2765 intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
2766 else
2767 intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
2768 CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
2769 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
2770 if (sc->vge_int_holdoff > 0) {
2771 /* Set interrupt holdoff timer. */
2772 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2773 CSR_WRITE_1(sc, VGE_INTHOLDOFF,
2774 VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
2775 /* Enable holdoff timer. */
2776 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2777 }
2778 }
2779
2780 static void
2781 vge_setlinkspeed(struct vge_softc *sc)
2782 {
2783 struct mii_data *mii;
2784 int aneg, i;
2785
2786 VGE_LOCK_ASSERT(sc);
2787
2788 mii = device_get_softc(sc->vge_miibus);
2789 mii_pollstat(mii);
2790 aneg = 0;
2791 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2792 (IFM_ACTIVE | IFM_AVALID)) {
2793 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2794 case IFM_10_T:
2795 case IFM_100_TX:
2796 return;
2797 case IFM_1000_T:
2798 aneg++;
2799 default:
2800 break;
2801 }
2802 }
2803 /* Clear forced MAC speed/duplex configuration. */
2804 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2805 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2806 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
2807 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
2808 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
2809 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2810 BMCR_AUTOEN | BMCR_STARTNEG);
2811 DELAY(1000);
2812 if (aneg != 0) {
2813 /* Poll link state until vge(4) gets a 10/100 link. */
2814 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
2815 mii_pollstat(mii);
2816 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
2817 == (IFM_ACTIVE | IFM_AVALID)) {
2818 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2819 case IFM_10_T:
2820 case IFM_100_TX:
2821 return;
2822 default:
2823 break;
2824 }
2825 }
2826 VGE_UNLOCK(sc);
2827 pause("vgelnk", hz);
2828 VGE_LOCK(sc);
2829 }
2830 if (i == MII_ANEGTICKS_GIGE)
2831 device_printf(sc->vge_dev, "establishing link failed, "
2832 "WOL may not work!\n");
2833 }
2834 /*
2835 * No link, force the MAC to a 100Mbps, full-duplex link.
2836 * This is the last resort and may or may not work.
2837 */
2838 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
2839 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
2840 }
2841
2842 static void
2843 vge_setwol(struct vge_softc *sc)
2844 {
2845 struct ifnet *ifp;
2846 uint16_t pmstat;
2847 uint8_t val;
2848
2849 VGE_LOCK_ASSERT(sc);
2850
2851 if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
2852 /* No PME capability; power down the PHY. */
2853 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2854 BMCR_PDOWN);
2855 vge_miipoll_stop(sc);
2856 return;
2857 }
2858
2859 ifp = sc->vge_ifp;
2860
2861 /* Clear WOL on pattern match. */
2862 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
2863 /* Disable WOL on magic/unicast packet.
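 * The events actually requested through the interface WOL capabilities
 * are re-enabled further down before the chip is put to sleep.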
*/ 2864 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2865 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2866 VGE_WOLCFG_PMEOVR); 2867 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2868 vge_setlinkspeed(sc); 2869 val = 0; 2870 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2871 val |= VGE_WOLCR1_UCAST; 2872 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2873 val |= VGE_WOLCR1_MAGIC; 2874 CSR_WRITE_1(sc, VGE_WOLCR1S, val); 2875 val = 0; 2876 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2877 val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB; 2878 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR); 2879 /* Disable MII auto-polling. */ 2880 vge_miipoll_stop(sc); 2881 } 2882 CSR_SETBIT_1(sc, VGE_DIAGCTL, 2883 VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE); 2884 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2885 2886 /* Clear WOL status on pattern match. */ 2887 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2888 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2889 2890 val = CSR_READ_1(sc, VGE_PWRSTAT); 2891 val |= VGE_STICKHW_SWPTAG; 2892 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2893 /* Put hardware into sleep. */ 2894 val = CSR_READ_1(sc, VGE_PWRSTAT); 2895 val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1; 2896 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2897 /* Request PME if WOL is requested. */ 2898 pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap + 2899 PCIR_POWER_STATUS, 2); 2900 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2901 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2902 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2903 pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS, 2904 pmstat, 2); 2905 } 2906 2907 static void 2908 vge_clrwol(struct vge_softc *sc) 2909 { 2910 uint8_t val; 2911 2912 val = CSR_READ_1(sc, VGE_PWRSTAT); 2913 val &= ~VGE_STICKHW_SWPTAG; 2914 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2915 /* Disable WOL and clear power state indicator. */ 2916 val = CSR_READ_1(sc, VGE_PWRSTAT); 2917 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); 2918 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2919 2920 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2921 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2922 2923 /* Clear WOL on pattern match. */ 2924 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2925 /* Disable WOL on magic/unicast packet. */ 2926 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2927 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2928 VGE_WOLCFG_PMEOVR); 2929 /* Clear WOL status on pattern match. */ 2930 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2931 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2932 } 2933