1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2004 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 /* 39 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 40 * 41 * Written by Bill Paul <wpaul@windriver.com> 42 * Senior Networking Software Engineer 43 * Wind River Systems 44 */ 45 46 /* 47 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that 48 * combines a tri-speed ethernet MAC and PHY, with the following 49 * features: 50 * 51 * o Jumbo frame support up to 16K 52 * o Transmit and receive flow control 53 * o IPv4 checksum offload 54 * o VLAN tag insertion and stripping 55 * o TCP large send 56 * o 64-bit multicast hash table filter 57 * o 64 entry CAM filter 58 * o 16K RX FIFO and 48K TX FIFO memory 59 * o Interrupt moderation 60 * 61 * The VT6122 supports up to four transmit DMA queues. The descriptors 62 * in the transmit ring can address up to 7 data fragments; frames which 63 * span more than 7 data buffers must be coalesced, but in general the 64 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 65 * long. The receive descriptors address only a single buffer. 66 * 67 * There are two peculiar design issues with the VT6122. One is that 68 * receive data buffers must be aligned on a 32-bit boundary. This is 69 * not a problem where the VT6122 is used as a LOM device in x86-based 70 * systems, but on architectures that generate unaligned access traps, we 71 * have to do some copying. 72 * 73 * The other issue has to do with the way 64-bit addresses are handled. 74 * The DMA descriptors only allow you to specify 48 bits of addressing 75 * information. The remaining 16 bits are specified using one of the 76 * I/O registers. 
If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);

/*
 * The SQE error counter of the MIB seems to report a bogus value.
 * The vendor's workaround does not seem to work on PCIe based
 * controllers. Disable it until we find a better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
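 * Every supported VT612x part is matched by the single VIA_DEVICEID_61XX
 * entry below.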
147 */ 148 static struct vge_type vge_devs[] = { 149 { VIA_VENDORID, VIA_DEVICEID_61XX, 150 "VIA Networking Velocity Gigabit Ethernet" }, 151 { 0, 0, NULL } 152 }; 153 154 static int vge_attach(device_t); 155 static int vge_detach(device_t); 156 static int vge_probe(device_t); 157 static int vge_resume(device_t); 158 static int vge_shutdown(device_t); 159 static int vge_suspend(device_t); 160 161 static void vge_cam_clear(struct vge_softc *); 162 static int vge_cam_set(struct vge_softc *, uint8_t *); 163 static void vge_clrwol(struct vge_softc *); 164 static void vge_discard_rxbuf(struct vge_softc *, int); 165 static int vge_dma_alloc(struct vge_softc *); 166 static void vge_dma_free(struct vge_softc *); 167 static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 168 #ifdef VGE_EEPROM 169 static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 170 #endif 171 static int vge_encap(struct vge_softc *, struct mbuf **); 172 #ifndef __NO_STRICT_ALIGNMENT 173 static __inline void 174 vge_fixup_rx(struct mbuf *); 175 #endif 176 static void vge_freebufs(struct vge_softc *); 177 static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 178 static int vge_ifmedia_upd(struct ifnet *); 179 static int vge_ifmedia_upd_locked(struct vge_softc *); 180 static void vge_init(void *); 181 static void vge_init_locked(struct vge_softc *); 182 static void vge_intr(void *); 183 static void vge_intr_holdoff(struct vge_softc *); 184 static int vge_ioctl(struct ifnet *, u_long, caddr_t); 185 static void vge_link_statchg(void *); 186 static int vge_miibus_readreg(device_t, int, int); 187 static int vge_miibus_writereg(device_t, int, int, int); 188 static void vge_miipoll_start(struct vge_softc *); 189 static void vge_miipoll_stop(struct vge_softc *); 190 static int vge_newbuf(struct vge_softc *, int); 191 static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 192 static void vge_reset(struct vge_softc *); 193 static int vge_rx_list_init(struct vge_softc *); 194 static int vge_rxeof(struct vge_softc *, int); 195 static void vge_rxfilter(struct vge_softc *); 196 static void vge_setmedia(struct vge_softc *); 197 static void vge_setvlan(struct vge_softc *); 198 static void vge_setwol(struct vge_softc *); 199 static void vge_start(struct ifnet *); 200 static void vge_start_locked(struct ifnet *); 201 static void vge_stats_clear(struct vge_softc *); 202 static void vge_stats_update(struct vge_softc *); 203 static void vge_stop(struct vge_softc *); 204 static void vge_sysctl_node(struct vge_softc *); 205 static int vge_tx_list_init(struct vge_softc *); 206 static void vge_txeof(struct vge_softc *); 207 static void vge_watchdog(void *); 208 209 static device_method_t vge_methods[] = { 210 /* Device interface */ 211 DEVMETHOD(device_probe, vge_probe), 212 DEVMETHOD(device_attach, vge_attach), 213 DEVMETHOD(device_detach, vge_detach), 214 DEVMETHOD(device_suspend, vge_suspend), 215 DEVMETHOD(device_resume, vge_resume), 216 DEVMETHOD(device_shutdown, vge_shutdown), 217 218 /* MII interface */ 219 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 220 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 221 222 DEVMETHOD_END 223 }; 224 225 static driver_t vge_driver = { 226 "vge", 227 vge_methods, 228 sizeof(struct vge_softc) 229 }; 230 231 static devclass_t vge_devclass; 232 233 DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); 234 DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); 235 236 #ifdef VGE_EEPROM 237 /* 238 * Read a word of data stored in the EEPROM at 
address 'addr.' 239 */ 240 static void 241 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 242 { 243 int i; 244 uint16_t word = 0; 245 246 /* 247 * Enter EEPROM embedded programming mode. In order to 248 * access the EEPROM at all, we first have to set the 249 * EELOAD bit in the CHIPCFG2 register. 250 */ 251 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 252 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 253 254 /* Select the address of the word we want to read */ 255 CSR_WRITE_1(sc, VGE_EEADDR, addr); 256 257 /* Issue read command */ 258 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 259 260 /* Wait for the done bit to be set. */ 261 for (i = 0; i < VGE_TIMEOUT; i++) { 262 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 263 break; 264 } 265 266 if (i == VGE_TIMEOUT) { 267 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 268 *dest = 0; 269 return; 270 } 271 272 /* Read the result */ 273 word = CSR_READ_2(sc, VGE_EERDDAT); 274 275 /* Turn off EEPROM access mode. */ 276 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 277 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 278 279 *dest = word; 280 } 281 #endif 282 283 /* 284 * Read a sequence of words from the EEPROM. 285 */ 286 static void 287 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 288 { 289 int i; 290 #ifdef VGE_EEPROM 291 uint16_t word = 0, *ptr; 292 293 for (i = 0; i < cnt; i++) { 294 vge_eeprom_getword(sc, off + i, &word); 295 ptr = (uint16_t *)(dest + (i * 2)); 296 if (swap) 297 *ptr = ntohs(word); 298 else 299 *ptr = word; 300 } 301 #else 302 for (i = 0; i < ETHER_ADDR_LEN; i++) 303 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 304 #endif 305 } 306 307 static void 308 vge_miipoll_stop(struct vge_softc *sc) 309 { 310 int i; 311 312 CSR_WRITE_1(sc, VGE_MIICMD, 0); 313 314 for (i = 0; i < VGE_TIMEOUT; i++) { 315 DELAY(1); 316 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 317 break; 318 } 319 320 if (i == VGE_TIMEOUT) 321 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 322 } 323 324 static void 325 vge_miipoll_start(struct vge_softc *sc) 326 { 327 int i; 328 329 /* First, make sure we're idle. */ 330 331 CSR_WRITE_1(sc, VGE_MIICMD, 0); 332 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 333 334 for (i = 0; i < VGE_TIMEOUT; i++) { 335 DELAY(1); 336 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 337 break; 338 } 339 340 if (i == VGE_TIMEOUT) { 341 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 342 return; 343 } 344 345 /* Now enable auto poll mode. */ 346 347 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 348 349 /* And make sure it started. */ 350 351 for (i = 0; i < VGE_TIMEOUT; i++) { 352 DELAY(1); 353 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 354 break; 355 } 356 357 if (i == VGE_TIMEOUT) 358 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 359 } 360 361 static int 362 vge_miibus_readreg(device_t dev, int phy, int reg) 363 { 364 struct vge_softc *sc; 365 int i; 366 uint16_t rval = 0; 367 368 sc = device_get_softc(dev); 369 370 vge_miipoll_stop(sc); 371 372 /* Specify the register we want to read. */ 373 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 374 375 /* Issue read command. */ 376 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 377 378 /* Wait for the read command bit to self-clear. 
 */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * The desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM.
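	 * Re-selecting the multicast address register (MAR) page below puts
	 * the filter logic back in its normal state.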
*/ 508 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 509 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 510 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 511 512 return (error); 513 } 514 515 static void 516 vge_setvlan(struct vge_softc *sc) 517 { 518 struct ifnet *ifp; 519 uint8_t cfg; 520 521 VGE_LOCK_ASSERT(sc); 522 523 ifp = sc->vge_ifp; 524 cfg = CSR_READ_1(sc, VGE_RXCFG); 525 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 526 cfg |= VGE_VTAG_OPT2; 527 else 528 cfg &= ~VGE_VTAG_OPT2; 529 CSR_WRITE_1(sc, VGE_RXCFG, cfg); 530 } 531 532 static u_int 533 vge_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 534 { 535 struct vge_softc *sc = arg; 536 537 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 538 return (0); 539 540 (void )vge_cam_set(sc, LLADDR(sdl)); 541 542 return (1); 543 } 544 545 static u_int 546 vge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 547 { 548 uint32_t h, *hashes = arg; 549 550 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; 551 if (h < 32) 552 hashes[0] |= (1 << h); 553 else 554 hashes[1] |= (1 << (h - 32)); 555 556 return (1); 557 } 558 559 560 /* 561 * Program the multicast filter. We use the 64-entry CAM filter 562 * for perfect filtering. If there's more than 64 multicast addresses, 563 * we use the hash filter instead. 564 */ 565 static void 566 vge_rxfilter(struct vge_softc *sc) 567 { 568 struct ifnet *ifp; 569 uint32_t hashes[2]; 570 uint8_t rxcfg; 571 572 VGE_LOCK_ASSERT(sc); 573 574 /* First, zot all the multicast entries. */ 575 hashes[0] = 0; 576 hashes[1] = 0; 577 578 rxcfg = CSR_READ_1(sc, VGE_RXCTL); 579 rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST | 580 VGE_RXCTL_RX_PROMISC); 581 /* 582 * Always allow VLAN oversized frames and frames for 583 * this host. 584 */ 585 rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST; 586 587 ifp = sc->vge_ifp; 588 if ((ifp->if_flags & IFF_BROADCAST) != 0) 589 rxcfg |= VGE_RXCTL_RX_BCAST; 590 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 591 if ((ifp->if_flags & IFF_PROMISC) != 0) 592 rxcfg |= VGE_RXCTL_RX_PROMISC; 593 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 594 hashes[0] = 0xFFFFFFFF; 595 hashes[1] = 0xFFFFFFFF; 596 } 597 goto done; 598 } 599 600 vge_cam_clear(sc); 601 602 /* Now program new ones */ 603 if_foreach_llmaddr(ifp, vge_set_maddr, sc); 604 605 /* If there were too many addresses, use the hash filter. */ 606 if (sc->vge_camidx == VGE_CAM_MAXADDRS) { 607 vge_cam_clear(sc); 608 if_foreach_llmaddr(ifp, vge_hash_maddr, hashes); 609 } 610 611 done: 612 if (hashes[0] != 0 || hashes[1] != 0) 613 rxcfg |= VGE_RXCTL_RX_MCAST; 614 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 615 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 616 CSR_WRITE_1(sc, VGE_RXCTL, rxcfg); 617 } 618 619 static void 620 vge_reset(struct vge_softc *sc) 621 { 622 int i; 623 624 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 625 626 for (i = 0; i < VGE_TIMEOUT; i++) { 627 DELAY(5); 628 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 629 break; 630 } 631 632 if (i == VGE_TIMEOUT) { 633 device_printf(sc->vge_dev, "soft reset timed out\n"); 634 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 635 DELAY(2000); 636 } 637 638 DELAY(5000); 639 } 640 641 /* 642 * Probe for a VIA gigabit chip. Check the PCI vendor and device 643 * IDs against our list and return a device name if we find a match. 
644 */ 645 static int 646 vge_probe(device_t dev) 647 { 648 struct vge_type *t; 649 650 t = vge_devs; 651 652 while (t->vge_name != NULL) { 653 if ((pci_get_vendor(dev) == t->vge_vid) && 654 (pci_get_device(dev) == t->vge_did)) { 655 device_set_desc(dev, t->vge_name); 656 return (BUS_PROBE_DEFAULT); 657 } 658 t++; 659 } 660 661 return (ENXIO); 662 } 663 664 /* 665 * Map a single buffer address. 666 */ 667 668 struct vge_dmamap_arg { 669 bus_addr_t vge_busaddr; 670 }; 671 672 static void 673 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 674 { 675 struct vge_dmamap_arg *ctx; 676 677 if (error != 0) 678 return; 679 680 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 681 682 ctx = (struct vge_dmamap_arg *)arg; 683 ctx->vge_busaddr = segs[0].ds_addr; 684 } 685 686 static int 687 vge_dma_alloc(struct vge_softc *sc) 688 { 689 struct vge_dmamap_arg ctx; 690 struct vge_txdesc *txd; 691 struct vge_rxdesc *rxd; 692 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 693 int error, i; 694 695 /* 696 * It seems old PCI controllers do not support DAC. DAC 697 * configuration can be enabled by accessing VGE_CHIPCFG3 698 * register but honor EEPROM configuration instead of 699 * blindly overriding DAC configuration. PCIe based 700 * controllers are supposed to support 64bit DMA so enable 701 * 64bit DMA on these controllers. 702 */ 703 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 704 lowaddr = BUS_SPACE_MAXADDR; 705 else 706 lowaddr = BUS_SPACE_MAXADDR_32BIT; 707 708 again: 709 /* Create parent ring tag. */ 710 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 711 1, 0, /* algnmnt, boundary */ 712 lowaddr, /* lowaddr */ 713 BUS_SPACE_MAXADDR, /* highaddr */ 714 NULL, NULL, /* filter, filterarg */ 715 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 716 0, /* nsegments */ 717 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 718 0, /* flags */ 719 NULL, NULL, /* lockfunc, lockarg */ 720 &sc->vge_cdata.vge_ring_tag); 721 if (error != 0) { 722 device_printf(sc->vge_dev, 723 "could not create parent DMA tag.\n"); 724 goto fail; 725 } 726 727 /* Create tag for Tx ring. */ 728 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 729 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 730 BUS_SPACE_MAXADDR, /* lowaddr */ 731 BUS_SPACE_MAXADDR, /* highaddr */ 732 NULL, NULL, /* filter, filterarg */ 733 VGE_TX_LIST_SZ, /* maxsize */ 734 1, /* nsegments */ 735 VGE_TX_LIST_SZ, /* maxsegsize */ 736 0, /* flags */ 737 NULL, NULL, /* lockfunc, lockarg */ 738 &sc->vge_cdata.vge_tx_ring_tag); 739 if (error != 0) { 740 device_printf(sc->vge_dev, 741 "could not allocate Tx ring DMA tag.\n"); 742 goto fail; 743 } 744 745 /* Create tag for Rx ring. */ 746 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 747 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 748 BUS_SPACE_MAXADDR, /* lowaddr */ 749 BUS_SPACE_MAXADDR, /* highaddr */ 750 NULL, NULL, /* filter, filterarg */ 751 VGE_RX_LIST_SZ, /* maxsize */ 752 1, /* nsegments */ 753 VGE_RX_LIST_SZ, /* maxsegsize */ 754 0, /* flags */ 755 NULL, NULL, /* lockfunc, lockarg */ 756 &sc->vge_cdata.vge_rx_ring_tag); 757 if (error != 0) { 758 device_printf(sc->vge_dev, 759 "could not allocate Rx ring DMA tag.\n"); 760 goto fail; 761 } 762 763 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
*/ 764 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 765 (void **)&sc->vge_rdata.vge_tx_ring, 766 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 767 &sc->vge_cdata.vge_tx_ring_map); 768 if (error != 0) { 769 device_printf(sc->vge_dev, 770 "could not allocate DMA'able memory for Tx ring.\n"); 771 goto fail; 772 } 773 774 ctx.vge_busaddr = 0; 775 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 776 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 777 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 778 if (error != 0 || ctx.vge_busaddr == 0) { 779 device_printf(sc->vge_dev, 780 "could not load DMA'able memory for Tx ring.\n"); 781 goto fail; 782 } 783 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 784 785 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 786 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 787 (void **)&sc->vge_rdata.vge_rx_ring, 788 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 789 &sc->vge_cdata.vge_rx_ring_map); 790 if (error != 0) { 791 device_printf(sc->vge_dev, 792 "could not allocate DMA'able memory for Rx ring.\n"); 793 goto fail; 794 } 795 796 ctx.vge_busaddr = 0; 797 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 798 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 799 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 800 if (error != 0 || ctx.vge_busaddr == 0) { 801 device_printf(sc->vge_dev, 802 "could not load DMA'able memory for Rx ring.\n"); 803 goto fail; 804 } 805 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 806 807 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 808 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 809 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 810 if ((VGE_ADDR_HI(tx_ring_end) != 811 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 812 (VGE_ADDR_HI(rx_ring_end) != 813 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 814 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 815 device_printf(sc->vge_dev, "4GB boundary crossed, " 816 "switching to 32bit DMA address mode.\n"); 817 vge_dma_free(sc); 818 /* Limit DMA address space to 32bit and try again. */ 819 lowaddr = BUS_SPACE_MAXADDR_32BIT; 820 goto again; 821 } 822 823 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 824 lowaddr = VGE_BUF_DMA_MAXADDR; 825 else 826 lowaddr = BUS_SPACE_MAXADDR_32BIT; 827 /* Create parent buffer tag. */ 828 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 829 1, 0, /* algnmnt, boundary */ 830 lowaddr, /* lowaddr */ 831 BUS_SPACE_MAXADDR, /* highaddr */ 832 NULL, NULL, /* filter, filterarg */ 833 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 834 0, /* nsegments */ 835 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 836 0, /* flags */ 837 NULL, NULL, /* lockfunc, lockarg */ 838 &sc->vge_cdata.vge_buffer_tag); 839 if (error != 0) { 840 device_printf(sc->vge_dev, 841 "could not create parent buffer DMA tag.\n"); 842 goto fail; 843 } 844 845 /* Create tag for Tx buffers. 
*/ 846 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 847 1, 0, /* algnmnt, boundary */ 848 BUS_SPACE_MAXADDR, /* lowaddr */ 849 BUS_SPACE_MAXADDR, /* highaddr */ 850 NULL, NULL, /* filter, filterarg */ 851 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 852 VGE_MAXTXSEGS, /* nsegments */ 853 MCLBYTES, /* maxsegsize */ 854 0, /* flags */ 855 NULL, NULL, /* lockfunc, lockarg */ 856 &sc->vge_cdata.vge_tx_tag); 857 if (error != 0) { 858 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 859 goto fail; 860 } 861 862 /* Create tag for Rx buffers. */ 863 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 864 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 865 BUS_SPACE_MAXADDR, /* lowaddr */ 866 BUS_SPACE_MAXADDR, /* highaddr */ 867 NULL, NULL, /* filter, filterarg */ 868 MCLBYTES, /* maxsize */ 869 1, /* nsegments */ 870 MCLBYTES, /* maxsegsize */ 871 0, /* flags */ 872 NULL, NULL, /* lockfunc, lockarg */ 873 &sc->vge_cdata.vge_rx_tag); 874 if (error != 0) { 875 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 876 goto fail; 877 } 878 879 /* Create DMA maps for Tx buffers. */ 880 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 881 txd = &sc->vge_cdata.vge_txdesc[i]; 882 txd->tx_m = NULL; 883 txd->tx_dmamap = NULL; 884 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 885 &txd->tx_dmamap); 886 if (error != 0) { 887 device_printf(sc->vge_dev, 888 "could not create Tx dmamap.\n"); 889 goto fail; 890 } 891 } 892 /* Create DMA maps for Rx buffers. */ 893 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 894 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 895 device_printf(sc->vge_dev, 896 "could not create spare Rx dmamap.\n"); 897 goto fail; 898 } 899 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 900 rxd = &sc->vge_cdata.vge_rxdesc[i]; 901 rxd->rx_m = NULL; 902 rxd->rx_dmamap = NULL; 903 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 904 &rxd->rx_dmamap); 905 if (error != 0) { 906 device_printf(sc->vge_dev, 907 "could not create Rx dmamap.\n"); 908 goto fail; 909 } 910 } 911 912 fail: 913 return (error); 914 } 915 916 static void 917 vge_dma_free(struct vge_softc *sc) 918 { 919 struct vge_txdesc *txd; 920 struct vge_rxdesc *rxd; 921 int i; 922 923 /* Tx ring. */ 924 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 925 if (sc->vge_rdata.vge_tx_ring_paddr) 926 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 927 sc->vge_cdata.vge_tx_ring_map); 928 if (sc->vge_rdata.vge_tx_ring) 929 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 930 sc->vge_rdata.vge_tx_ring, 931 sc->vge_cdata.vge_tx_ring_map); 932 sc->vge_rdata.vge_tx_ring = NULL; 933 sc->vge_rdata.vge_tx_ring_paddr = 0; 934 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 935 sc->vge_cdata.vge_tx_ring_tag = NULL; 936 } 937 /* Rx ring. */ 938 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 939 if (sc->vge_rdata.vge_rx_ring_paddr) 940 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 941 sc->vge_cdata.vge_rx_ring_map); 942 if (sc->vge_rdata.vge_rx_ring) 943 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 944 sc->vge_rdata.vge_rx_ring, 945 sc->vge_cdata.vge_rx_ring_map); 946 sc->vge_rdata.vge_rx_ring = NULL; 947 sc->vge_rdata.vge_rx_ring_paddr = 0; 948 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 949 sc->vge_cdata.vge_rx_ring_tag = NULL; 950 } 951 /* Tx buffers. 
*/ 952 if (sc->vge_cdata.vge_tx_tag != NULL) { 953 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 954 txd = &sc->vge_cdata.vge_txdesc[i]; 955 if (txd->tx_dmamap != NULL) { 956 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 957 txd->tx_dmamap); 958 txd->tx_dmamap = NULL; 959 } 960 } 961 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 962 sc->vge_cdata.vge_tx_tag = NULL; 963 } 964 /* Rx buffers. */ 965 if (sc->vge_cdata.vge_rx_tag != NULL) { 966 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 967 rxd = &sc->vge_cdata.vge_rxdesc[i]; 968 if (rxd->rx_dmamap != NULL) { 969 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 970 rxd->rx_dmamap); 971 rxd->rx_dmamap = NULL; 972 } 973 } 974 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 975 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 976 sc->vge_cdata.vge_rx_sparemap); 977 sc->vge_cdata.vge_rx_sparemap = NULL; 978 } 979 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 980 sc->vge_cdata.vge_rx_tag = NULL; 981 } 982 983 if (sc->vge_cdata.vge_buffer_tag != NULL) { 984 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 985 sc->vge_cdata.vge_buffer_tag = NULL; 986 } 987 if (sc->vge_cdata.vge_ring_tag != NULL) { 988 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 989 sc->vge_cdata.vge_ring_tag = NULL; 990 } 991 } 992 993 /* 994 * Attach the interface. Allocate softc structures, do ifmedia 995 * setup and ethernet/BPF attach. 996 */ 997 static int 998 vge_attach(device_t dev) 999 { 1000 u_char eaddr[ETHER_ADDR_LEN]; 1001 struct vge_softc *sc; 1002 struct ifnet *ifp; 1003 int error = 0, cap, i, msic, rid; 1004 1005 sc = device_get_softc(dev); 1006 sc->vge_dev = dev; 1007 1008 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1009 MTX_DEF); 1010 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 1011 1012 /* 1013 * Map control/status registers. 1014 */ 1015 pci_enable_busmaster(dev); 1016 1017 rid = PCIR_BAR(1); 1018 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1019 RF_ACTIVE); 1020 1021 if (sc->vge_res == NULL) { 1022 device_printf(dev, "couldn't map ports/memory\n"); 1023 error = ENXIO; 1024 goto fail; 1025 } 1026 1027 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 1028 sc->vge_flags |= VGE_FLAG_PCIE; 1029 sc->vge_expcap = cap; 1030 } else 1031 sc->vge_flags |= VGE_FLAG_JUMBO; 1032 if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) { 1033 sc->vge_flags |= VGE_FLAG_PMCAP; 1034 sc->vge_pmcap = cap; 1035 } 1036 rid = 0; 1037 msic = pci_msi_count(dev); 1038 if (msi_disable == 0 && msic > 0) { 1039 msic = 1; 1040 if (pci_alloc_msi(dev, &msic) == 0) { 1041 if (msic == 1) { 1042 sc->vge_flags |= VGE_FLAG_MSI; 1043 device_printf(dev, "Using %d MSI message\n", 1044 msic); 1045 rid = 1; 1046 } else 1047 pci_release_msi(dev); 1048 } 1049 } 1050 1051 /* Allocate interrupt */ 1052 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1053 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 1054 if (sc->vge_irq == NULL) { 1055 device_printf(dev, "couldn't map interrupt\n"); 1056 error = ENXIO; 1057 goto fail; 1058 } 1059 1060 /* Reset the adapter. */ 1061 vge_reset(sc); 1062 /* Reload EEPROM. */ 1063 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 1064 for (i = 0; i < VGE_TIMEOUT; i++) { 1065 DELAY(5); 1066 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1067 break; 1068 } 1069 if (i == VGE_TIMEOUT) 1070 device_printf(dev, "EEPROM reload timed out\n"); 1071 /* 1072 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1073 * MAC will receive magic packet which in turn confuses 1074 * controller. 
1075 */ 1076 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1077 1078 /* 1079 * Get station address from the EEPROM. 1080 */ 1081 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1082 /* 1083 * Save configured PHY address. 1084 * It seems the PHY address of PCIe controllers just 1085 * reflects media jump strapping status so we assume the 1086 * internal PHY address of PCIe controller is at 1. 1087 */ 1088 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1089 sc->vge_phyaddr = 1; 1090 else 1091 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1092 VGE_MIICFG_PHYADDR; 1093 /* Clear WOL and take hardware from powerdown. */ 1094 vge_clrwol(sc); 1095 vge_sysctl_node(sc); 1096 error = vge_dma_alloc(sc); 1097 if (error) 1098 goto fail; 1099 1100 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1101 if (ifp == NULL) { 1102 device_printf(dev, "can not if_alloc()\n"); 1103 error = ENOSPC; 1104 goto fail; 1105 } 1106 1107 vge_miipoll_start(sc); 1108 /* Do MII setup */ 1109 error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd, 1110 vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY, 1111 MIIF_DOPAUSE); 1112 if (error != 0) { 1113 device_printf(dev, "attaching PHYs failed\n"); 1114 goto fail; 1115 } 1116 1117 ifp->if_softc = sc; 1118 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1119 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1120 ifp->if_ioctl = vge_ioctl; 1121 ifp->if_capabilities = IFCAP_VLAN_MTU; 1122 ifp->if_start = vge_start; 1123 ifp->if_hwassist = VGE_CSUM_FEATURES; 1124 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | 1125 IFCAP_VLAN_HWTAGGING; 1126 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) 1127 ifp->if_capabilities |= IFCAP_WOL; 1128 ifp->if_capenable = ifp->if_capabilities; 1129 #ifdef DEVICE_POLLING 1130 ifp->if_capabilities |= IFCAP_POLLING; 1131 #endif 1132 ifp->if_init = vge_init; 1133 IFQ_SET_MAXLEN(&ifp->if_snd, VGE_TX_DESC_CNT - 1); 1134 ifp->if_snd.ifq_drv_maxlen = VGE_TX_DESC_CNT - 1; 1135 IFQ_SET_READY(&ifp->if_snd); 1136 1137 /* 1138 * Call MI attach routine. 1139 */ 1140 ether_ifattach(ifp, eaddr); 1141 1142 /* Tell the upper layer(s) we support long frames. */ 1143 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1144 1145 /* Hook interrupt last to avoid having to lock softc */ 1146 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1147 NULL, vge_intr, sc, &sc->vge_intrhand); 1148 1149 if (error) { 1150 device_printf(dev, "couldn't set up irq\n"); 1151 ether_ifdetach(ifp); 1152 goto fail; 1153 } 1154 1155 fail: 1156 if (error) 1157 vge_detach(dev); 1158 1159 return (error); 1160 } 1161 1162 /* 1163 * Shutdown hardware and free up resources. This can be called any 1164 * time after the mutex has been initialized. It is called in both 1165 * the error case in attach and the normal detach case so it needs 1166 * to be careful about only freeing resources that have actually been 1167 * allocated. 
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		VGE_LOCK(sc);
		vge_stop(sc);
		VGE_UNLOCK(sc);
		callout_drain(&sc->vge_watchdog);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
	if (sc->vge_flags & VGE_FLAG_MSI)
		pci_release_msi(dev);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(1), sc->vge_res);
	if (ifp)
		if_free(ifp);

	vge_dma_free(sc);
	mtx_destroy(&sc->vge_mtx);

	return (0);
}

static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	int i;

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}
}

static int
vge_newbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int i, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * This is part of an evil trick to deal with strict-alignment
	 * architectures. The VIA chip requires RX buffers to be aligned
	 * on 32-bit boundaries, but that will hose strict-alignment
	 * architectures. To get around this, we leave some empty space
	 * at the start of each buffer and, on strict-alignment hosts,
	 * we copy the buffer back two bytes to achieve word alignment.
	 * This is slightly more efficient than allocating a new buffer,
	 * copying the contents, and discarding the old buffer.
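	 * With VGE_RX_BUF_ALIGN bytes reserved by m_adj() below, the copy in
	 * vge_fixup_rx() shifts the payload back by ETHER_ALIGN, which should
	 * leave the IP header naturally aligned on strict-alignment machines.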
1263 */ 1264 m->m_len = m->m_pkthdr.len = MCLBYTES; 1265 m_adj(m, VGE_RX_BUF_ALIGN); 1266 1267 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1268 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1269 m_freem(m); 1270 return (ENOBUFS); 1271 } 1272 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1273 1274 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1275 if (rxd->rx_m != NULL) { 1276 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1277 BUS_DMASYNC_POSTREAD); 1278 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1279 } 1280 map = rxd->rx_dmamap; 1281 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1282 sc->vge_cdata.vge_rx_sparemap = map; 1283 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1284 BUS_DMASYNC_PREREAD); 1285 rxd->rx_m = m; 1286 1287 rxd->rx_desc->vge_sts = 0; 1288 rxd->rx_desc->vge_ctl = 0; 1289 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1290 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1291 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1292 1293 /* 1294 * Note: the manual fails to document the fact that for 1295 * proper operation, the driver needs to replenish the RX 1296 * DMA ring 4 descriptors at a time (rather than one at a 1297 * time, like most chips). We can allocate the new buffers 1298 * but we should not set the OWN bits until we're ready 1299 * to hand back 4 of them in one shot. 1300 */ 1301 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1302 for (i = VGE_RXCHUNK; i > 0; i--) { 1303 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1304 rxd = rxd->rxd_prev; 1305 } 1306 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1307 } 1308 1309 return (0); 1310 } 1311 1312 static int 1313 vge_tx_list_init(struct vge_softc *sc) 1314 { 1315 struct vge_ring_data *rd; 1316 struct vge_txdesc *txd; 1317 int i; 1318 1319 VGE_LOCK_ASSERT(sc); 1320 1321 sc->vge_cdata.vge_tx_prodidx = 0; 1322 sc->vge_cdata.vge_tx_considx = 0; 1323 sc->vge_cdata.vge_tx_cnt = 0; 1324 1325 rd = &sc->vge_rdata; 1326 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1327 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1328 txd = &sc->vge_cdata.vge_txdesc[i]; 1329 txd->tx_m = NULL; 1330 txd->tx_desc = &rd->vge_tx_ring[i]; 1331 } 1332 1333 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1334 sc->vge_cdata.vge_tx_ring_map, 1335 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1336 1337 return (0); 1338 } 1339 1340 static int 1341 vge_rx_list_init(struct vge_softc *sc) 1342 { 1343 struct vge_ring_data *rd; 1344 struct vge_rxdesc *rxd; 1345 int i; 1346 1347 VGE_LOCK_ASSERT(sc); 1348 1349 sc->vge_cdata.vge_rx_prodidx = 0; 1350 sc->vge_cdata.vge_head = NULL; 1351 sc->vge_cdata.vge_tail = NULL; 1352 sc->vge_cdata.vge_rx_commit = 0; 1353 1354 rd = &sc->vge_rdata; 1355 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1356 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1357 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1358 rxd->rx_m = NULL; 1359 rxd->rx_desc = &rd->vge_rx_ring[i]; 1360 if (i == 0) 1361 rxd->rxd_prev = 1362 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1363 else 1364 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1365 if (vge_newbuf(sc, i) != 0) 1366 return (ENOBUFS); 1367 } 1368 1369 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1370 sc->vge_cdata.vge_rx_ring_map, 1371 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1372 1373 sc->vge_cdata.vge_rx_commit = 0; 1374 1375 return (0); 1376 } 1377 1378 static void 1379 vge_freebufs(struct vge_softc *sc) 1380 { 1381 struct vge_txdesc *txd; 1382 struct vge_rxdesc *rxd; 1383 struct ifnet *ifp; 
	int i;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;
	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}
	}
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static int
vge_rxeof(struct vge_softc *sc, int count)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int prod, prog, total_len;
	struct vge_rxdesc *rxd;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	prod = sc->vge_cdata.vge_rx_prodidx;
	for (prog = 0; count > 0 &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    VGE_RX_DESC_INC(prod)) {
		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
		rxstat = le32toh(cur_rx->vge_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0)
			break;
		count--;
		prog++;
		rxctl = le32toh(cur_rx->vge_ctl);
		total_len = VGE_RXBYTES(rxstat);
		rxd = &sc->vge_cdata.vge_rxdesc[prod];
		m = rxd->rx_m;

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if ((rxstat & VGE_RXPKT_SOF) != 0) {
			if (vge_newbuf(sc, prod) != 0) {
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				VGE_CHAIN_RESET(sc);
				vge_discard_rxbuf(sc, prod);
				continue;
			}
			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
			if (sc->vge_cdata.vge_head == NULL) {
				sc->vge_cdata.vge_head = m;
				sc->vge_cdata.vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
				sc->vge_cdata.vge_tail = m;
			}
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 * We also want to receive bad-checksummed frames and
		 * frames with bad lengths.
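		 * The test below therefore drops a frame only when RXOK is
		 * clear and none of the VIDM, RLERR or CSUMERR bits accounts
		 * for it.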
1507 */ 1508 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1509 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1510 VGE_RDSTS_CSUMERR)) == 0) { 1511 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1512 /* 1513 * If this is part of a multi-fragment packet, 1514 * discard all the pieces. 1515 */ 1516 VGE_CHAIN_RESET(sc); 1517 vge_discard_rxbuf(sc, prod); 1518 continue; 1519 } 1520 1521 if (vge_newbuf(sc, prod) != 0) { 1522 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1523 VGE_CHAIN_RESET(sc); 1524 vge_discard_rxbuf(sc, prod); 1525 continue; 1526 } 1527 1528 /* Chain received mbufs. */ 1529 if (sc->vge_cdata.vge_head != NULL) { 1530 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1531 /* 1532 * Special case: if there's 4 bytes or less 1533 * in this buffer, the mbuf can be discarded: 1534 * the last 4 bytes is the CRC, which we don't 1535 * care about anyway. 1536 */ 1537 if (m->m_len <= ETHER_CRC_LEN) { 1538 sc->vge_cdata.vge_tail->m_len -= 1539 (ETHER_CRC_LEN - m->m_len); 1540 m_freem(m); 1541 } else { 1542 m->m_len -= ETHER_CRC_LEN; 1543 m->m_flags &= ~M_PKTHDR; 1544 sc->vge_cdata.vge_tail->m_next = m; 1545 } 1546 m = sc->vge_cdata.vge_head; 1547 m->m_flags |= M_PKTHDR; 1548 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1549 } else { 1550 m->m_flags |= M_PKTHDR; 1551 m->m_pkthdr.len = m->m_len = 1552 (total_len - ETHER_CRC_LEN); 1553 } 1554 1555 #ifndef __NO_STRICT_ALIGNMENT 1556 vge_fixup_rx(m); 1557 #endif 1558 m->m_pkthdr.rcvif = ifp; 1559 1560 /* Do RX checksumming if enabled */ 1561 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1562 (rxctl & VGE_RDCTL_FRAG) == 0) { 1563 /* Check IP header checksum */ 1564 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1565 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1566 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1567 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1568 1569 /* Check TCP/UDP checksum */ 1570 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1571 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1572 m->m_pkthdr.csum_flags |= 1573 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1574 m->m_pkthdr.csum_data = 0xffff; 1575 } 1576 } 1577 1578 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1579 /* 1580 * The 32-bit rxctl register is stored in little-endian. 1581 * However, the 16-bit vlan tag is stored in big-endian, 1582 * so we have to byte swap it. 1583 */ 1584 m->m_pkthdr.ether_vtag = 1585 bswap16(rxctl & VGE_RDCTL_VLANID); 1586 m->m_flags |= M_VLANTAG; 1587 } 1588 1589 VGE_UNLOCK(sc); 1590 (*ifp->if_input)(ifp, m); 1591 VGE_LOCK(sc); 1592 sc->vge_cdata.vge_head = NULL; 1593 sc->vge_cdata.vge_tail = NULL; 1594 } 1595 1596 if (prog > 0) { 1597 sc->vge_cdata.vge_rx_prodidx = prod; 1598 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1599 sc->vge_cdata.vge_rx_ring_map, 1600 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1601 /* Update residue counter. */ 1602 if (sc->vge_cdata.vge_rx_commit != 0) { 1603 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1604 sc->vge_cdata.vge_rx_commit); 1605 sc->vge_cdata.vge_rx_commit = 0; 1606 } 1607 } 1608 return (prog); 1609 } 1610 1611 static void 1612 vge_txeof(struct vge_softc *sc) 1613 { 1614 struct ifnet *ifp; 1615 struct vge_tx_desc *cur_tx; 1616 struct vge_txdesc *txd; 1617 uint32_t txstat; 1618 int cons, prod; 1619 1620 VGE_LOCK_ASSERT(sc); 1621 1622 ifp = sc->vge_ifp; 1623 1624 if (sc->vge_cdata.vge_tx_cnt == 0) 1625 return; 1626 1627 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1628 sc->vge_cdata.vge_tx_ring_map, 1629 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1630 1631 /* 1632 * Go through our tx list and free mbufs for those 1633 * frames that have been transmitted. 
1634 */ 1635 cons = sc->vge_cdata.vge_tx_considx; 1636 prod = sc->vge_cdata.vge_tx_prodidx; 1637 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1638 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1639 txstat = le32toh(cur_tx->vge_sts); 1640 if ((txstat & VGE_TDSTS_OWN) != 0) 1641 break; 1642 sc->vge_cdata.vge_tx_cnt--; 1643 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1644 1645 txd = &sc->vge_cdata.vge_txdesc[cons]; 1646 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1647 BUS_DMASYNC_POSTWRITE); 1648 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1649 1650 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1651 __func__)); 1652 m_freem(txd->tx_m); 1653 txd->tx_m = NULL; 1654 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1655 } 1656 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1657 sc->vge_cdata.vge_tx_ring_map, 1658 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1659 sc->vge_cdata.vge_tx_considx = cons; 1660 if (sc->vge_cdata.vge_tx_cnt == 0) 1661 sc->vge_timer = 0; 1662 } 1663 1664 static void 1665 vge_link_statchg(void *xsc) 1666 { 1667 struct vge_softc *sc; 1668 struct ifnet *ifp; 1669 uint8_t physts; 1670 1671 sc = xsc; 1672 ifp = sc->vge_ifp; 1673 VGE_LOCK_ASSERT(sc); 1674 1675 physts = CSR_READ_1(sc, VGE_PHYSTS0); 1676 if ((physts & VGE_PHYSTS_RESETSTS) == 0) { 1677 if ((physts & VGE_PHYSTS_LINK) == 0) { 1678 sc->vge_flags &= ~VGE_FLAG_LINK; 1679 if_link_state_change(sc->vge_ifp, 1680 LINK_STATE_DOWN); 1681 } else { 1682 sc->vge_flags |= VGE_FLAG_LINK; 1683 if_link_state_change(sc->vge_ifp, 1684 LINK_STATE_UP); 1685 CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE | 1686 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1687 if ((physts & VGE_PHYSTS_FDX) != 0) { 1688 if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0) 1689 CSR_WRITE_1(sc, VGE_CRS2, 1690 VGE_CR2_FDX_TXFLOWCTL_ENABLE); 1691 if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0) 1692 CSR_WRITE_1(sc, VGE_CRS2, 1693 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1694 } 1695 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1696 vge_start_locked(ifp); 1697 } 1698 } 1699 /* 1700 * Restart MII auto-polling because link state change interrupt 1701 * will disable it. 1702 */ 1703 vge_miipoll_start(sc); 1704 } 1705 1706 #ifdef DEVICE_POLLING 1707 static int 1708 vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) 1709 { 1710 struct vge_softc *sc = ifp->if_softc; 1711 int rx_npkts = 0; 1712 1713 VGE_LOCK(sc); 1714 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1715 goto done; 1716 1717 rx_npkts = vge_rxeof(sc, count); 1718 vge_txeof(sc); 1719 1720 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1721 vge_start_locked(ifp); 1722 1723 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1724 uint32_t status; 1725 status = CSR_READ_4(sc, VGE_ISR); 1726 if (status == 0xFFFFFFFF) 1727 goto done; 1728 if (status) 1729 CSR_WRITE_4(sc, VGE_ISR, status); 1730 1731 /* 1732 * XXX check behaviour on receiver stalls. 
1733 */ 1734 1735 if (status & VGE_ISR_TXDMA_STALL || 1736 status & VGE_ISR_RXDMA_STALL) { 1737 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1738 vge_init_locked(sc); 1739 } 1740 1741 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1742 vge_rxeof(sc, count); 1743 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1744 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1745 } 1746 } 1747 done: 1748 VGE_UNLOCK(sc); 1749 return (rx_npkts); 1750 } 1751 #endif /* DEVICE_POLLING */ 1752 1753 static void 1754 vge_intr(void *arg) 1755 { 1756 struct vge_softc *sc; 1757 struct ifnet *ifp; 1758 uint32_t status; 1759 1760 sc = arg; 1761 VGE_LOCK(sc); 1762 1763 ifp = sc->vge_ifp; 1764 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 || 1765 (ifp->if_flags & IFF_UP) == 0) { 1766 VGE_UNLOCK(sc); 1767 return; 1768 } 1769 1770 #ifdef DEVICE_POLLING 1771 if (ifp->if_capenable & IFCAP_POLLING) { 1772 status = CSR_READ_4(sc, VGE_ISR); 1773 CSR_WRITE_4(sc, VGE_ISR, status); 1774 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0) 1775 vge_link_statchg(sc); 1776 VGE_UNLOCK(sc); 1777 return; 1778 } 1779 #endif 1780 1781 /* Disable interrupts */ 1782 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1783 status = CSR_READ_4(sc, VGE_ISR); 1784 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD); 1785 /* If the card has gone away the read returns 0xffff. */ 1786 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0) 1787 goto done; 1788 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1789 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1790 vge_rxeof(sc, VGE_RX_DESC_CNT); 1791 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1792 vge_rxeof(sc, VGE_RX_DESC_CNT); 1793 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1794 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1795 } 1796 1797 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO)) 1798 vge_txeof(sc); 1799 1800 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1801 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1802 vge_init_locked(sc); 1803 } 1804 1805 if (status & VGE_ISR_LINKSTS) 1806 vge_link_statchg(sc); 1807 } 1808 done: 1809 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1810 /* Re-enable interrupts */ 1811 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1812 1813 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1814 vge_start_locked(ifp); 1815 } 1816 VGE_UNLOCK(sc); 1817 } 1818 1819 static int 1820 vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1821 { 1822 struct vge_txdesc *txd; 1823 struct vge_tx_frag *frag; 1824 struct mbuf *m; 1825 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1826 int error, i, nsegs, padlen; 1827 uint32_t cflags; 1828 1829 VGE_LOCK_ASSERT(sc); 1830 1831 M_ASSERTPKTHDR((*m_head)); 1832 1833 /* Argh. This chip does not autopad short frames. */ 1834 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1835 m = *m_head; 1836 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1837 if (M_WRITABLE(m) == 0) { 1838 /* Get a writable copy. */ 1839 m = m_dup(*m_head, M_NOWAIT); 1840 m_freem(*m_head); 1841 if (m == NULL) { 1842 *m_head = NULL; 1843 return (ENOBUFS); 1844 } 1845 *m_head = m; 1846 } 1847 if (M_TRAILINGSPACE(m) < padlen) { 1848 m = m_defrag(m, M_NOWAIT); 1849 if (m == NULL) { 1850 m_freem(*m_head); 1851 *m_head = NULL; 1852 return (ENOBUFS); 1853 } 1854 } 1855 /* 1856 * Manually pad short frames, and zero the pad space 1857 * to avoid leaking data. 
1858 */ 1859 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1860 m->m_pkthdr.len += padlen; 1861 m->m_len = m->m_pkthdr.len; 1862 *m_head = m; 1863 } 1864 1865 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1866 1867 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1868 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1869 if (error == EFBIG) { 1870 m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS); 1871 if (m == NULL) { 1872 m_freem(*m_head); 1873 *m_head = NULL; 1874 return (ENOMEM); 1875 } 1876 *m_head = m; 1877 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1878 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1879 if (error != 0) { 1880 m_freem(*m_head); 1881 *m_head = NULL; 1882 return (error); 1883 } 1884 } else if (error != 0) 1885 return (error); 1886 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1887 BUS_DMASYNC_PREWRITE); 1888 1889 m = *m_head; 1890 cflags = 0; 1891 1892 /* Configure checksum offload. */ 1893 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1894 cflags |= VGE_TDCTL_IPCSUM; 1895 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1896 cflags |= VGE_TDCTL_TCPCSUM; 1897 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1898 cflags |= VGE_TDCTL_UDPCSUM; 1899 1900 /* Configure VLAN. */ 1901 if ((m->m_flags & M_VLANTAG) != 0) 1902 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1903 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1904 /* 1905 * XXX 1906 * Velocity family seems to support TSO but no information 1907 * for MSS configuration is available. Also the number of 1908 * fragments supported by a descriptor is too small to hold 1909 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1910 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1911 * longer chain of buffers but no additional information is 1912 * available. 1913 * 1914 * When telling the chip how many segments there are, we 1915 * must use nsegs + 1 instead of just nsegs. Darned if I 1916 * know why. This also means we can't use the last fragment 1917 * field of Tx descriptor. 1918 */ 1919 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1920 VGE_TD_LS_NORM); 1921 for (i = 0; i < nsegs; i++) { 1922 frag = &txd->tx_desc->vge_frag[i]; 1923 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1924 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1925 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1926 } 1927 1928 sc->vge_cdata.vge_tx_cnt++; 1929 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1930 1931 /* 1932 * Finally request interrupt and give the first descriptor 1933 * ownership to hardware. 1934 */ 1935 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1936 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1937 txd->tx_m = m; 1938 1939 return (0); 1940 } 1941 1942 /* 1943 * Main transmit routine. 
1944 */ 1945 1946 static void 1947 vge_start(struct ifnet *ifp) 1948 { 1949 struct vge_softc *sc; 1950 1951 sc = ifp->if_softc; 1952 VGE_LOCK(sc); 1953 vge_start_locked(ifp); 1954 VGE_UNLOCK(sc); 1955 } 1956 1957 1958 static void 1959 vge_start_locked(struct ifnet *ifp) 1960 { 1961 struct vge_softc *sc; 1962 struct vge_txdesc *txd; 1963 struct mbuf *m_head; 1964 int enq, idx; 1965 1966 sc = ifp->if_softc; 1967 1968 VGE_LOCK_ASSERT(sc); 1969 1970 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1971 (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1972 IFF_DRV_RUNNING) 1973 return; 1974 1975 idx = sc->vge_cdata.vge_tx_prodidx; 1976 VGE_TX_DESC_DEC(idx); 1977 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1978 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1979 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1980 if (m_head == NULL) 1981 break; 1982 /* 1983 * Pack the data into the transmit ring. If we 1984 * don't have room, set the OACTIVE flag and wait 1985 * for the NIC to drain the ring. 1986 */ 1987 if (vge_encap(sc, &m_head)) { 1988 if (m_head == NULL) 1989 break; 1990 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1991 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1992 break; 1993 } 1994 1995 txd = &sc->vge_cdata.vge_txdesc[idx]; 1996 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1997 VGE_TX_DESC_INC(idx); 1998 1999 enq++; 2000 /* 2001 * If there's a BPF listener, bounce a copy of this frame 2002 * to him. 2003 */ 2004 ETHER_BPF_MTAP(ifp, m_head); 2005 } 2006 2007 if (enq > 0) { 2008 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 2009 sc->vge_cdata.vge_tx_ring_map, 2010 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2011 /* Issue a transmit command. */ 2012 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 2013 /* 2014 * Set a timeout in case the chip goes out to lunch. 2015 */ 2016 sc->vge_timer = 5; 2017 } 2018 } 2019 2020 static void 2021 vge_init(void *xsc) 2022 { 2023 struct vge_softc *sc = xsc; 2024 2025 VGE_LOCK(sc); 2026 vge_init_locked(sc); 2027 VGE_UNLOCK(sc); 2028 } 2029 2030 static void 2031 vge_init_locked(struct vge_softc *sc) 2032 { 2033 struct ifnet *ifp = sc->vge_ifp; 2034 int error, i; 2035 2036 VGE_LOCK_ASSERT(sc); 2037 2038 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2039 return; 2040 2041 /* 2042 * Cancel pending I/O and free all RX/TX buffers. 2043 */ 2044 vge_stop(sc); 2045 vge_reset(sc); 2046 vge_miipoll_start(sc); 2047 2048 /* 2049 * Initialize the RX and TX descriptors and mbufs. 2050 */ 2051 2052 error = vge_rx_list_init(sc); 2053 if (error != 0) { 2054 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2055 return; 2056 } 2057 vge_tx_list_init(sc); 2058 /* Clear MAC statistics. */ 2059 vge_stats_clear(sc); 2060 /* Set our station address */ 2061 for (i = 0; i < ETHER_ADDR_LEN; i++) 2062 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); 2063 2064 /* 2065 * Set receive FIFO threshold. Also allow transmission and 2066 * reception of VLAN tagged frames. 
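 * (The threshold is set to 128 bytes just below; VLAN tag insertion and stripping itself is enabled or disabled later through vge_setvlan().)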
2067 */ 2068 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); 2069 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES); 2070 2071 /* Set DMA burst length */ 2072 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); 2073 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); 2074 2075 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); 2076 2077 /* Set collision backoff algorithm */ 2078 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| 2079 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); 2080 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); 2081 2082 /* Disable LPSEL field in priority resolution */ 2083 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); 2084 2085 /* 2086 * Load the addresses of the DMA queues into the chip. 2087 * Note that we only use one transmit queue. 2088 */ 2089 2090 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR, 2091 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)); 2092 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, 2093 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr)); 2094 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); 2095 2096 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 2097 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr)); 2098 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); 2099 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); 2100 2101 /* Configure interrupt moderation. */ 2102 vge_intr_holdoff(sc); 2103 2104 /* Enable and wake up the RX descriptor queue */ 2105 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 2106 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 2107 2108 /* Enable the TX descriptor queue */ 2109 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); 2110 2111 /* Init the CAM filter. */ 2112 vge_cam_clear(sc); 2113 2114 /* Set up receiver filter. */ 2115 vge_rxfilter(sc); 2116 vge_setvlan(sc); 2117 2118 /* Initialize pause timer. */ 2119 CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF); 2120 /* 2121 * Initialize flow control parameters. 2122 * TX XON high threshold : 48 2123 * TX pause low threshold : 24 2124 * Disable half-duplex flow control 2125 */ 2126 CSR_WRITE_1(sc, VGE_CRC2, 0xFF); 2127 CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B); 2128 2129 /* Enable jumbo frame reception (if desired) */ 2130 2131 /* Start the MAC. */ 2132 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); 2133 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); 2134 CSR_WRITE_1(sc, VGE_CRS0, 2135 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); 2136 2137 #ifdef DEVICE_POLLING 2138 /* 2139 * Disable interrupts except link state change if we are polling. 2140 */ 2141 if (ifp->if_capenable & IFCAP_POLLING) { 2142 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2143 } else /* otherwise ... */ 2144 #endif 2145 { 2146 /* 2147 * Enable interrupts. 2148 */ 2149 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2150 } 2151 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2152 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2153 2154 sc->vge_flags &= ~VGE_FLAG_LINK; 2155 vge_ifmedia_upd_locked(sc); 2156 2157 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2158 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2159 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2160 } 2161 2162 /* 2163 * Set media options.
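 * vge_ifmedia_upd() is the ifmedia change callback: it resets each PHY, programs the forced-MAC bits through vge_setmedia() and then lets mii_mediachg() do the rest.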
2164 */ 2165 static int 2166 vge_ifmedia_upd(struct ifnet *ifp) 2167 { 2168 struct vge_softc *sc; 2169 int error; 2170 2171 sc = ifp->if_softc; 2172 VGE_LOCK(sc); 2173 error = vge_ifmedia_upd_locked(sc); 2174 VGE_UNLOCK(sc); 2175 2176 return (error); 2177 } 2178 2179 static int 2180 vge_ifmedia_upd_locked(struct vge_softc *sc) 2181 { 2182 struct mii_data *mii; 2183 struct mii_softc *miisc; 2184 int error; 2185 2186 mii = device_get_softc(sc->vge_miibus); 2187 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2188 PHY_RESET(miisc); 2189 vge_setmedia(sc); 2190 error = mii_mediachg(mii); 2191 2192 return (error); 2193 } 2194 2195 /* 2196 * Report current media status. 2197 */ 2198 static void 2199 vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2200 { 2201 struct vge_softc *sc; 2202 struct mii_data *mii; 2203 2204 sc = ifp->if_softc; 2205 mii = device_get_softc(sc->vge_miibus); 2206 2207 VGE_LOCK(sc); 2208 if ((ifp->if_flags & IFF_UP) == 0) { 2209 VGE_UNLOCK(sc); 2210 return; 2211 } 2212 mii_pollstat(mii); 2213 ifmr->ifm_active = mii->mii_media_active; 2214 ifmr->ifm_status = mii->mii_media_status; 2215 VGE_UNLOCK(sc); 2216 } 2217 2218 static void 2219 vge_setmedia(struct vge_softc *sc) 2220 { 2221 struct mii_data *mii; 2222 struct ifmedia_entry *ife; 2223 2224 mii = device_get_softc(sc->vge_miibus); 2225 ife = mii->mii_media.ifm_cur; 2226 2227 /* 2228 * If the user manually selects a media mode, we need to turn 2229 * on the forced MAC mode bit in the DIAGCTL register. If the 2230 * user happens to choose a full duplex mode, we also need to 2231 * set the 'force full duplex' bit. This applies only to 2232 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2233 * mode is disabled, and in 1000baseT mode, full duplex is 2234 * always implied, so we turn on the forced mode bit but leave 2235 * the FDX bit cleared. 
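 * For example (unit number hypothetical), 'ifconfig vge0 media 100baseTX mediaopt full-duplex' lands in the IFM_100_TX case below with both force bits set, while 'ifconfig vge0 media autoselect' clears them again.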
2236 */ 2237 2238 switch (IFM_SUBTYPE(ife->ifm_media)) { 2239 case IFM_AUTO: 2240 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2241 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2242 break; 2243 case IFM_1000_T: 2244 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2245 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2246 break; 2247 case IFM_100_TX: 2248 case IFM_10_T: 2249 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2250 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2251 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2252 } else { 2253 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2254 } 2255 break; 2256 default: 2257 device_printf(sc->vge_dev, "unknown media type: %x\n", 2258 IFM_SUBTYPE(ife->ifm_media)); 2259 break; 2260 } 2261 } 2262 2263 static int 2264 vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2265 { 2266 struct vge_softc *sc = ifp->if_softc; 2267 struct ifreq *ifr = (struct ifreq *) data; 2268 struct mii_data *mii; 2269 int error = 0, mask; 2270 2271 switch (command) { 2272 case SIOCSIFMTU: 2273 VGE_LOCK(sc); 2274 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU) 2275 error = EINVAL; 2276 else if (ifp->if_mtu != ifr->ifr_mtu) { 2277 if (ifr->ifr_mtu > ETHERMTU && 2278 (sc->vge_flags & VGE_FLAG_JUMBO) == 0) 2279 error = EINVAL; 2280 else 2281 ifp->if_mtu = ifr->ifr_mtu; 2282 } 2283 VGE_UNLOCK(sc); 2284 break; 2285 case SIOCSIFFLAGS: 2286 VGE_LOCK(sc); 2287 if ((ifp->if_flags & IFF_UP) != 0) { 2288 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2289 ((ifp->if_flags ^ sc->vge_if_flags) & 2290 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2291 vge_rxfilter(sc); 2292 else 2293 vge_init_locked(sc); 2294 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2295 vge_stop(sc); 2296 sc->vge_if_flags = ifp->if_flags; 2297 VGE_UNLOCK(sc); 2298 break; 2299 case SIOCADDMULTI: 2300 case SIOCDELMULTI: 2301 VGE_LOCK(sc); 2302 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2303 vge_rxfilter(sc); 2304 VGE_UNLOCK(sc); 2305 break; 2306 case SIOCGIFMEDIA: 2307 case SIOCSIFMEDIA: 2308 mii = device_get_softc(sc->vge_miibus); 2309 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2310 break; 2311 case SIOCSIFCAP: 2312 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2313 #ifdef DEVICE_POLLING 2314 if (mask & IFCAP_POLLING) { 2315 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2316 error = ether_poll_register(vge_poll, ifp); 2317 if (error) 2318 return (error); 2319 VGE_LOCK(sc); 2320 /* Disable interrupts */ 2321 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2322 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2323 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2324 ifp->if_capenable |= IFCAP_POLLING; 2325 VGE_UNLOCK(sc); 2326 } else { 2327 error = ether_poll_deregister(ifp); 2328 /* Enable interrupts. 
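 * again now that polling is being switched off; the IMR/ISR/CRS3 writes below mirror what vge_init_locked() programs in the non-polling case.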
*/ 2329 VGE_LOCK(sc); 2330 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2331 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2332 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2333 ifp->if_capenable &= ~IFCAP_POLLING; 2334 VGE_UNLOCK(sc); 2335 } 2336 } 2337 #endif /* DEVICE_POLLING */ 2338 VGE_LOCK(sc); 2339 if ((mask & IFCAP_TXCSUM) != 0 && 2340 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2341 ifp->if_capenable ^= IFCAP_TXCSUM; 2342 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2343 ifp->if_hwassist |= VGE_CSUM_FEATURES; 2344 else 2345 ifp->if_hwassist &= ~VGE_CSUM_FEATURES; 2346 } 2347 if ((mask & IFCAP_RXCSUM) != 0 && 2348 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) 2349 ifp->if_capenable ^= IFCAP_RXCSUM; 2350 if ((mask & IFCAP_WOL_UCAST) != 0 && 2351 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2352 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2353 if ((mask & IFCAP_WOL_MCAST) != 0 && 2354 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2355 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2356 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2357 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2358 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2359 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2360 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2361 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2362 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2363 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 2364 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2365 vge_setvlan(sc); 2366 } 2367 VGE_UNLOCK(sc); 2368 VLAN_CAPABILITIES(ifp); 2369 break; 2370 default: 2371 error = ether_ioctl(ifp, command, data); 2372 break; 2373 } 2374 2375 return (error); 2376 } 2377 2378 static void 2379 vge_watchdog(void *arg) 2380 { 2381 struct vge_softc *sc; 2382 struct ifnet *ifp; 2383 2384 sc = arg; 2385 VGE_LOCK_ASSERT(sc); 2386 vge_stats_update(sc); 2387 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2388 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2389 return; 2390 2391 ifp = sc->vge_ifp; 2392 if_printf(ifp, "watchdog timeout\n"); 2393 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2394 2395 vge_txeof(sc); 2396 vge_rxeof(sc, VGE_RX_DESC_CNT); 2397 2398 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2399 vge_init_locked(sc); 2400 } 2401 2402 /* 2403 * Stop the adapter and free any mbufs allocated to the 2404 * RX and TX lists. 2405 */ 2406 static void 2407 vge_stop(struct vge_softc *sc) 2408 { 2409 struct ifnet *ifp; 2410 2411 VGE_LOCK_ASSERT(sc); 2412 ifp = sc->vge_ifp; 2413 sc->vge_timer = 0; 2414 callout_stop(&sc->vge_watchdog); 2415 2416 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2417 2418 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2419 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2420 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2421 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2422 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2423 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2424 2425 vge_stats_update(sc); 2426 VGE_CHAIN_RESET(sc); 2427 vge_txeof(sc); 2428 vge_freebufs(sc); 2429 } 2430 2431 /* 2432 * Device suspend routine. Stop the interface and save some PCI 2433 * settings in case the BIOS doesn't restore them properly on 2434 * resume. 2435 */ 2436 static int 2437 vge_suspend(device_t dev) 2438 { 2439 struct vge_softc *sc; 2440 2441 sc = device_get_softc(dev); 2442 2443 VGE_LOCK(sc); 2444 vge_stop(sc); 2445 vge_setwol(sc); 2446 sc->vge_flags |= VGE_FLAG_SUSPENDED; 2447 VGE_UNLOCK(sc); 2448 2449 return (0); 2450 } 2451 2452 /* 2453 * Device resume routine. 
Restore some PCI settings in case the BIOS 2454 * doesn't, re-enable busmastering, and restart the interface if 2455 * appropriate. 2456 */ 2457 static int 2458 vge_resume(device_t dev) 2459 { 2460 struct vge_softc *sc; 2461 struct ifnet *ifp; 2462 uint16_t pmstat; 2463 2464 sc = device_get_softc(dev); 2465 VGE_LOCK(sc); 2466 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) { 2467 /* Disable PME and clear PME status. */ 2468 pmstat = pci_read_config(sc->vge_dev, 2469 sc->vge_pmcap + PCIR_POWER_STATUS, 2); 2470 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2471 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2472 pci_write_config(sc->vge_dev, 2473 sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2474 } 2475 } 2476 vge_clrwol(sc); 2477 /* Restart MII auto-polling. */ 2478 vge_miipoll_start(sc); 2479 ifp = sc->vge_ifp; 2480 /* Reinitialize interface if necessary. */ 2481 if ((ifp->if_flags & IFF_UP) != 0) { 2482 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2483 vge_init_locked(sc); 2484 } 2485 sc->vge_flags &= ~VGE_FLAG_SUSPENDED; 2486 VGE_UNLOCK(sc); 2487 2488 return (0); 2489 } 2490 2491 /* 2492 * Stop all chip I/O so that the kernel's probe routines don't 2493 * get confused by errant DMAs when rebooting. 2494 */ 2495 static int 2496 vge_shutdown(device_t dev) 2497 { 2498 2499 return (vge_suspend(dev)); 2500 } 2501 2502 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2503 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2504 2505 static void 2506 vge_sysctl_node(struct vge_softc *sc) 2507 { 2508 struct sysctl_ctx_list *ctx; 2509 struct sysctl_oid_list *child, *parent; 2510 struct sysctl_oid *tree; 2511 struct vge_hw_stats *stats; 2512 2513 stats = &sc->vge_stats; 2514 ctx = device_get_sysctl_ctx(sc->vge_dev); 2515 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev)); 2516 2517 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff", 2518 CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff"); 2519 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt", 2520 CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet"); 2521 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt", 2522 CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet"); 2523 2524 /* Pull in device tunables. */ 2525 sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT; 2526 resource_int_value(device_get_name(sc->vge_dev), 2527 device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff); 2528 sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT; 2529 resource_int_value(device_get_name(sc->vge_dev), 2530 device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt); 2531 sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT; 2532 resource_int_value(device_get_name(sc->vge_dev), 2533 device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt); 2534 2535 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", 2536 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VGE statistics"); 2537 parent = SYSCTL_CHILDREN(tree); 2538 2539 /* Rx statistics. 
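 * Each counter below is exported read-only under the per-device sysctl tree, e.g. (unit number hypothetical) 'sysctl dev.vge.0.stats.rx' dumps the whole Rx group.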
*/ 2540 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", 2541 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics"); 2542 child = SYSCTL_CHILDREN(tree); 2543 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames", 2544 &stats->rx_frames, "Frames"); 2545 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2546 &stats->rx_good_frames, "Good frames"); 2547 VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 2548 &stats->rx_fifo_oflows, "FIFO overflows"); 2549 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts", 2550 &stats->rx_runts, "Too short frames"); 2551 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs", 2552 &stats->rx_runts_errs, "Too short frames with errors"); 2553 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2554 &stats->rx_pkts_64, "64 bytes frames"); 2555 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2556 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 2557 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2558 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 2559 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2560 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 2561 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2562 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 2563 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2564 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2565 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 2566 &stats->rx_pkts_1519_max, "1519 to max frames"); 2567 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs", 2568 &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors"); 2569 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2570 &stats->rx_jumbos, "Jumbo frames"); 2571 VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs", 2572 &stats->rx_crcerrs, "CRC errors"); 2573 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2574 &stats->rx_pause_frames, "Pause frames"); 2575 VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 2576 &stats->rx_alignerrs, "Alignment errors"); 2577 VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs", 2578 &stats->rx_nobufs, "Frames with no buffer event"); 2579 VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs", 2580 &stats->rx_symerrs, "Frames with symbol errors"); 2581 VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 2582 &stats->rx_lenerrs, "Frames with length mismatch"); 2583 2584 /* Tx statistics.
*/ 2585 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", 2586 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics"); 2587 child = SYSCTL_CHILDREN(tree); 2588 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2589 &stats->tx_good_frames, "Good frames"); 2590 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2591 &stats->tx_pkts_64, "64 bytes frames"); 2592 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2593 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 2594 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2595 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 2596 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2597 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 2598 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2599 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 2600 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2601 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2602 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2603 &stats->tx_jumbos, "Jumbo frames"); 2604 VGE_SYSCTL_STAT_ADD32(ctx, child, "colls", 2605 &stats->tx_colls, "Collisions"); 2606 VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2607 &stats->tx_latecolls, "Late collisions"); 2608 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2609 &stats->tx_pause, "Pause frames"); 2610 #ifdef VGE_ENABLE_SQEERR 2611 VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs", 2612 &stats->tx_sqeerrs, "SQE errors"); 2613 #endif 2614 /* Clear MAC statistics. */ 2615 vge_stats_clear(sc); 2616 } 2617 2618 #undef VGE_SYSCTL_STAT_ADD32 2619 2620 static void 2621 vge_stats_clear(struct vge_softc *sc) 2622 { 2623 int i; 2624 2625 CSR_WRITE_1(sc, VGE_MIBCSR, 2626 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE); 2627 CSR_WRITE_1(sc, VGE_MIBCSR, 2628 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR); 2629 for (i = VGE_TIMEOUT; i > 0; i--) { 2630 DELAY(1); 2631 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0) 2632 break; 2633 } 2634 if (i == 0) 2635 device_printf(sc->vge_dev, "MIB clear timed out!\n"); 2636 CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) & 2637 ~VGE_MIBCSR_FREEZE); 2638 } 2639 2640 static void 2641 vge_stats_update(struct vge_softc *sc) 2642 { 2643 struct vge_hw_stats *stats; 2644 struct ifnet *ifp; 2645 uint32_t mib[VGE_MIB_CNT], val; 2646 int i; 2647 2648 VGE_LOCK_ASSERT(sc); 2649 2650 stats = &sc->vge_stats; 2651 ifp = sc->vge_ifp; 2652 2653 CSR_WRITE_1(sc, VGE_MIBCSR, 2654 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH); 2655 for (i = VGE_TIMEOUT; i > 0; i--) { 2656 DELAY(1); 2657 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0) 2658 break; 2659 } 2660 if (i == 0) { 2661 device_printf(sc->vge_dev, "MIB counter dump timed out!\n"); 2662 vge_stats_clear(sc); 2663 return; 2664 } 2665 2666 bzero(mib, sizeof(mib)); 2667 reset_idx: 2668 /* Set MIB read index to 0. */ 2669 CSR_WRITE_1(sc, VGE_MIBCSR, 2670 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI); 2671 for (i = 0; i < VGE_MIB_CNT; i++) { 2672 val = CSR_READ_4(sc, VGE_MIBDATA); 2673 if (i != VGE_MIB_DATA_IDX(val)) { 2674 /* Reading interrupted. */ 2675 goto reset_idx; 2676 } 2677 mib[i] = val & VGE_MIB_DATA_MASK; 2678 } 2679 2680 /* Rx stats. 
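 * Fold the MIB snapshot read out above into the running totals kept in the softc (sc->vge_stats).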
*/ 2681 stats->rx_frames += mib[VGE_MIB_RX_FRAMES]; 2682 stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES]; 2683 stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS]; 2684 stats->rx_runts += mib[VGE_MIB_RX_RUNTS]; 2685 stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS]; 2686 stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64]; 2687 stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127]; 2688 stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255]; 2689 stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511]; 2690 stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023]; 2691 stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518]; 2692 stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX]; 2693 stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS]; 2694 stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS]; 2695 stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS]; 2696 stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE]; 2697 stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS]; 2698 stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS]; 2699 stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS]; 2700 stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS]; 2701 2702 /* Tx stats. */ 2703 stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES]; 2704 stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64]; 2705 stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127]; 2706 stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255]; 2707 stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511]; 2708 stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023]; 2709 stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518]; 2710 stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS]; 2711 stats->tx_colls += mib[VGE_MIB_TX_COLLS]; 2712 stats->tx_pause += mib[VGE_MIB_TX_PAUSE]; 2713 #ifdef VGE_ENABLE_SQEERR 2714 stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS]; 2715 #endif 2716 stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS]; 2717 2718 /* Update counters in ifnet. */ 2719 if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]); 2720 2721 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 2722 mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]); 2723 2724 if_inc_counter(ifp, IFCOUNTER_OERRORS, 2725 mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]); 2726 2727 if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]); 2728 2729 if_inc_counter(ifp, IFCOUNTER_IERRORS, 2730 mib[VGE_MIB_RX_FIFO_OVERRUNS] + 2731 mib[VGE_MIB_RX_RUNTS] + 2732 mib[VGE_MIB_RX_RUNTS_ERRS] + 2733 mib[VGE_MIB_RX_CRCERRS] + 2734 mib[VGE_MIB_RX_ALIGNERRS] + 2735 mib[VGE_MIB_RX_NOBUFS] + 2736 mib[VGE_MIB_RX_SYMERRS] + 2737 mib[VGE_MIB_RX_LENERRS]); 2738 } 2739 2740 static void 2741 vge_intr_holdoff(struct vge_softc *sc) 2742 { 2743 uint8_t intctl; 2744 2745 VGE_LOCK_ASSERT(sc); 2746 2747 /* 2748 * Set Tx interrupt suppression threshold. 2749 * It is possible to use the single-shot timer in the VGE_CRS1 2750 * register in the Tx path so that the driver could eliminate most 2751 * Tx completion interrupts. However, this requires an additional 2752 * access to the VGE_CRS1 register to reload the timer in addition 2753 * to activating the Tx kick command. Another downside is that we 2754 * don't know in advance what single-shot timer value should be 2755 * used, so reclaiming transmitted mbufs could be delayed 2756 * considerably, which in turn slows down Tx operation. 2757 */ 2758 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR); 2759 CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt); 2760 2761 /* Set Rx interrupt suppression threshold.
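 * in the same way; a value of zero in the rx_coal_pkt (or tx_coal_pkt) sysctl disables the corresponding suppression through the VGE_INTCTL_*INTSUP_DISABLE bits below.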
*/ 2762 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); 2763 CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt); 2764 2765 intctl = CSR_READ_1(sc, VGE_INTCTL1); 2766 intctl &= ~VGE_INTCTL_SC_RELOAD; 2767 intctl |= VGE_INTCTL_HC_RELOAD; 2768 if (sc->vge_tx_coal_pkt <= 0) 2769 intctl |= VGE_INTCTL_TXINTSUP_DISABLE; 2770 else 2771 intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE; 2772 if (sc->vge_rx_coal_pkt <= 0) 2773 intctl |= VGE_INTCTL_RXINTSUP_DISABLE; 2774 else 2775 intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE; 2776 CSR_WRITE_1(sc, VGE_INTCTL1, intctl); 2777 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF); 2778 if (sc->vge_int_holdoff > 0) { 2779 /* Set interrupt holdoff timer. */ 2780 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); 2781 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 2782 VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff)); 2783 /* Enable holdoff timer. */ 2784 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); 2785 } 2786 } 2787 2788 static void 2789 vge_setlinkspeed(struct vge_softc *sc) 2790 { 2791 struct mii_data *mii; 2792 int aneg, i; 2793 2794 VGE_LOCK_ASSERT(sc); 2795 2796 mii = device_get_softc(sc->vge_miibus); 2797 mii_pollstat(mii); 2798 aneg = 0; 2799 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 2800 (IFM_ACTIVE | IFM_AVALID)) { 2801 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2802 case IFM_10_T: 2803 case IFM_100_TX: 2804 return; 2805 case IFM_1000_T: 2806 aneg++; /* FALLTHROUGH */ 2807 default: 2808 break; 2809 } 2810 } 2811 /* Clear forced MAC speed/duplex configuration. */ 2812 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2813 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2814 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0); 2815 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR, 2816 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 2817 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR, 2818 BMCR_AUTOEN | BMCR_STARTNEG); 2819 DELAY(1000); 2820 if (aneg != 0) { 2821 /* Poll link state until vge(4) gets a 10/100 link. */ 2822 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 2823 mii_pollstat(mii); 2824 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 2825 == (IFM_ACTIVE | IFM_AVALID)) { 2826 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2827 case IFM_10_T: 2828 case IFM_100_TX: 2829 return; 2830 default: 2831 break; 2832 } 2833 } 2834 VGE_UNLOCK(sc); 2835 pause("vgelnk", hz); 2836 VGE_LOCK(sc); 2837 } 2838 if (i == MII_ANEGTICKS_GIGE) 2839 device_printf(sc->vge_dev, "establishing link failed, " 2840 "WOL may not work!\n"); 2841 } 2842 /* 2843 * No link, force the MAC to a 100Mbps, full-duplex link. 2844 * This is the last resort and may or may not work. 2845 */ 2846 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 2847 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 2848 } 2849 2850 static void 2851 vge_setwol(struct vge_softc *sc) 2852 { 2853 struct ifnet *ifp; 2854 uint16_t pmstat; 2855 uint8_t val; 2856 2857 VGE_LOCK_ASSERT(sc); 2858 2859 if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) { 2860 /* No PME capability, PHY power down. */ 2861 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR, 2862 BMCR_PDOWN); 2863 vge_miipoll_stop(sc); 2864 return; 2865 } 2866 2867 ifp = sc->vge_ifp; 2868 2869 /* Clear WOL on pattern match. */ 2870 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2871 /* Disable WOL on magic/unicast packet.
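 * events first; the bits the administrator actually asked for are re-enabled below from the IFCAP_WOL_* capabilities (settable from userland with the ifconfig wol_ucast / wol_mcast / wol_magic flags).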
*/ 2872 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2873 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2874 VGE_WOLCFG_PMEOVR); 2875 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2876 vge_setlinkspeed(sc); 2877 val = 0; 2878 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2879 val |= VGE_WOLCR1_UCAST; 2880 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2881 val |= VGE_WOLCR1_MAGIC; 2882 CSR_WRITE_1(sc, VGE_WOLCR1S, val); 2883 val = 0; 2884 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2885 val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB; 2886 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR); 2887 /* Disable MII auto-polling. */ 2888 vge_miipoll_stop(sc); 2889 } 2890 CSR_SETBIT_1(sc, VGE_DIAGCTL, 2891 VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE); 2892 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2893 2894 /* Clear WOL status on pattern match. */ 2895 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2896 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2897 2898 val = CSR_READ_1(sc, VGE_PWRSTAT); 2899 val |= VGE_STICKHW_SWPTAG; 2900 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2901 /* Put hardware into sleep. */ 2902 val = CSR_READ_1(sc, VGE_PWRSTAT); 2903 val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1; 2904 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2905 /* Request PME if WOL is requested. */ 2906 pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap + 2907 PCIR_POWER_STATUS, 2); 2908 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2909 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2910 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2911 pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS, 2912 pmstat, 2); 2913 } 2914 2915 static void 2916 vge_clrwol(struct vge_softc *sc) 2917 { 2918 uint8_t val; 2919 2920 val = CSR_READ_1(sc, VGE_PWRSTAT); 2921 val &= ~VGE_STICKHW_SWPTAG; 2922 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2923 /* Disable WOL and clear power state indicator. */ 2924 val = CSR_READ_1(sc, VGE_PWRSTAT); 2925 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); 2926 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2927 2928 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2929 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2930 2931 /* Clear WOL on pattern match. */ 2932 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2933 /* Disable WOL on magic/unicast packet. */ 2934 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2935 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2936 VGE_WOLCFG_PMEOVR); 2937 /* Clear WOL status on pattern match. */ 2938 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2939 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2940 } 2941