/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2004
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers.
 * If you only have a 32-bit system, then this isn't an issue, but if
 * you have a 64-bit system and more than 4GB of memory, you have to
 * make sure your network data buffers reside in the same 48-bit
 * 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);

/*
 * The SQE error counter of the MIB seems to report a bogus value.
 * The vendor's workaround does not seem to work on PCIe based
 * controllers.  Disable it until we find a better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
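 *
 * Each entry below pairs a PCI vendor/device ID with a description
 * string, and the table ends with a NULL-named terminator.  As an
 * illustrative sketch only (the device ID here is hypothetical, not a
 * real part), supporting another chip would just mean adding a row
 * ahead of the terminator:
 *
 *	{ VIA_VENDORID, 0x1234,
 *	    "VIA Networking hypothetical VT612x variant" },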
147 */ 148 static struct vge_type vge_devs[] = { 149 { VIA_VENDORID, VIA_DEVICEID_61XX, 150 "VIA Networking Velocity Gigabit Ethernet" }, 151 { 0, 0, NULL } 152 }; 153 154 static int vge_attach(device_t); 155 static int vge_detach(device_t); 156 static int vge_probe(device_t); 157 static int vge_resume(device_t); 158 static int vge_shutdown(device_t); 159 static int vge_suspend(device_t); 160 161 static void vge_cam_clear(struct vge_softc *); 162 static int vge_cam_set(struct vge_softc *, uint8_t *); 163 static void vge_clrwol(struct vge_softc *); 164 static void vge_discard_rxbuf(struct vge_softc *, int); 165 static int vge_dma_alloc(struct vge_softc *); 166 static void vge_dma_free(struct vge_softc *); 167 static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 168 #ifdef VGE_EEPROM 169 static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 170 #endif 171 static int vge_encap(struct vge_softc *, struct mbuf **); 172 #ifndef __NO_STRICT_ALIGNMENT 173 static __inline void 174 vge_fixup_rx(struct mbuf *); 175 #endif 176 static void vge_freebufs(struct vge_softc *); 177 static void vge_ifmedia_sts(if_t, struct ifmediareq *); 178 static int vge_ifmedia_upd(if_t); 179 static int vge_ifmedia_upd_locked(struct vge_softc *); 180 static void vge_init(void *); 181 static void vge_init_locked(struct vge_softc *); 182 static void vge_intr(void *); 183 static void vge_intr_holdoff(struct vge_softc *); 184 static int vge_ioctl(if_t, u_long, caddr_t); 185 static void vge_link_statchg(void *); 186 static int vge_miibus_readreg(device_t, int, int); 187 static int vge_miibus_writereg(device_t, int, int, int); 188 static void vge_miipoll_start(struct vge_softc *); 189 static void vge_miipoll_stop(struct vge_softc *); 190 static int vge_newbuf(struct vge_softc *, int); 191 static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 192 static void vge_reset(struct vge_softc *); 193 static int vge_rx_list_init(struct vge_softc *); 194 static int vge_rxeof(struct vge_softc *, int); 195 static void vge_rxfilter(struct vge_softc *); 196 static void vge_setmedia(struct vge_softc *); 197 static void vge_setvlan(struct vge_softc *); 198 static void vge_setwol(struct vge_softc *); 199 static void vge_start(if_t); 200 static void vge_start_locked(if_t); 201 static void vge_stats_clear(struct vge_softc *); 202 static void vge_stats_update(struct vge_softc *); 203 static void vge_stop(struct vge_softc *); 204 static void vge_sysctl_node(struct vge_softc *); 205 static int vge_tx_list_init(struct vge_softc *); 206 static void vge_txeof(struct vge_softc *); 207 static void vge_watchdog(void *); 208 209 static device_method_t vge_methods[] = { 210 /* Device interface */ 211 DEVMETHOD(device_probe, vge_probe), 212 DEVMETHOD(device_attach, vge_attach), 213 DEVMETHOD(device_detach, vge_detach), 214 DEVMETHOD(device_suspend, vge_suspend), 215 DEVMETHOD(device_resume, vge_resume), 216 DEVMETHOD(device_shutdown, vge_shutdown), 217 218 /* MII interface */ 219 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 220 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 221 222 DEVMETHOD_END 223 }; 224 225 static driver_t vge_driver = { 226 "vge", 227 vge_methods, 228 sizeof(struct vge_softc) 229 }; 230 231 DRIVER_MODULE(vge, pci, vge_driver, 0, 0); 232 DRIVER_MODULE(miibus, vge, miibus_driver, 0, 0); 233 234 #ifdef VGE_EEPROM 235 /* 236 * Read a word of data stored in the EEPROM at address 'addr.' 
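 *
 * When built with VGE_EEPROM, vge_read_eeprom() calls this helper once
 * per 16-bit word; the station address, for instance, is pulled out as
 * three consecutive words starting at VGE_EE_EADDR.  A minimal caller
 * sketch (illustrative only, 'eaddr' being a local byte array):
 *
 *	uint16_t w;
 *	int n;
 *
 *	for (n = 0; n < 3; n++) {
 *		vge_eeprom_getword(sc, VGE_EE_EADDR + n, &w);
 *		bcopy(&w, eaddr + n * 2, sizeof(w));
 *	}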
237 */ 238 static void 239 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 240 { 241 int i; 242 uint16_t word = 0; 243 244 /* 245 * Enter EEPROM embedded programming mode. In order to 246 * access the EEPROM at all, we first have to set the 247 * EELOAD bit in the CHIPCFG2 register. 248 */ 249 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 250 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 251 252 /* Select the address of the word we want to read */ 253 CSR_WRITE_1(sc, VGE_EEADDR, addr); 254 255 /* Issue read command */ 256 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 257 258 /* Wait for the done bit to be set. */ 259 for (i = 0; i < VGE_TIMEOUT; i++) { 260 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 261 break; 262 } 263 264 if (i == VGE_TIMEOUT) { 265 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 266 *dest = 0; 267 return; 268 } 269 270 /* Read the result */ 271 word = CSR_READ_2(sc, VGE_EERDDAT); 272 273 /* Turn off EEPROM access mode. */ 274 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 275 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 276 277 *dest = word; 278 } 279 #endif 280 281 /* 282 * Read a sequence of words from the EEPROM. 283 */ 284 static void 285 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 286 { 287 int i; 288 #ifdef VGE_EEPROM 289 uint16_t word = 0, *ptr; 290 291 for (i = 0; i < cnt; i++) { 292 vge_eeprom_getword(sc, off + i, &word); 293 ptr = (uint16_t *)(dest + (i * 2)); 294 if (swap) 295 *ptr = ntohs(word); 296 else 297 *ptr = word; 298 } 299 #else 300 for (i = 0; i < ETHER_ADDR_LEN; i++) 301 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 302 #endif 303 } 304 305 static void 306 vge_miipoll_stop(struct vge_softc *sc) 307 { 308 int i; 309 310 CSR_WRITE_1(sc, VGE_MIICMD, 0); 311 312 for (i = 0; i < VGE_TIMEOUT; i++) { 313 DELAY(1); 314 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 315 break; 316 } 317 318 if (i == VGE_TIMEOUT) 319 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 320 } 321 322 static void 323 vge_miipoll_start(struct vge_softc *sc) 324 { 325 int i; 326 327 /* First, make sure we're idle. */ 328 329 CSR_WRITE_1(sc, VGE_MIICMD, 0); 330 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 331 332 for (i = 0; i < VGE_TIMEOUT; i++) { 333 DELAY(1); 334 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 335 break; 336 } 337 338 if (i == VGE_TIMEOUT) { 339 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 340 return; 341 } 342 343 /* Now enable auto poll mode. */ 344 345 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 346 347 /* And make sure it started. */ 348 349 for (i = 0; i < VGE_TIMEOUT; i++) { 350 DELAY(1); 351 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 352 break; 353 } 354 355 if (i == VGE_TIMEOUT) 356 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 357 } 358 359 static int 360 vge_miibus_readreg(device_t dev, int phy, int reg) 361 { 362 struct vge_softc *sc; 363 int i; 364 uint16_t rval = 0; 365 366 sc = device_get_softc(dev); 367 368 vge_miipoll_stop(sc); 369 370 /* Specify the register we want to read. */ 371 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 372 373 /* Issue read command. */ 374 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 375 376 /* Wait for the read command bit to self-clear. 
*/ 377 for (i = 0; i < VGE_TIMEOUT; i++) { 378 DELAY(1); 379 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) 380 break; 381 } 382 383 if (i == VGE_TIMEOUT) 384 device_printf(sc->vge_dev, "MII read timed out\n"); 385 else 386 rval = CSR_READ_2(sc, VGE_MIIDATA); 387 388 vge_miipoll_start(sc); 389 390 return (rval); 391 } 392 393 static int 394 vge_miibus_writereg(device_t dev, int phy, int reg, int data) 395 { 396 struct vge_softc *sc; 397 int i, rval = 0; 398 399 sc = device_get_softc(dev); 400 401 vge_miipoll_stop(sc); 402 403 /* Specify the register we want to write. */ 404 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 405 406 /* Specify the data we want to write. */ 407 CSR_WRITE_2(sc, VGE_MIIDATA, data); 408 409 /* Issue write command. */ 410 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); 411 412 /* Wait for the write command bit to self-clear. */ 413 for (i = 0; i < VGE_TIMEOUT; i++) { 414 DELAY(1); 415 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) 416 break; 417 } 418 419 if (i == VGE_TIMEOUT) { 420 device_printf(sc->vge_dev, "MII write timed out\n"); 421 rval = EIO; 422 } 423 424 vge_miipoll_start(sc); 425 426 return (rval); 427 } 428 429 static void 430 vge_cam_clear(struct vge_softc *sc) 431 { 432 int i; 433 434 /* 435 * Turn off all the mask bits. This tells the chip 436 * that none of the entries in the CAM filter are valid. 437 * desired entries will be enabled as we fill the filter in. 438 */ 439 440 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 441 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 442 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); 443 for (i = 0; i < 8; i++) 444 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 445 446 /* Clear the VLAN filter too. */ 447 448 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); 449 for (i = 0; i < 8; i++) 450 CSR_WRITE_1(sc, VGE_CAM0 + i, 0); 451 452 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 453 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 454 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 455 456 sc->vge_camidx = 0; 457 } 458 459 static int 460 vge_cam_set(struct vge_softc *sc, uint8_t *addr) 461 { 462 int i, error = 0; 463 464 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 465 return (ENOSPC); 466 467 /* Select the CAM data page. */ 468 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 469 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); 470 471 /* Set the filter entry we want to update and enable writing. */ 472 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); 473 474 /* Write the address to the CAM registers */ 475 for (i = 0; i < ETHER_ADDR_LEN; i++) 476 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); 477 478 /* Issue a write command. */ 479 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); 480 481 /* Wake for it to clear. */ 482 for (i = 0; i < VGE_TIMEOUT; i++) { 483 DELAY(1); 484 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) 485 break; 486 } 487 488 if (i == VGE_TIMEOUT) { 489 device_printf(sc->vge_dev, "setting CAM filter failed\n"); 490 error = EIO; 491 goto fail; 492 } 493 494 /* Select the CAM mask page. */ 495 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 496 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); 497 498 /* Set the mask bit that enables this filter. */ 499 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 500 1<<(sc->vge_camidx & 7)); 501 502 sc->vge_camidx++; 503 504 fail: 505 /* Turn off access to CAM. 
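 * Leaving the page selector pointed back at the multicast address
 * registers (VGE_PAGESEL_MAR) restores the state the rest of the driver
 * expects, so the hash filter writes done later in vge_rxfilter() hit
 * the MAR window rather than CAM data or mask bytes.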
*/ 506 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 507 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 508 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 509 510 return (error); 511 } 512 513 static void 514 vge_setvlan(struct vge_softc *sc) 515 { 516 if_t ifp; 517 uint8_t cfg; 518 519 VGE_LOCK_ASSERT(sc); 520 521 ifp = sc->vge_ifp; 522 cfg = CSR_READ_1(sc, VGE_RXCFG); 523 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) 524 cfg |= VGE_VTAG_OPT2; 525 else 526 cfg &= ~VGE_VTAG_OPT2; 527 CSR_WRITE_1(sc, VGE_RXCFG, cfg); 528 } 529 530 static u_int 531 vge_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 532 { 533 struct vge_softc *sc = arg; 534 535 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 536 return (0); 537 538 (void )vge_cam_set(sc, LLADDR(sdl)); 539 540 return (1); 541 } 542 543 static u_int 544 vge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 545 { 546 uint32_t h, *hashes = arg; 547 548 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; 549 if (h < 32) 550 hashes[0] |= (1 << h); 551 else 552 hashes[1] |= (1 << (h - 32)); 553 554 return (1); 555 } 556 557 /* 558 * Program the multicast filter. We use the 64-entry CAM filter 559 * for perfect filtering. If there's more than 64 multicast addresses, 560 * we use the hash filter instead. 561 */ 562 static void 563 vge_rxfilter(struct vge_softc *sc) 564 { 565 if_t ifp; 566 uint32_t hashes[2]; 567 uint8_t rxcfg; 568 569 VGE_LOCK_ASSERT(sc); 570 571 /* First, zot all the multicast entries. */ 572 hashes[0] = 0; 573 hashes[1] = 0; 574 575 rxcfg = CSR_READ_1(sc, VGE_RXCTL); 576 rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST | 577 VGE_RXCTL_RX_PROMISC); 578 /* 579 * Always allow VLAN oversized frames and frames for 580 * this host. 581 */ 582 rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST; 583 584 ifp = sc->vge_ifp; 585 if ((if_getflags(ifp) & IFF_BROADCAST) != 0) 586 rxcfg |= VGE_RXCTL_RX_BCAST; 587 if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 588 if ((if_getflags(ifp) & IFF_PROMISC) != 0) 589 rxcfg |= VGE_RXCTL_RX_PROMISC; 590 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { 591 hashes[0] = 0xFFFFFFFF; 592 hashes[1] = 0xFFFFFFFF; 593 } 594 goto done; 595 } 596 597 vge_cam_clear(sc); 598 599 /* Now program new ones */ 600 if_foreach_llmaddr(ifp, vge_set_maddr, sc); 601 602 /* If there were too many addresses, use the hash filter. */ 603 if (sc->vge_camidx == VGE_CAM_MAXADDRS) { 604 vge_cam_clear(sc); 605 if_foreach_llmaddr(ifp, vge_hash_maddr, hashes); 606 } 607 608 done: 609 if (hashes[0] != 0 || hashes[1] != 0) 610 rxcfg |= VGE_RXCTL_RX_MCAST; 611 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 612 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 613 CSR_WRITE_1(sc, VGE_RXCTL, rxcfg); 614 } 615 616 static void 617 vge_reset(struct vge_softc *sc) 618 { 619 int i; 620 621 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 622 623 for (i = 0; i < VGE_TIMEOUT; i++) { 624 DELAY(5); 625 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 626 break; 627 } 628 629 if (i == VGE_TIMEOUT) { 630 device_printf(sc->vge_dev, "soft reset timed out\n"); 631 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 632 DELAY(2000); 633 } 634 635 DELAY(5000); 636 } 637 638 /* 639 * Probe for a VIA gigabit chip. Check the PCI vendor and device 640 * IDs against our list and return a device name if we find a match. 
641 */ 642 static int 643 vge_probe(device_t dev) 644 { 645 struct vge_type *t; 646 647 t = vge_devs; 648 649 while (t->vge_name != NULL) { 650 if ((pci_get_vendor(dev) == t->vge_vid) && 651 (pci_get_device(dev) == t->vge_did)) { 652 device_set_desc(dev, t->vge_name); 653 return (BUS_PROBE_DEFAULT); 654 } 655 t++; 656 } 657 658 return (ENXIO); 659 } 660 661 /* 662 * Map a single buffer address. 663 */ 664 665 struct vge_dmamap_arg { 666 bus_addr_t vge_busaddr; 667 }; 668 669 static void 670 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 671 { 672 struct vge_dmamap_arg *ctx; 673 674 if (error != 0) 675 return; 676 677 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 678 679 ctx = (struct vge_dmamap_arg *)arg; 680 ctx->vge_busaddr = segs[0].ds_addr; 681 } 682 683 static int 684 vge_dma_alloc(struct vge_softc *sc) 685 { 686 struct vge_dmamap_arg ctx; 687 struct vge_txdesc *txd; 688 struct vge_rxdesc *rxd; 689 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 690 int error, i; 691 692 /* 693 * It seems old PCI controllers do not support DAC. DAC 694 * configuration can be enabled by accessing VGE_CHIPCFG3 695 * register but honor EEPROM configuration instead of 696 * blindly overriding DAC configuration. PCIe based 697 * controllers are supposed to support 64bit DMA so enable 698 * 64bit DMA on these controllers. 699 */ 700 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 701 lowaddr = BUS_SPACE_MAXADDR; 702 else 703 lowaddr = BUS_SPACE_MAXADDR_32BIT; 704 705 again: 706 /* Create parent ring tag. */ 707 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 708 1, 0, /* algnmnt, boundary */ 709 lowaddr, /* lowaddr */ 710 BUS_SPACE_MAXADDR, /* highaddr */ 711 NULL, NULL, /* filter, filterarg */ 712 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 713 0, /* nsegments */ 714 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 715 0, /* flags */ 716 NULL, NULL, /* lockfunc, lockarg */ 717 &sc->vge_cdata.vge_ring_tag); 718 if (error != 0) { 719 device_printf(sc->vge_dev, 720 "could not create parent DMA tag.\n"); 721 goto fail; 722 } 723 724 /* Create tag for Tx ring. */ 725 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 726 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 727 BUS_SPACE_MAXADDR, /* lowaddr */ 728 BUS_SPACE_MAXADDR, /* highaddr */ 729 NULL, NULL, /* filter, filterarg */ 730 VGE_TX_LIST_SZ, /* maxsize */ 731 1, /* nsegments */ 732 VGE_TX_LIST_SZ, /* maxsegsize */ 733 0, /* flags */ 734 NULL, NULL, /* lockfunc, lockarg */ 735 &sc->vge_cdata.vge_tx_ring_tag); 736 if (error != 0) { 737 device_printf(sc->vge_dev, 738 "could not allocate Tx ring DMA tag.\n"); 739 goto fail; 740 } 741 742 /* Create tag for Rx ring. */ 743 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 744 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 745 BUS_SPACE_MAXADDR, /* lowaddr */ 746 BUS_SPACE_MAXADDR, /* highaddr */ 747 NULL, NULL, /* filter, filterarg */ 748 VGE_RX_LIST_SZ, /* maxsize */ 749 1, /* nsegments */ 750 VGE_RX_LIST_SZ, /* maxsegsize */ 751 0, /* flags */ 752 NULL, NULL, /* lockfunc, lockarg */ 753 &sc->vge_cdata.vge_rx_ring_tag); 754 if (error != 0) { 755 device_printf(sc->vge_dev, 756 "could not allocate Rx ring DMA tag.\n"); 757 goto fail; 758 } 759 760 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
*/ 761 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 762 (void **)&sc->vge_rdata.vge_tx_ring, 763 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 764 &sc->vge_cdata.vge_tx_ring_map); 765 if (error != 0) { 766 device_printf(sc->vge_dev, 767 "could not allocate DMA'able memory for Tx ring.\n"); 768 goto fail; 769 } 770 771 ctx.vge_busaddr = 0; 772 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 773 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 774 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 775 if (error != 0 || ctx.vge_busaddr == 0) { 776 device_printf(sc->vge_dev, 777 "could not load DMA'able memory for Tx ring.\n"); 778 goto fail; 779 } 780 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 781 782 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 783 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 784 (void **)&sc->vge_rdata.vge_rx_ring, 785 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 786 &sc->vge_cdata.vge_rx_ring_map); 787 if (error != 0) { 788 device_printf(sc->vge_dev, 789 "could not allocate DMA'able memory for Rx ring.\n"); 790 goto fail; 791 } 792 793 ctx.vge_busaddr = 0; 794 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 795 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 796 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 797 if (error != 0 || ctx.vge_busaddr == 0) { 798 device_printf(sc->vge_dev, 799 "could not load DMA'able memory for Rx ring.\n"); 800 goto fail; 801 } 802 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 803 804 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 805 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 806 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 807 if ((VGE_ADDR_HI(tx_ring_end) != 808 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 809 (VGE_ADDR_HI(rx_ring_end) != 810 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 811 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 812 device_printf(sc->vge_dev, "4GB boundary crossed, " 813 "switching to 32bit DMA address mode.\n"); 814 vge_dma_free(sc); 815 /* Limit DMA address space to 32bit and try again. */ 816 lowaddr = BUS_SPACE_MAXADDR_32BIT; 817 goto again; 818 } 819 820 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 821 lowaddr = VGE_BUF_DMA_MAXADDR; 822 else 823 lowaddr = BUS_SPACE_MAXADDR_32BIT; 824 /* Create parent buffer tag. */ 825 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 826 1, 0, /* algnmnt, boundary */ 827 lowaddr, /* lowaddr */ 828 BUS_SPACE_MAXADDR, /* highaddr */ 829 NULL, NULL, /* filter, filterarg */ 830 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 831 0, /* nsegments */ 832 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 833 0, /* flags */ 834 NULL, NULL, /* lockfunc, lockarg */ 835 &sc->vge_cdata.vge_buffer_tag); 836 if (error != 0) { 837 device_printf(sc->vge_dev, 838 "could not create parent buffer DMA tag.\n"); 839 goto fail; 840 } 841 842 /* Create tag for Tx buffers. 
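 * The tag created below lets a frame be scattered across up to
 * VGE_MAXTXSEGS segments of at most MCLBYTES each, matching the
 * vge_frag[] slots that vge_encap() fills in; the Rx tag that follows
 * instead maps a single MCLBYTES cluster aligned to VGE_RX_BUF_ALIGN,
 * since receive descriptors address only one buffer.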
*/ 843 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 844 1, 0, /* algnmnt, boundary */ 845 BUS_SPACE_MAXADDR, /* lowaddr */ 846 BUS_SPACE_MAXADDR, /* highaddr */ 847 NULL, NULL, /* filter, filterarg */ 848 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 849 VGE_MAXTXSEGS, /* nsegments */ 850 MCLBYTES, /* maxsegsize */ 851 0, /* flags */ 852 NULL, NULL, /* lockfunc, lockarg */ 853 &sc->vge_cdata.vge_tx_tag); 854 if (error != 0) { 855 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 856 goto fail; 857 } 858 859 /* Create tag for Rx buffers. */ 860 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 861 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 862 BUS_SPACE_MAXADDR, /* lowaddr */ 863 BUS_SPACE_MAXADDR, /* highaddr */ 864 NULL, NULL, /* filter, filterarg */ 865 MCLBYTES, /* maxsize */ 866 1, /* nsegments */ 867 MCLBYTES, /* maxsegsize */ 868 0, /* flags */ 869 NULL, NULL, /* lockfunc, lockarg */ 870 &sc->vge_cdata.vge_rx_tag); 871 if (error != 0) { 872 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 873 goto fail; 874 } 875 876 /* Create DMA maps for Tx buffers. */ 877 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 878 txd = &sc->vge_cdata.vge_txdesc[i]; 879 txd->tx_m = NULL; 880 txd->tx_dmamap = NULL; 881 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 882 &txd->tx_dmamap); 883 if (error != 0) { 884 device_printf(sc->vge_dev, 885 "could not create Tx dmamap.\n"); 886 goto fail; 887 } 888 } 889 /* Create DMA maps for Rx buffers. */ 890 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 891 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 892 device_printf(sc->vge_dev, 893 "could not create spare Rx dmamap.\n"); 894 goto fail; 895 } 896 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 897 rxd = &sc->vge_cdata.vge_rxdesc[i]; 898 rxd->rx_m = NULL; 899 rxd->rx_dmamap = NULL; 900 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 901 &rxd->rx_dmamap); 902 if (error != 0) { 903 device_printf(sc->vge_dev, 904 "could not create Rx dmamap.\n"); 905 goto fail; 906 } 907 } 908 909 fail: 910 return (error); 911 } 912 913 static void 914 vge_dma_free(struct vge_softc *sc) 915 { 916 struct vge_txdesc *txd; 917 struct vge_rxdesc *rxd; 918 int i; 919 920 /* Tx ring. */ 921 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 922 if (sc->vge_rdata.vge_tx_ring_paddr) 923 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 924 sc->vge_cdata.vge_tx_ring_map); 925 if (sc->vge_rdata.vge_tx_ring) 926 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 927 sc->vge_rdata.vge_tx_ring, 928 sc->vge_cdata.vge_tx_ring_map); 929 sc->vge_rdata.vge_tx_ring = NULL; 930 sc->vge_rdata.vge_tx_ring_paddr = 0; 931 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 932 sc->vge_cdata.vge_tx_ring_tag = NULL; 933 } 934 /* Rx ring. */ 935 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 936 if (sc->vge_rdata.vge_rx_ring_paddr) 937 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 938 sc->vge_cdata.vge_rx_ring_map); 939 if (sc->vge_rdata.vge_rx_ring) 940 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 941 sc->vge_rdata.vge_rx_ring, 942 sc->vge_cdata.vge_rx_ring_map); 943 sc->vge_rdata.vge_rx_ring = NULL; 944 sc->vge_rdata.vge_rx_ring_paddr = 0; 945 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 946 sc->vge_cdata.vge_rx_ring_tag = NULL; 947 } 948 /* Tx buffers. 
*/ 949 if (sc->vge_cdata.vge_tx_tag != NULL) { 950 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 951 txd = &sc->vge_cdata.vge_txdesc[i]; 952 if (txd->tx_dmamap != NULL) { 953 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 954 txd->tx_dmamap); 955 txd->tx_dmamap = NULL; 956 } 957 } 958 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 959 sc->vge_cdata.vge_tx_tag = NULL; 960 } 961 /* Rx buffers. */ 962 if (sc->vge_cdata.vge_rx_tag != NULL) { 963 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 964 rxd = &sc->vge_cdata.vge_rxdesc[i]; 965 if (rxd->rx_dmamap != NULL) { 966 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 967 rxd->rx_dmamap); 968 rxd->rx_dmamap = NULL; 969 } 970 } 971 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 972 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 973 sc->vge_cdata.vge_rx_sparemap); 974 sc->vge_cdata.vge_rx_sparemap = NULL; 975 } 976 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 977 sc->vge_cdata.vge_rx_tag = NULL; 978 } 979 980 if (sc->vge_cdata.vge_buffer_tag != NULL) { 981 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 982 sc->vge_cdata.vge_buffer_tag = NULL; 983 } 984 if (sc->vge_cdata.vge_ring_tag != NULL) { 985 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 986 sc->vge_cdata.vge_ring_tag = NULL; 987 } 988 } 989 990 /* 991 * Attach the interface. Allocate softc structures, do ifmedia 992 * setup and ethernet/BPF attach. 993 */ 994 static int 995 vge_attach(device_t dev) 996 { 997 u_char eaddr[ETHER_ADDR_LEN]; 998 struct vge_softc *sc; 999 if_t ifp; 1000 int error = 0, cap, i, msic, rid; 1001 1002 sc = device_get_softc(dev); 1003 sc->vge_dev = dev; 1004 1005 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1006 MTX_DEF); 1007 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 1008 1009 /* 1010 * Map control/status registers. 1011 */ 1012 pci_enable_busmaster(dev); 1013 1014 rid = PCIR_BAR(1); 1015 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1016 RF_ACTIVE); 1017 1018 if (sc->vge_res == NULL) { 1019 device_printf(dev, "couldn't map ports/memory\n"); 1020 error = ENXIO; 1021 goto fail; 1022 } 1023 1024 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 1025 sc->vge_flags |= VGE_FLAG_PCIE; 1026 sc->vge_expcap = cap; 1027 } else 1028 sc->vge_flags |= VGE_FLAG_JUMBO; 1029 if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) { 1030 sc->vge_flags |= VGE_FLAG_PMCAP; 1031 sc->vge_pmcap = cap; 1032 } 1033 rid = 0; 1034 msic = pci_msi_count(dev); 1035 if (msi_disable == 0 && msic > 0) { 1036 msic = 1; 1037 if (pci_alloc_msi(dev, &msic) == 0) { 1038 if (msic == 1) { 1039 sc->vge_flags |= VGE_FLAG_MSI; 1040 device_printf(dev, "Using %d MSI message\n", 1041 msic); 1042 rid = 1; 1043 } else 1044 pci_release_msi(dev); 1045 } 1046 } 1047 1048 /* Allocate interrupt */ 1049 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1050 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 1051 if (sc->vge_irq == NULL) { 1052 device_printf(dev, "couldn't map interrupt\n"); 1053 error = ENXIO; 1054 goto fail; 1055 } 1056 1057 /* Reset the adapter. */ 1058 vge_reset(sc); 1059 /* Reload EEPROM. */ 1060 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 1061 for (i = 0; i < VGE_TIMEOUT; i++) { 1062 DELAY(5); 1063 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1064 break; 1065 } 1066 if (i == VGE_TIMEOUT) 1067 device_printf(dev, "EEPROM reload timed out\n"); 1068 /* 1069 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1070 * MAC will receive magic packet which in turn confuses 1071 * controller. 
1072 */ 1073 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1074 1075 /* 1076 * Get station address from the EEPROM. 1077 */ 1078 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1079 /* 1080 * Save configured PHY address. 1081 * It seems the PHY address of PCIe controllers just 1082 * reflects media jump strapping status so we assume the 1083 * internal PHY address of PCIe controller is at 1. 1084 */ 1085 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1086 sc->vge_phyaddr = 1; 1087 else 1088 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1089 VGE_MIICFG_PHYADDR; 1090 /* Clear WOL and take hardware from powerdown. */ 1091 vge_clrwol(sc); 1092 vge_sysctl_node(sc); 1093 error = vge_dma_alloc(sc); 1094 if (error) 1095 goto fail; 1096 1097 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1098 if (ifp == NULL) { 1099 device_printf(dev, "can not if_alloc()\n"); 1100 error = ENOSPC; 1101 goto fail; 1102 } 1103 1104 vge_miipoll_start(sc); 1105 /* Do MII setup */ 1106 error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd, 1107 vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY, 1108 MIIF_DOPAUSE); 1109 if (error != 0) { 1110 device_printf(dev, "attaching PHYs failed\n"); 1111 goto fail; 1112 } 1113 1114 if_setsoftc(ifp, sc); 1115 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1116 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 1117 if_setioctlfn(ifp, vge_ioctl); 1118 if_setcapabilities(ifp, IFCAP_VLAN_MTU); 1119 if_setstartfn(ifp, vge_start); 1120 if_sethwassist(ifp, VGE_CSUM_FEATURES); 1121 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | 1122 IFCAP_VLAN_HWTAGGING, 0); 1123 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) 1124 if_setcapabilitiesbit(ifp, IFCAP_WOL, 0); 1125 if_setcapenable(ifp, if_getcapabilities(ifp)); 1126 #ifdef DEVICE_POLLING 1127 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); 1128 #endif 1129 if_setinitfn(ifp, vge_init); 1130 if_setsendqlen(ifp, VGE_TX_DESC_CNT - 1); 1131 if_setsendqready(ifp); 1132 1133 /* 1134 * Call MI attach routine. 1135 */ 1136 ether_ifattach(ifp, eaddr); 1137 1138 /* Tell the upper layer(s) we support long frames. */ 1139 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 1140 1141 /* Hook interrupt last to avoid having to lock softc */ 1142 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1143 NULL, vge_intr, sc, &sc->vge_intrhand); 1144 1145 if (error) { 1146 device_printf(dev, "couldn't set up irq\n"); 1147 ether_ifdetach(ifp); 1148 goto fail; 1149 } 1150 1151 fail: 1152 if (error) 1153 vge_detach(dev); 1154 1155 return (error); 1156 } 1157 1158 /* 1159 * Shutdown hardware and free up resources. This can be called any 1160 * time after the mutex has been initialized. It is called in both 1161 * the error case in attach and the normal detach case so it needs 1162 * to be careful about only freeing resources that have actually been 1163 * allocated. 
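 * The teardown below is roughly attach in reverse: detach the ifnet and
 * stop the hardware (only if attach completed), drain the watchdog
 * callout, delete the miibus child, tear down the interrupt handler,
 * release the IRQ, MSI and BAR resources, free the ifnet and the DMA
 * areas, and finally destroy the mutex.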
1164 */ 1165 static int 1166 vge_detach(device_t dev) 1167 { 1168 struct vge_softc *sc; 1169 if_t ifp; 1170 1171 sc = device_get_softc(dev); 1172 KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); 1173 ifp = sc->vge_ifp; 1174 1175 #ifdef DEVICE_POLLING 1176 if (if_getcapenable(ifp) & IFCAP_POLLING) 1177 ether_poll_deregister(ifp); 1178 #endif 1179 1180 /* These should only be active if attach succeeded */ 1181 if (device_is_attached(dev)) { 1182 ether_ifdetach(ifp); 1183 VGE_LOCK(sc); 1184 vge_stop(sc); 1185 VGE_UNLOCK(sc); 1186 callout_drain(&sc->vge_watchdog); 1187 } 1188 if (sc->vge_miibus) 1189 device_delete_child(dev, sc->vge_miibus); 1190 bus_generic_detach(dev); 1191 1192 if (sc->vge_intrhand) 1193 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); 1194 if (sc->vge_irq) 1195 bus_release_resource(dev, SYS_RES_IRQ, 1196 sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq); 1197 if (sc->vge_flags & VGE_FLAG_MSI) 1198 pci_release_msi(dev); 1199 if (sc->vge_res) 1200 bus_release_resource(dev, SYS_RES_MEMORY, 1201 PCIR_BAR(1), sc->vge_res); 1202 if (ifp) 1203 if_free(ifp); 1204 1205 vge_dma_free(sc); 1206 mtx_destroy(&sc->vge_mtx); 1207 1208 return (0); 1209 } 1210 1211 static void 1212 vge_discard_rxbuf(struct vge_softc *sc, int prod) 1213 { 1214 struct vge_rxdesc *rxd; 1215 int i; 1216 1217 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1218 rxd->rx_desc->vge_sts = 0; 1219 rxd->rx_desc->vge_ctl = 0; 1220 1221 /* 1222 * Note: the manual fails to document the fact that for 1223 * proper opration, the driver needs to replentish the RX 1224 * DMA ring 4 descriptors at a time (rather than one at a 1225 * time, like most chips). We can allocate the new buffers 1226 * but we should not set the OWN bits until we're ready 1227 * to hand back 4 of them in one shot. 1228 */ 1229 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1230 for (i = VGE_RXCHUNK; i > 0; i--) { 1231 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1232 rxd = rxd->rxd_prev; 1233 } 1234 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1235 } 1236 } 1237 1238 static int 1239 vge_newbuf(struct vge_softc *sc, int prod) 1240 { 1241 struct vge_rxdesc *rxd; 1242 struct mbuf *m; 1243 bus_dma_segment_t segs[1]; 1244 bus_dmamap_t map; 1245 int i, nsegs; 1246 1247 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1248 if (m == NULL) 1249 return (ENOBUFS); 1250 /* 1251 * This is part of an evil trick to deal with strict-alignment 1252 * architectures. The VIA chip requires RX buffers to be aligned 1253 * on 32-bit boundaries, but that will hose strict-alignment 1254 * architectures. To get around this, we leave some empty space 1255 * at the start of each buffer and for non-strict-alignment hosts, 1256 * we copy the buffer back two bytes to achieve word alignment. 1257 * This is slightly more efficient than allocating a new buffer, 1258 * copying the contents, and discarding the old buffer. 
1259 */ 1260 m->m_len = m->m_pkthdr.len = MCLBYTES; 1261 m_adj(m, VGE_RX_BUF_ALIGN); 1262 1263 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1264 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1265 m_freem(m); 1266 return (ENOBUFS); 1267 } 1268 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1269 1270 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1271 if (rxd->rx_m != NULL) { 1272 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1273 BUS_DMASYNC_POSTREAD); 1274 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1275 } 1276 map = rxd->rx_dmamap; 1277 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1278 sc->vge_cdata.vge_rx_sparemap = map; 1279 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1280 BUS_DMASYNC_PREREAD); 1281 rxd->rx_m = m; 1282 1283 rxd->rx_desc->vge_sts = 0; 1284 rxd->rx_desc->vge_ctl = 0; 1285 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1286 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1287 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1288 1289 /* 1290 * Note: the manual fails to document the fact that for 1291 * proper operation, the driver needs to replenish the RX 1292 * DMA ring 4 descriptors at a time (rather than one at a 1293 * time, like most chips). We can allocate the new buffers 1294 * but we should not set the OWN bits until we're ready 1295 * to hand back 4 of them in one shot. 1296 */ 1297 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1298 for (i = VGE_RXCHUNK; i > 0; i--) { 1299 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1300 rxd = rxd->rxd_prev; 1301 } 1302 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1303 } 1304 1305 return (0); 1306 } 1307 1308 static int 1309 vge_tx_list_init(struct vge_softc *sc) 1310 { 1311 struct vge_ring_data *rd; 1312 struct vge_txdesc *txd; 1313 int i; 1314 1315 VGE_LOCK_ASSERT(sc); 1316 1317 sc->vge_cdata.vge_tx_prodidx = 0; 1318 sc->vge_cdata.vge_tx_considx = 0; 1319 sc->vge_cdata.vge_tx_cnt = 0; 1320 1321 rd = &sc->vge_rdata; 1322 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1323 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1324 txd = &sc->vge_cdata.vge_txdesc[i]; 1325 txd->tx_m = NULL; 1326 txd->tx_desc = &rd->vge_tx_ring[i]; 1327 } 1328 1329 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1330 sc->vge_cdata.vge_tx_ring_map, 1331 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1332 1333 return (0); 1334 } 1335 1336 static int 1337 vge_rx_list_init(struct vge_softc *sc) 1338 { 1339 struct vge_ring_data *rd; 1340 struct vge_rxdesc *rxd; 1341 int i; 1342 1343 VGE_LOCK_ASSERT(sc); 1344 1345 sc->vge_cdata.vge_rx_prodidx = 0; 1346 sc->vge_cdata.vge_head = NULL; 1347 sc->vge_cdata.vge_tail = NULL; 1348 sc->vge_cdata.vge_rx_commit = 0; 1349 1350 rd = &sc->vge_rdata; 1351 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1352 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1353 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1354 rxd->rx_m = NULL; 1355 rxd->rx_desc = &rd->vge_rx_ring[i]; 1356 if (i == 0) 1357 rxd->rxd_prev = 1358 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1359 else 1360 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1361 if (vge_newbuf(sc, i) != 0) 1362 return (ENOBUFS); 1363 } 1364 1365 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1366 sc->vge_cdata.vge_rx_ring_map, 1367 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1368 1369 sc->vge_cdata.vge_rx_commit = 0; 1370 1371 return (0); 1372 } 1373 1374 static void 1375 vge_freebufs(struct vge_softc *sc) 1376 { 1377 struct vge_txdesc *txd; 1378 struct vge_rxdesc *rxd; 1379 if_t ifp; 1380 int i; 
1381 1382 VGE_LOCK_ASSERT(sc); 1383 1384 ifp = sc->vge_ifp; 1385 /* 1386 * Free RX and TX mbufs still in the queues. 1387 */ 1388 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1389 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1390 if (rxd->rx_m != NULL) { 1391 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, 1392 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 1393 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, 1394 rxd->rx_dmamap); 1395 m_freem(rxd->rx_m); 1396 rxd->rx_m = NULL; 1397 } 1398 } 1399 1400 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1401 txd = &sc->vge_cdata.vge_txdesc[i]; 1402 if (txd->tx_m != NULL) { 1403 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, 1404 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1405 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, 1406 txd->tx_dmamap); 1407 m_freem(txd->tx_m); 1408 txd->tx_m = NULL; 1409 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1410 } 1411 } 1412 } 1413 1414 #ifndef __NO_STRICT_ALIGNMENT 1415 static __inline void 1416 vge_fixup_rx(struct mbuf *m) 1417 { 1418 int i; 1419 uint16_t *src, *dst; 1420 1421 src = mtod(m, uint16_t *); 1422 dst = src - 1; 1423 1424 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1425 *dst++ = *src++; 1426 1427 m->m_data -= ETHER_ALIGN; 1428 } 1429 #endif 1430 1431 /* 1432 * RX handler. We support the reception of jumbo frames that have 1433 * been fragmented across multiple 2K mbuf cluster buffers. 1434 */ 1435 static int 1436 vge_rxeof(struct vge_softc *sc, int count) 1437 { 1438 struct mbuf *m; 1439 if_t ifp; 1440 int prod, prog, total_len; 1441 struct vge_rxdesc *rxd; 1442 struct vge_rx_desc *cur_rx; 1443 uint32_t rxstat, rxctl; 1444 1445 VGE_LOCK_ASSERT(sc); 1446 1447 ifp = sc->vge_ifp; 1448 1449 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1450 sc->vge_cdata.vge_rx_ring_map, 1451 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1452 1453 prod = sc->vge_cdata.vge_rx_prodidx; 1454 for (prog = 0; count > 0 && 1455 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; 1456 VGE_RX_DESC_INC(prod)) { 1457 cur_rx = &sc->vge_rdata.vge_rx_ring[prod]; 1458 rxstat = le32toh(cur_rx->vge_sts); 1459 if ((rxstat & VGE_RDSTS_OWN) != 0) 1460 break; 1461 count--; 1462 prog++; 1463 rxctl = le32toh(cur_rx->vge_ctl); 1464 total_len = VGE_RXBYTES(rxstat); 1465 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1466 m = rxd->rx_m; 1467 1468 /* 1469 * If the 'start of frame' bit is set, this indicates 1470 * either the first fragment in a multi-fragment receive, 1471 * or an intermediate fragment. Either way, we want to 1472 * accumulate the buffers. 1473 */ 1474 if ((rxstat & VGE_RXPKT_SOF) != 0) { 1475 if (vge_newbuf(sc, prod) != 0) { 1476 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1477 VGE_CHAIN_RESET(sc); 1478 vge_discard_rxbuf(sc, prod); 1479 continue; 1480 } 1481 m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN; 1482 if (sc->vge_cdata.vge_head == NULL) { 1483 sc->vge_cdata.vge_head = m; 1484 sc->vge_cdata.vge_tail = m; 1485 } else { 1486 m->m_flags &= ~M_PKTHDR; 1487 sc->vge_cdata.vge_tail->m_next = m; 1488 sc->vge_cdata.vge_tail = m; 1489 } 1490 continue; 1491 } 1492 1493 /* 1494 * Bad/error frames will have the RXOK bit cleared. 1495 * However, there's one error case we want to allow: 1496 * if a VLAN tagged frame arrives and the chip can't 1497 * match it against the CAM filter, it considers this 1498 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 1499 * We don't want to drop the frame though: our VLAN 1500 * filtering is done in software. 1501 * We also want to receive bad-checksummed frames and 1502 * and frames with bad-length. 
1503 */ 1504 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1505 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1506 VGE_RDSTS_CSUMERR)) == 0) { 1507 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1508 /* 1509 * If this is part of a multi-fragment packet, 1510 * discard all the pieces. 1511 */ 1512 VGE_CHAIN_RESET(sc); 1513 vge_discard_rxbuf(sc, prod); 1514 continue; 1515 } 1516 1517 if (vge_newbuf(sc, prod) != 0) { 1518 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1519 VGE_CHAIN_RESET(sc); 1520 vge_discard_rxbuf(sc, prod); 1521 continue; 1522 } 1523 1524 /* Chain received mbufs. */ 1525 if (sc->vge_cdata.vge_head != NULL) { 1526 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1527 /* 1528 * Special case: if there's 4 bytes or less 1529 * in this buffer, the mbuf can be discarded: 1530 * the last 4 bytes is the CRC, which we don't 1531 * care about anyway. 1532 */ 1533 if (m->m_len <= ETHER_CRC_LEN) { 1534 sc->vge_cdata.vge_tail->m_len -= 1535 (ETHER_CRC_LEN - m->m_len); 1536 m_freem(m); 1537 } else { 1538 m->m_len -= ETHER_CRC_LEN; 1539 m->m_flags &= ~M_PKTHDR; 1540 sc->vge_cdata.vge_tail->m_next = m; 1541 } 1542 m = sc->vge_cdata.vge_head; 1543 m->m_flags |= M_PKTHDR; 1544 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1545 } else { 1546 m->m_flags |= M_PKTHDR; 1547 m->m_pkthdr.len = m->m_len = 1548 (total_len - ETHER_CRC_LEN); 1549 } 1550 1551 #ifndef __NO_STRICT_ALIGNMENT 1552 vge_fixup_rx(m); 1553 #endif 1554 m->m_pkthdr.rcvif = ifp; 1555 1556 /* Do RX checksumming if enabled */ 1557 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 && 1558 (rxctl & VGE_RDCTL_FRAG) == 0) { 1559 /* Check IP header checksum */ 1560 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1561 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1562 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1563 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1564 1565 /* Check TCP/UDP checksum */ 1566 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1567 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1568 m->m_pkthdr.csum_flags |= 1569 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1570 m->m_pkthdr.csum_data = 0xffff; 1571 } 1572 } 1573 1574 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1575 /* 1576 * The 32-bit rxctl register is stored in little-endian. 1577 * However, the 16-bit vlan tag is stored in big-endian, 1578 * so we have to byte swap it. 1579 */ 1580 m->m_pkthdr.ether_vtag = 1581 bswap16(rxctl & VGE_RDCTL_VLANID); 1582 m->m_flags |= M_VLANTAG; 1583 } 1584 1585 VGE_UNLOCK(sc); 1586 if_input(ifp, m); 1587 VGE_LOCK(sc); 1588 sc->vge_cdata.vge_head = NULL; 1589 sc->vge_cdata.vge_tail = NULL; 1590 } 1591 1592 if (prog > 0) { 1593 sc->vge_cdata.vge_rx_prodidx = prod; 1594 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1595 sc->vge_cdata.vge_rx_ring_map, 1596 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1597 /* Update residue counter. */ 1598 if (sc->vge_cdata.vge_rx_commit != 0) { 1599 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1600 sc->vge_cdata.vge_rx_commit); 1601 sc->vge_cdata.vge_rx_commit = 0; 1602 } 1603 } 1604 return (prog); 1605 } 1606 1607 static void 1608 vge_txeof(struct vge_softc *sc) 1609 { 1610 if_t ifp; 1611 struct vge_tx_desc *cur_tx; 1612 struct vge_txdesc *txd; 1613 uint32_t txstat; 1614 int cons, prod; 1615 1616 VGE_LOCK_ASSERT(sc); 1617 1618 ifp = sc->vge_ifp; 1619 1620 if (sc->vge_cdata.vge_tx_cnt == 0) 1621 return; 1622 1623 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1624 sc->vge_cdata.vge_tx_ring_map, 1625 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1626 1627 /* 1628 * Go through our tx list and free mbufs for those 1629 * frames that have been transmitted. 
1630 */ 1631 cons = sc->vge_cdata.vge_tx_considx; 1632 prod = sc->vge_cdata.vge_tx_prodidx; 1633 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1634 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1635 txstat = le32toh(cur_tx->vge_sts); 1636 if ((txstat & VGE_TDSTS_OWN) != 0) 1637 break; 1638 sc->vge_cdata.vge_tx_cnt--; 1639 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1640 1641 txd = &sc->vge_cdata.vge_txdesc[cons]; 1642 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1643 BUS_DMASYNC_POSTWRITE); 1644 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1645 1646 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1647 __func__)); 1648 m_freem(txd->tx_m); 1649 txd->tx_m = NULL; 1650 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1651 } 1652 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1653 sc->vge_cdata.vge_tx_ring_map, 1654 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1655 sc->vge_cdata.vge_tx_considx = cons; 1656 if (sc->vge_cdata.vge_tx_cnt == 0) 1657 sc->vge_timer = 0; 1658 } 1659 1660 static void 1661 vge_link_statchg(void *xsc) 1662 { 1663 struct vge_softc *sc; 1664 if_t ifp; 1665 uint8_t physts; 1666 1667 sc = xsc; 1668 ifp = sc->vge_ifp; 1669 VGE_LOCK_ASSERT(sc); 1670 1671 physts = CSR_READ_1(sc, VGE_PHYSTS0); 1672 if ((physts & VGE_PHYSTS_RESETSTS) == 0) { 1673 if ((physts & VGE_PHYSTS_LINK) == 0) { 1674 sc->vge_flags &= ~VGE_FLAG_LINK; 1675 if_link_state_change(sc->vge_ifp, 1676 LINK_STATE_DOWN); 1677 } else { 1678 sc->vge_flags |= VGE_FLAG_LINK; 1679 if_link_state_change(sc->vge_ifp, 1680 LINK_STATE_UP); 1681 CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE | 1682 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1683 if ((physts & VGE_PHYSTS_FDX) != 0) { 1684 if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0) 1685 CSR_WRITE_1(sc, VGE_CRS2, 1686 VGE_CR2_FDX_TXFLOWCTL_ENABLE); 1687 if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0) 1688 CSR_WRITE_1(sc, VGE_CRS2, 1689 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1690 } 1691 if (!if_sendq_empty(ifp)) 1692 vge_start_locked(ifp); 1693 } 1694 } 1695 /* 1696 * Restart MII auto-polling because link state change interrupt 1697 * will disable it. 1698 */ 1699 vge_miipoll_start(sc); 1700 } 1701 1702 #ifdef DEVICE_POLLING 1703 static int 1704 vge_poll (if_t ifp, enum poll_cmd cmd, int count) 1705 { 1706 struct vge_softc *sc = if_getsoftc(ifp); 1707 int rx_npkts = 0; 1708 1709 VGE_LOCK(sc); 1710 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 1711 goto done; 1712 1713 rx_npkts = vge_rxeof(sc, count); 1714 vge_txeof(sc); 1715 1716 if (!if_sendq_empty(ifp)) 1717 vge_start_locked(ifp); 1718 1719 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1720 uint32_t status; 1721 status = CSR_READ_4(sc, VGE_ISR); 1722 if (status == 0xFFFFFFFF) 1723 goto done; 1724 if (status) 1725 CSR_WRITE_4(sc, VGE_ISR, status); 1726 1727 /* 1728 * XXX check behaviour on receiver stalls. 
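 * A reported DMA stall leaves the ring state unknown, so the recovery
 * below (and the equivalent path in vge_intr()) simply clears
 * IFF_DRV_RUNNING and calls vge_init_locked() to rebuild both rings
 * from scratch.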
1729 */ 1730 1731 if (status & VGE_ISR_TXDMA_STALL || 1732 status & VGE_ISR_RXDMA_STALL) { 1733 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1734 vge_init_locked(sc); 1735 } 1736 1737 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1738 vge_rxeof(sc, count); 1739 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1740 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1741 } 1742 } 1743 done: 1744 VGE_UNLOCK(sc); 1745 return (rx_npkts); 1746 } 1747 #endif /* DEVICE_POLLING */ 1748 1749 static void 1750 vge_intr(void *arg) 1751 { 1752 struct vge_softc *sc; 1753 if_t ifp; 1754 uint32_t status; 1755 1756 sc = arg; 1757 VGE_LOCK(sc); 1758 1759 ifp = sc->vge_ifp; 1760 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 || 1761 (if_getflags(ifp) & IFF_UP) == 0) { 1762 VGE_UNLOCK(sc); 1763 return; 1764 } 1765 1766 #ifdef DEVICE_POLLING 1767 if (if_getcapenable(ifp) & IFCAP_POLLING) { 1768 status = CSR_READ_4(sc, VGE_ISR); 1769 CSR_WRITE_4(sc, VGE_ISR, status); 1770 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0) 1771 vge_link_statchg(sc); 1772 VGE_UNLOCK(sc); 1773 return; 1774 } 1775 #endif 1776 1777 /* Disable interrupts */ 1778 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1779 status = CSR_READ_4(sc, VGE_ISR); 1780 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD); 1781 /* If the card has gone away the read returns 0xffff. */ 1782 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0) 1783 goto done; 1784 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1785 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1786 vge_rxeof(sc, VGE_RX_DESC_CNT); 1787 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1788 vge_rxeof(sc, VGE_RX_DESC_CNT); 1789 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1790 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1791 } 1792 1793 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO)) 1794 vge_txeof(sc); 1795 1796 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1797 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1798 vge_init_locked(sc); 1799 } 1800 1801 if (status & VGE_ISR_LINKSTS) 1802 vge_link_statchg(sc); 1803 } 1804 done: 1805 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1806 /* Re-enable interrupts */ 1807 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1808 1809 if (!if_sendq_empty(ifp)) 1810 vge_start_locked(ifp); 1811 } 1812 VGE_UNLOCK(sc); 1813 } 1814 1815 static int 1816 vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1817 { 1818 struct vge_txdesc *txd; 1819 struct vge_tx_frag *frag; 1820 struct mbuf *m; 1821 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1822 int error, i, nsegs, padlen; 1823 uint32_t cflags; 1824 1825 VGE_LOCK_ASSERT(sc); 1826 1827 M_ASSERTPKTHDR((*m_head)); 1828 1829 /* Argh. This chip does not autopad short frames. */ 1830 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1831 m = *m_head; 1832 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1833 if (M_WRITABLE(m) == 0) { 1834 /* Get a writable copy. */ 1835 m = m_dup(*m_head, M_NOWAIT); 1836 m_freem(*m_head); 1837 if (m == NULL) { 1838 *m_head = NULL; 1839 return (ENOBUFS); 1840 } 1841 *m_head = m; 1842 } 1843 if (M_TRAILINGSPACE(m) < padlen) { 1844 m = m_defrag(m, M_NOWAIT); 1845 if (m == NULL) { 1846 m_freem(*m_head); 1847 *m_head = NULL; 1848 return (ENOBUFS); 1849 } 1850 } 1851 /* 1852 * Manually pad short frames, and zero the pad space 1853 * to avoid leaking data. 
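 * A worked example, assuming VGE_MIN_FRAMELEN is the conventional 60
 * bytes (minimum Ethernet frame less the CRC appended by the MAC): a
 * 42-byte ARP request gets padlen = 60 - 42 = 18, so the bzero() below
 * clears 18 bytes past the payload and the frame goes out as 60 bytes
 * plus hardware CRC.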
1854 */ 1855 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1856 m->m_pkthdr.len += padlen; 1857 m->m_len = m->m_pkthdr.len; 1858 *m_head = m; 1859 } 1860 1861 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1862 1863 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1864 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1865 if (error == EFBIG) { 1866 m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS); 1867 if (m == NULL) { 1868 m_freem(*m_head); 1869 *m_head = NULL; 1870 return (ENOMEM); 1871 } 1872 *m_head = m; 1873 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1874 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1875 if (error != 0) { 1876 m_freem(*m_head); 1877 *m_head = NULL; 1878 return (error); 1879 } 1880 } else if (error != 0) 1881 return (error); 1882 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1883 BUS_DMASYNC_PREWRITE); 1884 1885 m = *m_head; 1886 cflags = 0; 1887 1888 /* Configure checksum offload. */ 1889 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1890 cflags |= VGE_TDCTL_IPCSUM; 1891 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1892 cflags |= VGE_TDCTL_TCPCSUM; 1893 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1894 cflags |= VGE_TDCTL_UDPCSUM; 1895 1896 /* Configure VLAN. */ 1897 if ((m->m_flags & M_VLANTAG) != 0) 1898 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1899 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1900 /* 1901 * XXX 1902 * Velocity family seems to support TSO but no information 1903 * for MSS configuration is available. Also the number of 1904 * fragments supported by a descriptor is too small to hold 1905 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1906 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1907 * longer chain of buffers but no additional information is 1908 * available. 1909 * 1910 * When telling the chip how many segments there are, we 1911 * must use nsegs + 1 instead of just nsegs. Darned if I 1912 * know why. This also means we can't use the last fragment 1913 * field of Tx descriptor. 1914 */ 1915 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1916 VGE_TD_LS_NORM); 1917 for (i = 0; i < nsegs; i++) { 1918 frag = &txd->tx_desc->vge_frag[i]; 1919 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1920 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1921 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1922 } 1923 1924 sc->vge_cdata.vge_tx_cnt++; 1925 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1926 1927 /* 1928 * Finally request interrupt and give the first descriptor 1929 * ownership to hardware. 1930 */ 1931 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1932 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1933 txd->tx_m = m; 1934 1935 return (0); 1936 } 1937 1938 /* 1939 * Main transmit routine. 
1940 */ 1941 1942 static void 1943 vge_start(if_t ifp) 1944 { 1945 struct vge_softc *sc; 1946 1947 sc = if_getsoftc(ifp); 1948 VGE_LOCK(sc); 1949 vge_start_locked(ifp); 1950 VGE_UNLOCK(sc); 1951 } 1952 1953 static void 1954 vge_start_locked(if_t ifp) 1955 { 1956 struct vge_softc *sc; 1957 struct vge_txdesc *txd; 1958 struct mbuf *m_head; 1959 int enq, idx; 1960 1961 sc = if_getsoftc(ifp); 1962 1963 VGE_LOCK_ASSERT(sc); 1964 1965 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1966 (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1967 IFF_DRV_RUNNING) 1968 return; 1969 1970 idx = sc->vge_cdata.vge_tx_prodidx; 1971 VGE_TX_DESC_DEC(idx); 1972 for (enq = 0; !if_sendq_empty(ifp) && 1973 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1974 m_head = if_dequeue(ifp); 1975 if (m_head == NULL) 1976 break; 1977 /* 1978 * Pack the data into the transmit ring. If we 1979 * don't have room, set the OACTIVE flag and wait 1980 * for the NIC to drain the ring. 1981 */ 1982 if (vge_encap(sc, &m_head)) { 1983 if (m_head == NULL) 1984 break; 1985 if_sendq_prepend(ifp, m_head); 1986 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1987 break; 1988 } 1989 1990 txd = &sc->vge_cdata.vge_txdesc[idx]; 1991 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1992 VGE_TX_DESC_INC(idx); 1993 1994 enq++; 1995 /* 1996 * If there's a BPF listener, bounce a copy of this frame 1997 * to him. 1998 */ 1999 ETHER_BPF_MTAP(ifp, m_head); 2000 } 2001 2002 if (enq > 0) { 2003 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 2004 sc->vge_cdata.vge_tx_ring_map, 2005 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2006 /* Issue a transmit command. */ 2007 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 2008 /* 2009 * Set a timeout in case the chip goes out to lunch. 2010 */ 2011 sc->vge_timer = 5; 2012 } 2013 } 2014 2015 static void 2016 vge_init(void *xsc) 2017 { 2018 struct vge_softc *sc = xsc; 2019 2020 VGE_LOCK(sc); 2021 vge_init_locked(sc); 2022 VGE_UNLOCK(sc); 2023 } 2024 2025 static void 2026 vge_init_locked(struct vge_softc *sc) 2027 { 2028 if_t ifp = sc->vge_ifp; 2029 int error, i; 2030 2031 VGE_LOCK_ASSERT(sc); 2032 2033 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 2034 return; 2035 2036 /* 2037 * Cancel pending I/O and free all RX/TX buffers. 2038 */ 2039 vge_stop(sc); 2040 vge_reset(sc); 2041 vge_miipoll_start(sc); 2042 2043 /* 2044 * Initialize the RX and TX descriptors and mbufs. 2045 */ 2046 2047 error = vge_rx_list_init(sc); 2048 if (error != 0) { 2049 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2050 return; 2051 } 2052 vge_tx_list_init(sc); 2053 /* Clear MAC statistics. */ 2054 vge_stats_clear(sc); 2055 /* Set our station address */ 2056 for (i = 0; i < ETHER_ADDR_LEN; i++) 2057 CSR_WRITE_1(sc, VGE_PAR0 + i, if_getlladdr(sc->vge_ifp)[i]); 2058 2059 /* 2060 * Set receive FIFO threshold. Also allow transmission and 2061 * reception of VLAN tagged frames. 
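	 * The FIFO threshold field is cleared and then set to 128 bytes
	 * below; VLAN tag stripping itself is enabled separately in
	 * vge_setvlan() according to the current interface capabilities.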
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */

	CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
	    VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Configure interrupt moderation. */
	vge_intr_holdoff(sc);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Initialize the CAM filter. */
	vge_cam_clear(sc);

	/* Set up receiver filter. */
	vge_rxfilter(sc);
	vge_setvlan(sc);

	/* Initialize pause timer. */
	CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
	/*
	 * Initialize flow control parameters.
	 *  TX XON high threshold : 48
	 *  TX pause low threshold : 24
	 *  Disable half-duplex flow control
	 */
	CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
	CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts except link state change if we are polling.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
	} else	/* otherwise ... */
#endif
	{
		/*
		 * Enable interrupts.
		 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	}
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	sc->vge_flags &= ~VGE_FLAG_LINK;
	vge_ifmedia_upd_locked(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
}

/*
 * Set media options.
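 * This is the ifmedia change callback: a request such as
 * "ifconfig vge0 media 100baseTX mediaopt full-duplex" reaches it
 * through SIOCSIFMEDIA and ifmedia_ioctl(), after which the PHYs are
 * reset and mii_mediachg() programs the newly selected mode.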
2159 */ 2160 static int 2161 vge_ifmedia_upd(if_t ifp) 2162 { 2163 struct vge_softc *sc; 2164 int error; 2165 2166 sc = if_getsoftc(ifp); 2167 VGE_LOCK(sc); 2168 error = vge_ifmedia_upd_locked(sc); 2169 VGE_UNLOCK(sc); 2170 2171 return (error); 2172 } 2173 2174 static int 2175 vge_ifmedia_upd_locked(struct vge_softc *sc) 2176 { 2177 struct mii_data *mii; 2178 struct mii_softc *miisc; 2179 int error; 2180 2181 mii = device_get_softc(sc->vge_miibus); 2182 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2183 PHY_RESET(miisc); 2184 vge_setmedia(sc); 2185 error = mii_mediachg(mii); 2186 2187 return (error); 2188 } 2189 2190 /* 2191 * Report current media status. 2192 */ 2193 static void 2194 vge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 2195 { 2196 struct vge_softc *sc; 2197 struct mii_data *mii; 2198 2199 sc = if_getsoftc(ifp); 2200 mii = device_get_softc(sc->vge_miibus); 2201 2202 VGE_LOCK(sc); 2203 if ((if_getflags(ifp) & IFF_UP) == 0) { 2204 VGE_UNLOCK(sc); 2205 return; 2206 } 2207 mii_pollstat(mii); 2208 ifmr->ifm_active = mii->mii_media_active; 2209 ifmr->ifm_status = mii->mii_media_status; 2210 VGE_UNLOCK(sc); 2211 } 2212 2213 static void 2214 vge_setmedia(struct vge_softc *sc) 2215 { 2216 struct mii_data *mii; 2217 struct ifmedia_entry *ife; 2218 2219 mii = device_get_softc(sc->vge_miibus); 2220 ife = mii->mii_media.ifm_cur; 2221 2222 /* 2223 * If the user manually selects a media mode, we need to turn 2224 * on the forced MAC mode bit in the DIAGCTL register. If the 2225 * user happens to choose a full duplex mode, we also need to 2226 * set the 'force full duplex' bit. This applies only to 2227 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2228 * mode is disabled, and in 1000baseT mode, full duplex is 2229 * always implied, so we turn on the forced mode bit but leave 2230 * the FDX bit cleared. 
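 *
 * In short, the switch statement below programs DIAGCTL as follows:
 *
 *	selected media		MACFORCE	FDXFORCE
 *	autoselect		cleared		cleared
 *	1000baseT		set		cleared
 *	10/100 full duplex	set		set
 *	10/100 half duplex	set		cleared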
2231 */ 2232 2233 switch (IFM_SUBTYPE(ife->ifm_media)) { 2234 case IFM_AUTO: 2235 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2236 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2237 break; 2238 case IFM_1000_T: 2239 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2240 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2241 break; 2242 case IFM_100_TX: 2243 case IFM_10_T: 2244 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2245 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2246 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2247 } else { 2248 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2249 } 2250 break; 2251 default: 2252 device_printf(sc->vge_dev, "unknown media type: %x\n", 2253 IFM_SUBTYPE(ife->ifm_media)); 2254 break; 2255 } 2256 } 2257 2258 static int 2259 vge_ioctl(if_t ifp, u_long command, caddr_t data) 2260 { 2261 struct vge_softc *sc = if_getsoftc(ifp); 2262 struct ifreq *ifr = (struct ifreq *) data; 2263 struct mii_data *mii; 2264 int error = 0, mask; 2265 2266 switch (command) { 2267 case SIOCSIFMTU: 2268 VGE_LOCK(sc); 2269 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU) 2270 error = EINVAL; 2271 else if (if_getmtu(ifp) != ifr->ifr_mtu) { 2272 if (ifr->ifr_mtu > ETHERMTU && 2273 (sc->vge_flags & VGE_FLAG_JUMBO) == 0) 2274 error = EINVAL; 2275 else 2276 if_setmtu(ifp, ifr->ifr_mtu); 2277 } 2278 VGE_UNLOCK(sc); 2279 break; 2280 case SIOCSIFFLAGS: 2281 VGE_LOCK(sc); 2282 if ((if_getflags(ifp) & IFF_UP) != 0) { 2283 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && 2284 ((if_getflags(ifp) ^ sc->vge_if_flags) & 2285 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2286 vge_rxfilter(sc); 2287 else 2288 vge_init_locked(sc); 2289 } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 2290 vge_stop(sc); 2291 sc->vge_if_flags = if_getflags(ifp); 2292 VGE_UNLOCK(sc); 2293 break; 2294 case SIOCADDMULTI: 2295 case SIOCDELMULTI: 2296 VGE_LOCK(sc); 2297 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2298 vge_rxfilter(sc); 2299 VGE_UNLOCK(sc); 2300 break; 2301 case SIOCGIFMEDIA: 2302 case SIOCSIFMEDIA: 2303 mii = device_get_softc(sc->vge_miibus); 2304 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2305 break; 2306 case SIOCSIFCAP: 2307 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 2308 #ifdef DEVICE_POLLING 2309 if (mask & IFCAP_POLLING) { 2310 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2311 error = ether_poll_register(vge_poll, ifp); 2312 if (error) 2313 return (error); 2314 VGE_LOCK(sc); 2315 /* Disable interrupts */ 2316 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2317 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2318 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2319 if_setcapenablebit(ifp, IFCAP_POLLING, 0); 2320 VGE_UNLOCK(sc); 2321 } else { 2322 error = ether_poll_deregister(ifp); 2323 /* Enable interrupts. 
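				 * Polling has just been disabled, so restore
				 * the full interrupt mask, acknowledge any
				 * events still latched in VGE_ISR and reopen
				 * the global interrupt gate via CR3, undoing
				 * the setup performed when polling was
				 * enabled above.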
*/ 2324 VGE_LOCK(sc); 2325 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2326 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2327 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2328 if_setcapenablebit(ifp, 0, IFCAP_POLLING); 2329 VGE_UNLOCK(sc); 2330 } 2331 } 2332 #endif /* DEVICE_POLLING */ 2333 VGE_LOCK(sc); 2334 if ((mask & IFCAP_TXCSUM) != 0 && 2335 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { 2336 if_togglecapenable(ifp, IFCAP_TXCSUM); 2337 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 2338 if_sethwassistbits(ifp, VGE_CSUM_FEATURES, 0); 2339 else 2340 if_sethwassistbits(ifp, 0, VGE_CSUM_FEATURES); 2341 } 2342 if ((mask & IFCAP_RXCSUM) != 0 && 2343 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) 2344 if_togglecapenable(ifp, IFCAP_RXCSUM); 2345 if ((mask & IFCAP_WOL_UCAST) != 0 && 2346 (if_getcapabilities(ifp) & IFCAP_WOL_UCAST) != 0) 2347 if_togglecapenable(ifp, IFCAP_WOL_UCAST); 2348 if ((mask & IFCAP_WOL_MCAST) != 0 && 2349 (if_getcapabilities(ifp) & IFCAP_WOL_MCAST) != 0) 2350 if_togglecapenable(ifp, IFCAP_WOL_MCAST); 2351 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2352 (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0) 2353 if_togglecapenable(ifp, IFCAP_WOL_MAGIC); 2354 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2355 (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0) 2356 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); 2357 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2358 (IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) { 2359 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 2360 vge_setvlan(sc); 2361 } 2362 VGE_UNLOCK(sc); 2363 VLAN_CAPABILITIES(ifp); 2364 break; 2365 default: 2366 error = ether_ioctl(ifp, command, data); 2367 break; 2368 } 2369 2370 return (error); 2371 } 2372 2373 static void 2374 vge_watchdog(void *arg) 2375 { 2376 struct vge_softc *sc; 2377 if_t ifp; 2378 2379 sc = arg; 2380 VGE_LOCK_ASSERT(sc); 2381 vge_stats_update(sc); 2382 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2383 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2384 return; 2385 2386 ifp = sc->vge_ifp; 2387 if_printf(ifp, "watchdog timeout\n"); 2388 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2389 2390 vge_txeof(sc); 2391 vge_rxeof(sc, VGE_RX_DESC_CNT); 2392 2393 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 2394 vge_init_locked(sc); 2395 } 2396 2397 /* 2398 * Stop the adapter and free any mbufs allocated to the 2399 * RX and TX lists. 2400 */ 2401 static void 2402 vge_stop(struct vge_softc *sc) 2403 { 2404 if_t ifp; 2405 2406 VGE_LOCK_ASSERT(sc); 2407 ifp = sc->vge_ifp; 2408 sc->vge_timer = 0; 2409 callout_stop(&sc->vge_watchdog); 2410 2411 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 2412 2413 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2414 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2415 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2416 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2417 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2418 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2419 2420 vge_stats_update(sc); 2421 VGE_CHAIN_RESET(sc); 2422 vge_txeof(sc); 2423 vge_freebufs(sc); 2424 } 2425 2426 /* 2427 * Device suspend routine. Stop the interface and save some PCI 2428 * settings in case the BIOS doesn't restore them properly on 2429 * resume. 2430 */ 2431 static int 2432 vge_suspend(device_t dev) 2433 { 2434 struct vge_softc *sc; 2435 2436 sc = device_get_softc(dev); 2437 2438 VGE_LOCK(sc); 2439 vge_stop(sc); 2440 vge_setwol(sc); 2441 sc->vge_flags |= VGE_FLAG_SUSPENDED; 2442 VGE_UNLOCK(sc); 2443 2444 return (0); 2445 } 2446 2447 /* 2448 * Device resume routine. 
Restore some PCI settings in case the BIOS 2449 * doesn't, re-enable busmastering, and restart the interface if 2450 * appropriate. 2451 */ 2452 static int 2453 vge_resume(device_t dev) 2454 { 2455 struct vge_softc *sc; 2456 if_t ifp; 2457 uint16_t pmstat; 2458 2459 sc = device_get_softc(dev); 2460 VGE_LOCK(sc); 2461 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) { 2462 /* Disable PME and clear PME status. */ 2463 pmstat = pci_read_config(sc->vge_dev, 2464 sc->vge_pmcap + PCIR_POWER_STATUS, 2); 2465 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2466 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2467 pci_write_config(sc->vge_dev, 2468 sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2469 } 2470 } 2471 vge_clrwol(sc); 2472 /* Restart MII auto-polling. */ 2473 vge_miipoll_start(sc); 2474 ifp = sc->vge_ifp; 2475 /* Reinitialize interface if necessary. */ 2476 if ((if_getflags(ifp) & IFF_UP) != 0) { 2477 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 2478 vge_init_locked(sc); 2479 } 2480 sc->vge_flags &= ~VGE_FLAG_SUSPENDED; 2481 VGE_UNLOCK(sc); 2482 2483 return (0); 2484 } 2485 2486 /* 2487 * Stop all chip I/O so that the kernel's probe routines don't 2488 * get confused by errant DMAs when rebooting. 2489 */ 2490 static int 2491 vge_shutdown(device_t dev) 2492 { 2493 2494 return (vge_suspend(dev)); 2495 } 2496 2497 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2498 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2499 2500 static void 2501 vge_sysctl_node(struct vge_softc *sc) 2502 { 2503 struct sysctl_ctx_list *ctx; 2504 struct sysctl_oid_list *child, *parent; 2505 struct sysctl_oid *tree; 2506 struct vge_hw_stats *stats; 2507 2508 stats = &sc->vge_stats; 2509 ctx = device_get_sysctl_ctx(sc->vge_dev); 2510 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev)); 2511 2512 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff", 2513 CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff"); 2514 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt", 2515 CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet"); 2516 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt", 2517 CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet"); 2518 2519 /* Pull in device tunables. */ 2520 sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT; 2521 resource_int_value(device_get_name(sc->vge_dev), 2522 device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff); 2523 sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT; 2524 resource_int_value(device_get_name(sc->vge_dev), 2525 device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt); 2526 sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT; 2527 resource_int_value(device_get_name(sc->vge_dev), 2528 device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt); 2529 2530 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", 2531 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VGE statistics"); 2532 parent = SYSCTL_CHILDREN(tree); 2533 2534 /* Rx statistics. 
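	 * Each counter registered below appears read-only under
	 * dev.vge.<unit>.stats.rx and is accumulated from the hardware
	 * MIB by vge_stats_update().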
	 */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
	    &stats->rx_frames, "Total frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_good_frames, "Good frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
	    &stats->rx_runts_errs, "Too short frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
	    &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
	    &stats->rx_jumbos, "Jumbo frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
	    &stats->rx_crcerrs, "CRC errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause frames");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
	    &stats->rx_nobufs, "Frames with no buffer event");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
	    &stats->rx_symerrs, "Frames with symbol errors");
	VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");

	/* Tx statistics.
*/ 2580 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", 2581 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics"); 2582 child = SYSCTL_CHILDREN(tree); 2583 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2584 &stats->tx_good_frames, "Good frames"); 2585 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2586 &stats->tx_pkts_64, "64 bytes frames"); 2587 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2588 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 2589 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2590 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 2591 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2592 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 2593 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2594 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 2595 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2596 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2597 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2598 &stats->tx_jumbos, "Jumbo frames"); 2599 VGE_SYSCTL_STAT_ADD32(ctx, child, "colls", 2600 &stats->tx_colls, "Collisions"); 2601 VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2602 &stats->tx_latecolls, "Late collisions"); 2603 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2604 &stats->tx_pause, "Pause frames"); 2605 #ifdef VGE_ENABLE_SQEERR 2606 VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs", 2607 &stats->tx_sqeerrs, "SQE errors"); 2608 #endif 2609 /* Clear MAC statistics. */ 2610 vge_stats_clear(sc); 2611 } 2612 2613 #undef VGE_SYSCTL_STAT_ADD32 2614 2615 static void 2616 vge_stats_clear(struct vge_softc *sc) 2617 { 2618 int i; 2619 2620 CSR_WRITE_1(sc, VGE_MIBCSR, 2621 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE); 2622 CSR_WRITE_1(sc, VGE_MIBCSR, 2623 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR); 2624 for (i = VGE_TIMEOUT; i > 0; i--) { 2625 DELAY(1); 2626 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0) 2627 break; 2628 } 2629 if (i == 0) 2630 device_printf(sc->vge_dev, "MIB clear timed out!\n"); 2631 CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) & 2632 ~VGE_MIBCSR_FREEZE); 2633 } 2634 2635 static void 2636 vge_stats_update(struct vge_softc *sc) 2637 { 2638 struct vge_hw_stats *stats; 2639 if_t ifp; 2640 uint32_t mib[VGE_MIB_CNT], val; 2641 int i; 2642 2643 VGE_LOCK_ASSERT(sc); 2644 2645 stats = &sc->vge_stats; 2646 ifp = sc->vge_ifp; 2647 2648 CSR_WRITE_1(sc, VGE_MIBCSR, 2649 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH); 2650 for (i = VGE_TIMEOUT; i > 0; i--) { 2651 DELAY(1); 2652 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0) 2653 break; 2654 } 2655 if (i == 0) { 2656 device_printf(sc->vge_dev, "MIB counter dump timed out!\n"); 2657 vge_stats_clear(sc); 2658 return; 2659 } 2660 2661 bzero(mib, sizeof(mib)); 2662 reset_idx: 2663 /* Set MIB read index to 0. */ 2664 CSR_WRITE_1(sc, VGE_MIBCSR, 2665 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI); 2666 for (i = 0; i < VGE_MIB_CNT; i++) { 2667 val = CSR_READ_4(sc, VGE_MIBDATA); 2668 if (i != VGE_MIB_DATA_IDX(val)) { 2669 /* Reading interrupted. */ 2670 goto reset_idx; 2671 } 2672 mib[i] = val & VGE_MIB_DATA_MASK; 2673 } 2674 2675 /* Rx stats. 
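	 * Fold the MIB snapshot read above into the driver's running
	 * counters, so the exported statistics keep accumulating across
	 * snapshots instead of reflecting raw register values.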
	 */
	stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
	stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
	stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
	stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
	stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
	stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
	stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
	stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
	stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
	stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
	stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
	stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
	stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
	stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
	stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
	stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
	stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
	stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
	stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
	stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];

	/* Tx stats. */
	stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
	stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
	stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
	stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
	stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
	stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
	stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
	stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
	stats->tx_colls += mib[VGE_MIB_TX_COLLS];
	stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
#ifdef VGE_ENABLE_SQEERR
	stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
#endif
	stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];

	/* Update counters in ifnet. */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]);

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]);

	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    mib[VGE_MIB_RX_FIFO_OVERRUNS] +
	    mib[VGE_MIB_RX_RUNTS] +
	    mib[VGE_MIB_RX_RUNTS_ERRS] +
	    mib[VGE_MIB_RX_CRCERRS] +
	    mib[VGE_MIB_RX_ALIGNERRS] +
	    mib[VGE_MIB_RX_NOBUFS] +
	    mib[VGE_MIB_RX_SYMERRS] +
	    mib[VGE_MIB_RX_LENERRS]);
}

static void
vge_intr_holdoff(struct vge_softc *sc)
{
	uint8_t intctl;

	VGE_LOCK_ASSERT(sc);

	/*
	 * Set Tx interrupt suppression threshold.
	 * It is possible to use the single-shot timer in the VGE_CRS1
	 * register in the Tx path so that the driver could suppress most
	 * Tx completion interrupts. However, that requires an additional
	 * access to the VGE_CRS1 register to reload the timer in addition
	 * to issuing the Tx kick command. Another downside is that we do
	 * not know in advance what single-shot timer value should be used,
	 * so reclaiming transmitted mbufs could be delayed considerably,
	 * which in turn slows down Tx operation.
	 */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
	CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);
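
	/*
	 * Both suppression thresholds and the holdoff timer programmed
	 * below come from the dev.vge.<unit> sysctls/tunables registered
	 * in vge_sysctl_node() (tx_coal_pkt, rx_coal_pkt, int_holdoff);
	 * changed values take effect the next time the interface is
	 * (re)initialized, e.g. "sysctl dev.vge.0.tx_coal_pkt=2" followed
	 * by a down/up cycle of the interface.
	 */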

	/* Set Rx interrupt suppression threshold. */
	CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);

	intctl = CSR_READ_1(sc, VGE_INTCTL1);
	intctl &= ~VGE_INTCTL_SC_RELOAD;
	intctl |= VGE_INTCTL_HC_RELOAD;
	if (sc->vge_tx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
	if (sc->vge_rx_coal_pkt <= 0)
		intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
	else
		intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
	CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
	if (sc->vge_int_holdoff > 0) {
		/* Set interrupt holdoff timer. */
		CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
		CSR_WRITE_1(sc, VGE_INTHOLDOFF,
		    VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
		/* Enable holdoff timer. */
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	}
}

static void
vge_setlinkspeed(struct vge_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	VGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vge_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	/* Clear forced MAC speed/duplex configuration. */
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
	CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until vge(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					return;
				default:
					break;
				}
			}
			VGE_UNLOCK(sc);
			pause("vgelnk", hz);
			VGE_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->vge_dev, "establishing link failed, "
			    "WOL may not work!\n");
	}
	/*
	 * No link; force the MAC to report a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
}

static void
vge_setwol(struct vge_softc *sc)
{
	if_t ifp;
	uint16_t pmstat;
	uint8_t val;

	VGE_LOCK_ASSERT(sc);

	if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
		/* No PME capability; power down the PHY. */
		vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
		    BMCR_PDOWN);
		vge_miipoll_stop(sc);
		return;
	}

	ifp = sc->vge_ifp;

	/* Clear WOL on pattern match. */
	CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
	/* Disable WOL on magic/unicast packet.
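	 * Writing 0x0F to the WOLCR1 clear register knocks out the
	 * low-order wake-up enables (magic packet, unicast match and
	 * related events); the events actually requested through
	 * IFCAP_WOL_* are selectively switched back on via the
	 * corresponding set register further down.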
*/ 2867 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2868 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2869 VGE_WOLCFG_PMEOVR); 2870 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) { 2871 vge_setlinkspeed(sc); 2872 val = 0; 2873 if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0) 2874 val |= VGE_WOLCR1_UCAST; 2875 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) 2876 val |= VGE_WOLCR1_MAGIC; 2877 CSR_WRITE_1(sc, VGE_WOLCR1S, val); 2878 val = 0; 2879 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0) 2880 val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB; 2881 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR); 2882 /* Disable MII auto-polling. */ 2883 vge_miipoll_stop(sc); 2884 } 2885 CSR_SETBIT_1(sc, VGE_DIAGCTL, 2886 VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE); 2887 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2888 2889 /* Clear WOL status on pattern match. */ 2890 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2891 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2892 2893 val = CSR_READ_1(sc, VGE_PWRSTAT); 2894 val |= VGE_STICKHW_SWPTAG; 2895 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2896 /* Put hardware into sleep. */ 2897 val = CSR_READ_1(sc, VGE_PWRSTAT); 2898 val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1; 2899 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2900 /* Request PME if WOL is requested. */ 2901 pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap + 2902 PCIR_POWER_STATUS, 2); 2903 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2904 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) 2905 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2906 pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS, 2907 pmstat, 2); 2908 } 2909 2910 static void 2911 vge_clrwol(struct vge_softc *sc) 2912 { 2913 uint8_t val; 2914 2915 val = CSR_READ_1(sc, VGE_PWRSTAT); 2916 val &= ~VGE_STICKHW_SWPTAG; 2917 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2918 /* Disable WOL and clear power state indicator. */ 2919 val = CSR_READ_1(sc, VGE_PWRSTAT); 2920 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); 2921 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2922 2923 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2924 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2925 2926 /* Clear WOL on pattern match. */ 2927 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2928 /* Disable WOL on magic/unicast packet. */ 2929 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2930 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2931 VGE_WOLCFG_PMEOVR); 2932 /* Clear WOL status on pattern match. */ 2933 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2934 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2935 } 2936