1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2004 5 * Bill Paul <wpaul@windriver.com>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 /* 37 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Networking Software Engineer 41 * Wind River Systems 42 */ 43 44 /* 45 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that 46 * combines a tri-speed ethernet MAC and PHY, with the following 47 * features: 48 * 49 * o Jumbo frame support up to 16K 50 * o Transmit and receive flow control 51 * o IPv4 checksum offload 52 * o VLAN tag insertion and stripping 53 * o TCP large send 54 * o 64-bit multicast hash table filter 55 * o 64 entry CAM filter 56 * o 16K RX FIFO and 48K TX FIFO memory 57 * o Interrupt moderation 58 * 59 * The VT6122 supports up to four transmit DMA queues. The descriptors 60 * in the transmit ring can address up to 7 data fragments; frames which 61 * span more than 7 data buffers must be coalesced, but in general the 62 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments 63 * long. The receive descriptors address only a single buffer. 64 * 65 * There are two peculiar design issues with the VT6122. One is that 66 * receive data buffers must be aligned on a 32-bit boundary. This is 67 * not a problem where the VT6122 is used as a LOM device in x86-based 68 * systems, but on architectures that generate unaligned access traps, we 69 * have to do some copying. 70 * 71 * The other issue has to do with the way 64-bit addresses are handled. 72 * The DMA descriptors only allow you to specify 48 bits of addressing 73 * information. The remaining 16 bits are specified using one of the 74 * I/O registers. 
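 * (In this driver the VGE_ADDR_LO()/VGE_ADDR_HI() macros perform that
 * split, and vge_dma_alloc() falls back to a 32-bit DMA tag whenever the
 * TX and RX descriptor rings would not share the same upper address bits.)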
 * If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you have to make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(vge, pci, 1, 1, 1);
MODULE_DEPEND(vge, ether, 1, 1, 1);
MODULE_DEPEND(vge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/vge/if_vgereg.h>
#include <dev/vge/if_vgevar.h>

#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* Tunables */
static int msi_disable = 0;
TUNABLE_INT("hw.vge.msi_disable", &msi_disable);

/*
 * The SQE error counter of the MIB seems to report bogus values.
 * The vendor's workaround does not seem to work on PCIe based
 * controllers.  Disable it until we find a better workaround.
 */
#undef VGE_ENABLE_SQEERR

/*
 * Various supported device vendors/types and their names.
145 */ 146 static struct vge_type vge_devs[] = { 147 { VIA_VENDORID, VIA_DEVICEID_61XX, 148 "VIA Networking Velocity Gigabit Ethernet" }, 149 { 0, 0, NULL } 150 }; 151 152 static int vge_attach(device_t); 153 static int vge_detach(device_t); 154 static int vge_probe(device_t); 155 static int vge_resume(device_t); 156 static int vge_shutdown(device_t); 157 static int vge_suspend(device_t); 158 159 static void vge_cam_clear(struct vge_softc *); 160 static int vge_cam_set(struct vge_softc *, uint8_t *); 161 static void vge_clrwol(struct vge_softc *); 162 static void vge_discard_rxbuf(struct vge_softc *, int); 163 static int vge_dma_alloc(struct vge_softc *); 164 static void vge_dma_free(struct vge_softc *); 165 static void vge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 166 #ifdef VGE_EEPROM 167 static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *); 168 #endif 169 static int vge_encap(struct vge_softc *, struct mbuf **); 170 #ifndef __NO_STRICT_ALIGNMENT 171 static __inline void 172 vge_fixup_rx(struct mbuf *); 173 #endif 174 static void vge_freebufs(struct vge_softc *); 175 static void vge_ifmedia_sts(if_t, struct ifmediareq *); 176 static int vge_ifmedia_upd(if_t); 177 static int vge_ifmedia_upd_locked(struct vge_softc *); 178 static void vge_init(void *); 179 static void vge_init_locked(struct vge_softc *); 180 static void vge_intr(void *); 181 static void vge_intr_holdoff(struct vge_softc *); 182 static int vge_ioctl(if_t, u_long, caddr_t); 183 static void vge_link_statchg(void *); 184 static int vge_miibus_readreg(device_t, int, int); 185 static int vge_miibus_writereg(device_t, int, int, int); 186 static void vge_miipoll_start(struct vge_softc *); 187 static void vge_miipoll_stop(struct vge_softc *); 188 static int vge_newbuf(struct vge_softc *, int); 189 static void vge_read_eeprom(struct vge_softc *, caddr_t, int, int, int); 190 static void vge_reset(struct vge_softc *); 191 static int vge_rx_list_init(struct vge_softc *); 192 static int vge_rxeof(struct vge_softc *, int); 193 static void vge_rxfilter(struct vge_softc *); 194 static void vge_setmedia(struct vge_softc *); 195 static void vge_setvlan(struct vge_softc *); 196 static void vge_setwol(struct vge_softc *); 197 static void vge_start(if_t); 198 static void vge_start_locked(if_t); 199 static void vge_stats_clear(struct vge_softc *); 200 static void vge_stats_update(struct vge_softc *); 201 static void vge_stop(struct vge_softc *); 202 static void vge_sysctl_node(struct vge_softc *); 203 static int vge_tx_list_init(struct vge_softc *); 204 static void vge_txeof(struct vge_softc *); 205 static void vge_watchdog(void *); 206 207 static device_method_t vge_methods[] = { 208 /* Device interface */ 209 DEVMETHOD(device_probe, vge_probe), 210 DEVMETHOD(device_attach, vge_attach), 211 DEVMETHOD(device_detach, vge_detach), 212 DEVMETHOD(device_suspend, vge_suspend), 213 DEVMETHOD(device_resume, vge_resume), 214 DEVMETHOD(device_shutdown, vge_shutdown), 215 216 /* MII interface */ 217 DEVMETHOD(miibus_readreg, vge_miibus_readreg), 218 DEVMETHOD(miibus_writereg, vge_miibus_writereg), 219 220 DEVMETHOD_END 221 }; 222 223 static driver_t vge_driver = { 224 "vge", 225 vge_methods, 226 sizeof(struct vge_softc) 227 }; 228 229 DRIVER_MODULE(vge, pci, vge_driver, 0, 0); 230 DRIVER_MODULE(miibus, vge, miibus_driver, 0, 0); 231 232 #ifdef VGE_EEPROM 233 /* 234 * Read a word of data stored in the EEPROM at address 'addr.' 
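 * The sequence used below is: switch the EEPROM into embedded-programming
 * mode (EELOAD + EMBP), write the word address to VGE_EEADDR, issue an ERD
 * read command, poll VGE_EECMD for EDONE, then fetch the result from
 * VGE_EERDDAT.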
235 */ 236 static void 237 vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest) 238 { 239 int i; 240 uint16_t word = 0; 241 242 /* 243 * Enter EEPROM embedded programming mode. In order to 244 * access the EEPROM at all, we first have to set the 245 * EELOAD bit in the CHIPCFG2 register. 246 */ 247 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 248 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 249 250 /* Select the address of the word we want to read */ 251 CSR_WRITE_1(sc, VGE_EEADDR, addr); 252 253 /* Issue read command */ 254 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); 255 256 /* Wait for the done bit to be set. */ 257 for (i = 0; i < VGE_TIMEOUT; i++) { 258 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) 259 break; 260 } 261 262 if (i == VGE_TIMEOUT) { 263 device_printf(sc->vge_dev, "EEPROM read timed out\n"); 264 *dest = 0; 265 return; 266 } 267 268 /* Read the result */ 269 word = CSR_READ_2(sc, VGE_EERDDAT); 270 271 /* Turn off EEPROM access mode. */ 272 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); 273 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); 274 275 *dest = word; 276 } 277 #endif 278 279 /* 280 * Read a sequence of words from the EEPROM. 281 */ 282 static void 283 vge_read_eeprom(struct vge_softc *sc, caddr_t dest, int off, int cnt, int swap) 284 { 285 int i; 286 #ifdef VGE_EEPROM 287 uint16_t word = 0, *ptr; 288 289 for (i = 0; i < cnt; i++) { 290 vge_eeprom_getword(sc, off + i, &word); 291 ptr = (uint16_t *)(dest + (i * 2)); 292 if (swap) 293 *ptr = ntohs(word); 294 else 295 *ptr = word; 296 } 297 #else 298 for (i = 0; i < ETHER_ADDR_LEN; i++) 299 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); 300 #endif 301 } 302 303 static void 304 vge_miipoll_stop(struct vge_softc *sc) 305 { 306 int i; 307 308 CSR_WRITE_1(sc, VGE_MIICMD, 0); 309 310 for (i = 0; i < VGE_TIMEOUT; i++) { 311 DELAY(1); 312 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 313 break; 314 } 315 316 if (i == VGE_TIMEOUT) 317 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 318 } 319 320 static void 321 vge_miipoll_start(struct vge_softc *sc) 322 { 323 int i; 324 325 /* First, make sure we're idle. */ 326 327 CSR_WRITE_1(sc, VGE_MIICMD, 0); 328 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); 329 330 for (i = 0; i < VGE_TIMEOUT; i++) { 331 DELAY(1); 332 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) 333 break; 334 } 335 336 if (i == VGE_TIMEOUT) { 337 device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); 338 return; 339 } 340 341 /* Now enable auto poll mode. */ 342 343 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); 344 345 /* And make sure it started. */ 346 347 for (i = 0; i < VGE_TIMEOUT; i++) { 348 DELAY(1); 349 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) 350 break; 351 } 352 353 if (i == VGE_TIMEOUT) 354 device_printf(sc->vge_dev, "failed to start MII autopoll\n"); 355 } 356 357 static int 358 vge_miibus_readreg(device_t dev, int phy, int reg) 359 { 360 struct vge_softc *sc; 361 int i; 362 uint16_t rval = 0; 363 364 sc = device_get_softc(dev); 365 366 vge_miipoll_stop(sc); 367 368 /* Specify the register we want to read. */ 369 CSR_WRITE_1(sc, VGE_MIIADDR, reg); 370 371 /* Issue read command. */ 372 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); 373 374 /* Wait for the read command bit to self-clear. 
 */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return (rval);
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return (rval);
}

static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits.  This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */

	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */

	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}

static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return (ENOSPC);

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM.
*/ 504 CSR_WRITE_1(sc, VGE_CAMADDR, 0); 505 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); 506 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); 507 508 return (error); 509 } 510 511 static void 512 vge_setvlan(struct vge_softc *sc) 513 { 514 if_t ifp; 515 uint8_t cfg; 516 517 VGE_LOCK_ASSERT(sc); 518 519 ifp = sc->vge_ifp; 520 cfg = CSR_READ_1(sc, VGE_RXCFG); 521 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) 522 cfg |= VGE_VTAG_OPT2; 523 else 524 cfg &= ~VGE_VTAG_OPT2; 525 CSR_WRITE_1(sc, VGE_RXCFG, cfg); 526 } 527 528 static u_int 529 vge_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 530 { 531 struct vge_softc *sc = arg; 532 533 if (sc->vge_camidx == VGE_CAM_MAXADDRS) 534 return (0); 535 536 (void )vge_cam_set(sc, LLADDR(sdl)); 537 538 return (1); 539 } 540 541 static u_int 542 vge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 543 { 544 uint32_t h, *hashes = arg; 545 546 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; 547 if (h < 32) 548 hashes[0] |= (1 << h); 549 else 550 hashes[1] |= (1 << (h - 32)); 551 552 return (1); 553 } 554 555 /* 556 * Program the multicast filter. We use the 64-entry CAM filter 557 * for perfect filtering. If there's more than 64 multicast addresses, 558 * we use the hash filter instead. 559 */ 560 static void 561 vge_rxfilter(struct vge_softc *sc) 562 { 563 if_t ifp; 564 uint32_t hashes[2]; 565 uint8_t rxcfg; 566 567 VGE_LOCK_ASSERT(sc); 568 569 /* First, zot all the multicast entries. */ 570 hashes[0] = 0; 571 hashes[1] = 0; 572 573 rxcfg = CSR_READ_1(sc, VGE_RXCTL); 574 rxcfg &= ~(VGE_RXCTL_RX_MCAST | VGE_RXCTL_RX_BCAST | 575 VGE_RXCTL_RX_PROMISC); 576 /* 577 * Always allow VLAN oversized frames and frames for 578 * this host. 579 */ 580 rxcfg |= VGE_RXCTL_RX_GIANT | VGE_RXCTL_RX_UCAST; 581 582 ifp = sc->vge_ifp; 583 if ((if_getflags(ifp) & IFF_BROADCAST) != 0) 584 rxcfg |= VGE_RXCTL_RX_BCAST; 585 if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 586 if ((if_getflags(ifp) & IFF_PROMISC) != 0) 587 rxcfg |= VGE_RXCTL_RX_PROMISC; 588 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { 589 hashes[0] = 0xFFFFFFFF; 590 hashes[1] = 0xFFFFFFFF; 591 } 592 goto done; 593 } 594 595 vge_cam_clear(sc); 596 597 /* Now program new ones */ 598 if_foreach_llmaddr(ifp, vge_set_maddr, sc); 599 600 /* If there were too many addresses, use the hash filter. */ 601 if (sc->vge_camidx == VGE_CAM_MAXADDRS) { 602 vge_cam_clear(sc); 603 if_foreach_llmaddr(ifp, vge_hash_maddr, hashes); 604 } 605 606 done: 607 if (hashes[0] != 0 || hashes[1] != 0) 608 rxcfg |= VGE_RXCTL_RX_MCAST; 609 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); 610 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); 611 CSR_WRITE_1(sc, VGE_RXCTL, rxcfg); 612 } 613 614 static void 615 vge_reset(struct vge_softc *sc) 616 { 617 int i; 618 619 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); 620 621 for (i = 0; i < VGE_TIMEOUT; i++) { 622 DELAY(5); 623 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) 624 break; 625 } 626 627 if (i == VGE_TIMEOUT) { 628 device_printf(sc->vge_dev, "soft reset timed out\n"); 629 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); 630 DELAY(2000); 631 } 632 633 DELAY(5000); 634 } 635 636 /* 637 * Probe for a VIA gigabit chip. Check the PCI vendor and device 638 * IDs against our list and return a device name if we find a match. 
639 */ 640 static int 641 vge_probe(device_t dev) 642 { 643 struct vge_type *t; 644 645 t = vge_devs; 646 647 while (t->vge_name != NULL) { 648 if ((pci_get_vendor(dev) == t->vge_vid) && 649 (pci_get_device(dev) == t->vge_did)) { 650 device_set_desc(dev, t->vge_name); 651 return (BUS_PROBE_DEFAULT); 652 } 653 t++; 654 } 655 656 return (ENXIO); 657 } 658 659 /* 660 * Map a single buffer address. 661 */ 662 663 struct vge_dmamap_arg { 664 bus_addr_t vge_busaddr; 665 }; 666 667 static void 668 vge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 669 { 670 struct vge_dmamap_arg *ctx; 671 672 if (error != 0) 673 return; 674 675 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 676 677 ctx = (struct vge_dmamap_arg *)arg; 678 ctx->vge_busaddr = segs[0].ds_addr; 679 } 680 681 static int 682 vge_dma_alloc(struct vge_softc *sc) 683 { 684 struct vge_dmamap_arg ctx; 685 struct vge_txdesc *txd; 686 struct vge_rxdesc *rxd; 687 bus_addr_t lowaddr, tx_ring_end, rx_ring_end; 688 int error, i; 689 690 /* 691 * It seems old PCI controllers do not support DAC. DAC 692 * configuration can be enabled by accessing VGE_CHIPCFG3 693 * register but honor EEPROM configuration instead of 694 * blindly overriding DAC configuration. PCIe based 695 * controllers are supposed to support 64bit DMA so enable 696 * 64bit DMA on these controllers. 697 */ 698 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 699 lowaddr = BUS_SPACE_MAXADDR; 700 else 701 lowaddr = BUS_SPACE_MAXADDR_32BIT; 702 703 again: 704 /* Create parent ring tag. */ 705 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 706 1, 0, /* algnmnt, boundary */ 707 lowaddr, /* lowaddr */ 708 BUS_SPACE_MAXADDR, /* highaddr */ 709 NULL, NULL, /* filter, filterarg */ 710 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 711 0, /* nsegments */ 712 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 713 0, /* flags */ 714 NULL, NULL, /* lockfunc, lockarg */ 715 &sc->vge_cdata.vge_ring_tag); 716 if (error != 0) { 717 device_printf(sc->vge_dev, 718 "could not create parent DMA tag.\n"); 719 goto fail; 720 } 721 722 /* Create tag for Tx ring. */ 723 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 724 VGE_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 725 BUS_SPACE_MAXADDR, /* lowaddr */ 726 BUS_SPACE_MAXADDR, /* highaddr */ 727 NULL, NULL, /* filter, filterarg */ 728 VGE_TX_LIST_SZ, /* maxsize */ 729 1, /* nsegments */ 730 VGE_TX_LIST_SZ, /* maxsegsize */ 731 0, /* flags */ 732 NULL, NULL, /* lockfunc, lockarg */ 733 &sc->vge_cdata.vge_tx_ring_tag); 734 if (error != 0) { 735 device_printf(sc->vge_dev, 736 "could not allocate Tx ring DMA tag.\n"); 737 goto fail; 738 } 739 740 /* Create tag for Rx ring. */ 741 error = bus_dma_tag_create(sc->vge_cdata.vge_ring_tag,/* parent */ 742 VGE_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 743 BUS_SPACE_MAXADDR, /* lowaddr */ 744 BUS_SPACE_MAXADDR, /* highaddr */ 745 NULL, NULL, /* filter, filterarg */ 746 VGE_RX_LIST_SZ, /* maxsize */ 747 1, /* nsegments */ 748 VGE_RX_LIST_SZ, /* maxsegsize */ 749 0, /* flags */ 750 NULL, NULL, /* lockfunc, lockarg */ 751 &sc->vge_cdata.vge_rx_ring_tag); 752 if (error != 0) { 753 device_printf(sc->vge_dev, 754 "could not allocate Rx ring DMA tag.\n"); 755 goto fail; 756 } 757 758 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
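	 * (The ring memory is requested with BUS_DMA_ZERO | BUS_DMA_COHERENT,
	 * and the 4GB-boundary check further below re-runs this whole
	 * allocation with a 32-bit tag if either ring crosses a 4GB boundary
	 * or the two rings land in different 4GB segments.)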
*/ 759 error = bus_dmamem_alloc(sc->vge_cdata.vge_tx_ring_tag, 760 (void **)&sc->vge_rdata.vge_tx_ring, 761 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 762 &sc->vge_cdata.vge_tx_ring_map); 763 if (error != 0) { 764 device_printf(sc->vge_dev, 765 "could not allocate DMA'able memory for Tx ring.\n"); 766 goto fail; 767 } 768 769 ctx.vge_busaddr = 0; 770 error = bus_dmamap_load(sc->vge_cdata.vge_tx_ring_tag, 771 sc->vge_cdata.vge_tx_ring_map, sc->vge_rdata.vge_tx_ring, 772 VGE_TX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 773 if (error != 0 || ctx.vge_busaddr == 0) { 774 device_printf(sc->vge_dev, 775 "could not load DMA'able memory for Tx ring.\n"); 776 goto fail; 777 } 778 sc->vge_rdata.vge_tx_ring_paddr = ctx.vge_busaddr; 779 780 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 781 error = bus_dmamem_alloc(sc->vge_cdata.vge_rx_ring_tag, 782 (void **)&sc->vge_rdata.vge_rx_ring, 783 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 784 &sc->vge_cdata.vge_rx_ring_map); 785 if (error != 0) { 786 device_printf(sc->vge_dev, 787 "could not allocate DMA'able memory for Rx ring.\n"); 788 goto fail; 789 } 790 791 ctx.vge_busaddr = 0; 792 error = bus_dmamap_load(sc->vge_cdata.vge_rx_ring_tag, 793 sc->vge_cdata.vge_rx_ring_map, sc->vge_rdata.vge_rx_ring, 794 VGE_RX_LIST_SZ, vge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 795 if (error != 0 || ctx.vge_busaddr == 0) { 796 device_printf(sc->vge_dev, 797 "could not load DMA'able memory for Rx ring.\n"); 798 goto fail; 799 } 800 sc->vge_rdata.vge_rx_ring_paddr = ctx.vge_busaddr; 801 802 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 803 tx_ring_end = sc->vge_rdata.vge_tx_ring_paddr + VGE_TX_LIST_SZ; 804 rx_ring_end = sc->vge_rdata.vge_rx_ring_paddr + VGE_RX_LIST_SZ; 805 if ((VGE_ADDR_HI(tx_ring_end) != 806 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr)) || 807 (VGE_ADDR_HI(rx_ring_end) != 808 VGE_ADDR_HI(sc->vge_rdata.vge_rx_ring_paddr)) || 809 VGE_ADDR_HI(tx_ring_end) != VGE_ADDR_HI(rx_ring_end)) { 810 device_printf(sc->vge_dev, "4GB boundary crossed, " 811 "switching to 32bit DMA address mode.\n"); 812 vge_dma_free(sc); 813 /* Limit DMA address space to 32bit and try again. */ 814 lowaddr = BUS_SPACE_MAXADDR_32BIT; 815 goto again; 816 } 817 818 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 819 lowaddr = VGE_BUF_DMA_MAXADDR; 820 else 821 lowaddr = BUS_SPACE_MAXADDR_32BIT; 822 /* Create parent buffer tag. */ 823 error = bus_dma_tag_create(bus_get_dma_tag(sc->vge_dev),/* parent */ 824 1, 0, /* algnmnt, boundary */ 825 lowaddr, /* lowaddr */ 826 BUS_SPACE_MAXADDR, /* highaddr */ 827 NULL, NULL, /* filter, filterarg */ 828 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 829 0, /* nsegments */ 830 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 831 0, /* flags */ 832 NULL, NULL, /* lockfunc, lockarg */ 833 &sc->vge_cdata.vge_buffer_tag); 834 if (error != 0) { 835 device_printf(sc->vge_dev, 836 "could not create parent buffer DMA tag.\n"); 837 goto fail; 838 } 839 840 /* Create tag for Tx buffers. 
*/ 841 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 842 1, 0, /* algnmnt, boundary */ 843 BUS_SPACE_MAXADDR, /* lowaddr */ 844 BUS_SPACE_MAXADDR, /* highaddr */ 845 NULL, NULL, /* filter, filterarg */ 846 MCLBYTES * VGE_MAXTXSEGS, /* maxsize */ 847 VGE_MAXTXSEGS, /* nsegments */ 848 MCLBYTES, /* maxsegsize */ 849 0, /* flags */ 850 NULL, NULL, /* lockfunc, lockarg */ 851 &sc->vge_cdata.vge_tx_tag); 852 if (error != 0) { 853 device_printf(sc->vge_dev, "could not create Tx DMA tag.\n"); 854 goto fail; 855 } 856 857 /* Create tag for Rx buffers. */ 858 error = bus_dma_tag_create(sc->vge_cdata.vge_buffer_tag,/* parent */ 859 VGE_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 860 BUS_SPACE_MAXADDR, /* lowaddr */ 861 BUS_SPACE_MAXADDR, /* highaddr */ 862 NULL, NULL, /* filter, filterarg */ 863 MCLBYTES, /* maxsize */ 864 1, /* nsegments */ 865 MCLBYTES, /* maxsegsize */ 866 0, /* flags */ 867 NULL, NULL, /* lockfunc, lockarg */ 868 &sc->vge_cdata.vge_rx_tag); 869 if (error != 0) { 870 device_printf(sc->vge_dev, "could not create Rx DMA tag.\n"); 871 goto fail; 872 } 873 874 /* Create DMA maps for Tx buffers. */ 875 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 876 txd = &sc->vge_cdata.vge_txdesc[i]; 877 txd->tx_m = NULL; 878 txd->tx_dmamap = NULL; 879 error = bus_dmamap_create(sc->vge_cdata.vge_tx_tag, 0, 880 &txd->tx_dmamap); 881 if (error != 0) { 882 device_printf(sc->vge_dev, 883 "could not create Tx dmamap.\n"); 884 goto fail; 885 } 886 } 887 /* Create DMA maps for Rx buffers. */ 888 if ((error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 889 &sc->vge_cdata.vge_rx_sparemap)) != 0) { 890 device_printf(sc->vge_dev, 891 "could not create spare Rx dmamap.\n"); 892 goto fail; 893 } 894 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 895 rxd = &sc->vge_cdata.vge_rxdesc[i]; 896 rxd->rx_m = NULL; 897 rxd->rx_dmamap = NULL; 898 error = bus_dmamap_create(sc->vge_cdata.vge_rx_tag, 0, 899 &rxd->rx_dmamap); 900 if (error != 0) { 901 device_printf(sc->vge_dev, 902 "could not create Rx dmamap.\n"); 903 goto fail; 904 } 905 } 906 907 fail: 908 return (error); 909 } 910 911 static void 912 vge_dma_free(struct vge_softc *sc) 913 { 914 struct vge_txdesc *txd; 915 struct vge_rxdesc *rxd; 916 int i; 917 918 /* Tx ring. */ 919 if (sc->vge_cdata.vge_tx_ring_tag != NULL) { 920 if (sc->vge_rdata.vge_tx_ring_paddr) 921 bus_dmamap_unload(sc->vge_cdata.vge_tx_ring_tag, 922 sc->vge_cdata.vge_tx_ring_map); 923 if (sc->vge_rdata.vge_tx_ring) 924 bus_dmamem_free(sc->vge_cdata.vge_tx_ring_tag, 925 sc->vge_rdata.vge_tx_ring, 926 sc->vge_cdata.vge_tx_ring_map); 927 sc->vge_rdata.vge_tx_ring = NULL; 928 sc->vge_rdata.vge_tx_ring_paddr = 0; 929 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_ring_tag); 930 sc->vge_cdata.vge_tx_ring_tag = NULL; 931 } 932 /* Rx ring. */ 933 if (sc->vge_cdata.vge_rx_ring_tag != NULL) { 934 if (sc->vge_rdata.vge_rx_ring_paddr) 935 bus_dmamap_unload(sc->vge_cdata.vge_rx_ring_tag, 936 sc->vge_cdata.vge_rx_ring_map); 937 if (sc->vge_rdata.vge_rx_ring) 938 bus_dmamem_free(sc->vge_cdata.vge_rx_ring_tag, 939 sc->vge_rdata.vge_rx_ring, 940 sc->vge_cdata.vge_rx_ring_map); 941 sc->vge_rdata.vge_rx_ring = NULL; 942 sc->vge_rdata.vge_rx_ring_paddr = 0; 943 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_ring_tag); 944 sc->vge_cdata.vge_rx_ring_tag = NULL; 945 } 946 /* Tx buffers. 
*/ 947 if (sc->vge_cdata.vge_tx_tag != NULL) { 948 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 949 txd = &sc->vge_cdata.vge_txdesc[i]; 950 if (txd->tx_dmamap != NULL) { 951 bus_dmamap_destroy(sc->vge_cdata.vge_tx_tag, 952 txd->tx_dmamap); 953 txd->tx_dmamap = NULL; 954 } 955 } 956 bus_dma_tag_destroy(sc->vge_cdata.vge_tx_tag); 957 sc->vge_cdata.vge_tx_tag = NULL; 958 } 959 /* Rx buffers. */ 960 if (sc->vge_cdata.vge_rx_tag != NULL) { 961 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 962 rxd = &sc->vge_cdata.vge_rxdesc[i]; 963 if (rxd->rx_dmamap != NULL) { 964 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 965 rxd->rx_dmamap); 966 rxd->rx_dmamap = NULL; 967 } 968 } 969 if (sc->vge_cdata.vge_rx_sparemap != NULL) { 970 bus_dmamap_destroy(sc->vge_cdata.vge_rx_tag, 971 sc->vge_cdata.vge_rx_sparemap); 972 sc->vge_cdata.vge_rx_sparemap = NULL; 973 } 974 bus_dma_tag_destroy(sc->vge_cdata.vge_rx_tag); 975 sc->vge_cdata.vge_rx_tag = NULL; 976 } 977 978 if (sc->vge_cdata.vge_buffer_tag != NULL) { 979 bus_dma_tag_destroy(sc->vge_cdata.vge_buffer_tag); 980 sc->vge_cdata.vge_buffer_tag = NULL; 981 } 982 if (sc->vge_cdata.vge_ring_tag != NULL) { 983 bus_dma_tag_destroy(sc->vge_cdata.vge_ring_tag); 984 sc->vge_cdata.vge_ring_tag = NULL; 985 } 986 } 987 988 /* 989 * Attach the interface. Allocate softc structures, do ifmedia 990 * setup and ethernet/BPF attach. 991 */ 992 static int 993 vge_attach(device_t dev) 994 { 995 u_char eaddr[ETHER_ADDR_LEN]; 996 struct vge_softc *sc; 997 if_t ifp; 998 int error = 0, cap, i, msic, rid; 999 1000 sc = device_get_softc(dev); 1001 sc->vge_dev = dev; 1002 1003 mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1004 MTX_DEF); 1005 callout_init_mtx(&sc->vge_watchdog, &sc->vge_mtx, 0); 1006 1007 /* 1008 * Map control/status registers. 1009 */ 1010 pci_enable_busmaster(dev); 1011 1012 rid = PCIR_BAR(1); 1013 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1014 RF_ACTIVE); 1015 1016 if (sc->vge_res == NULL) { 1017 device_printf(dev, "couldn't map ports/memory\n"); 1018 error = ENXIO; 1019 goto fail; 1020 } 1021 1022 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 1023 sc->vge_flags |= VGE_FLAG_PCIE; 1024 sc->vge_expcap = cap; 1025 } else 1026 sc->vge_flags |= VGE_FLAG_JUMBO; 1027 if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) { 1028 sc->vge_flags |= VGE_FLAG_PMCAP; 1029 sc->vge_pmcap = cap; 1030 } 1031 rid = 0; 1032 msic = pci_msi_count(dev); 1033 if (msi_disable == 0 && msic > 0) { 1034 msic = 1; 1035 if (pci_alloc_msi(dev, &msic) == 0) { 1036 if (msic == 1) { 1037 sc->vge_flags |= VGE_FLAG_MSI; 1038 device_printf(dev, "Using %d MSI message\n", 1039 msic); 1040 rid = 1; 1041 } else 1042 pci_release_msi(dev); 1043 } 1044 } 1045 1046 /* Allocate interrupt */ 1047 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1048 ((sc->vge_flags & VGE_FLAG_MSI) ? 0 : RF_SHAREABLE) | RF_ACTIVE); 1049 if (sc->vge_irq == NULL) { 1050 device_printf(dev, "couldn't map interrupt\n"); 1051 error = ENXIO; 1052 goto fail; 1053 } 1054 1055 /* Reset the adapter. */ 1056 vge_reset(sc); 1057 /* Reload EEPROM. */ 1058 CSR_WRITE_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); 1059 for (i = 0; i < VGE_TIMEOUT; i++) { 1060 DELAY(5); 1061 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) 1062 break; 1063 } 1064 if (i == VGE_TIMEOUT) 1065 device_printf(dev, "EEPROM reload timed out\n"); 1066 /* 1067 * Clear PACPI as EEPROM reload will set the bit. Otherwise 1068 * MAC will receive magic packet which in turn confuses 1069 * controller. 
1070 */ 1071 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); 1072 1073 /* 1074 * Get station address from the EEPROM. 1075 */ 1076 vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); 1077 /* 1078 * Save configured PHY address. 1079 * It seems the PHY address of PCIe controllers just 1080 * reflects media jump strapping status so we assume the 1081 * internal PHY address of PCIe controller is at 1. 1082 */ 1083 if ((sc->vge_flags & VGE_FLAG_PCIE) != 0) 1084 sc->vge_phyaddr = 1; 1085 else 1086 sc->vge_phyaddr = CSR_READ_1(sc, VGE_MIICFG) & 1087 VGE_MIICFG_PHYADDR; 1088 /* Clear WOL and take hardware from powerdown. */ 1089 vge_clrwol(sc); 1090 vge_sysctl_node(sc); 1091 error = vge_dma_alloc(sc); 1092 if (error) 1093 goto fail; 1094 1095 ifp = sc->vge_ifp = if_alloc(IFT_ETHER); 1096 vge_miipoll_start(sc); 1097 /* Do MII setup */ 1098 error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd, 1099 vge_ifmedia_sts, BMSR_DEFCAPMASK, sc->vge_phyaddr, MII_OFFSET_ANY, 1100 MIIF_DOPAUSE); 1101 if (error != 0) { 1102 device_printf(dev, "attaching PHYs failed\n"); 1103 goto fail; 1104 } 1105 1106 if_setsoftc(ifp, sc); 1107 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1108 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 1109 if_setioctlfn(ifp, vge_ioctl); 1110 if_setcapabilities(ifp, IFCAP_VLAN_MTU); 1111 if_setstartfn(ifp, vge_start); 1112 if_sethwassist(ifp, VGE_CSUM_FEATURES); 1113 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | 1114 IFCAP_VLAN_HWTAGGING, 0); 1115 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) 1116 if_setcapabilitiesbit(ifp, IFCAP_WOL, 0); 1117 if_setcapenable(ifp, if_getcapabilities(ifp)); 1118 #ifdef DEVICE_POLLING 1119 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); 1120 #endif 1121 if_setinitfn(ifp, vge_init); 1122 if_setsendqlen(ifp, VGE_TX_DESC_CNT - 1); 1123 if_setsendqready(ifp); 1124 1125 /* 1126 * Call MI attach routine. 1127 */ 1128 ether_ifattach(ifp, eaddr); 1129 1130 /* Tell the upper layer(s) we support long frames. */ 1131 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 1132 1133 /* Hook interrupt last to avoid having to lock softc */ 1134 error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, 1135 NULL, vge_intr, sc, &sc->vge_intrhand); 1136 1137 if (error) { 1138 device_printf(dev, "couldn't set up irq\n"); 1139 ether_ifdetach(ifp); 1140 goto fail; 1141 } 1142 1143 fail: 1144 if (error) 1145 vge_detach(dev); 1146 1147 return (error); 1148 } 1149 1150 /* 1151 * Shutdown hardware and free up resources. This can be called any 1152 * time after the mutex has been initialized. It is called in both 1153 * the error case in attach and the normal detach case so it needs 1154 * to be careful about only freeing resources that have actually been 1155 * allocated. 
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized"));
	ifp = sc->vge_ifp;

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		VGE_LOCK(sc);
		vge_stop(sc);
		VGE_UNLOCK(sc);
		callout_drain(&sc->vge_watchdog);
	}
	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_intrhand)
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
	if (sc->vge_irq)
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->vge_flags & VGE_FLAG_MSI ? 1 : 0, sc->vge_irq);
	if (sc->vge_flags & VGE_FLAG_MSI)
		pci_release_msi(dev);
	if (sc->vge_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(1), sc->vge_res);
	if (ifp)
		if_free(ifp);

	vge_dma_free(sc);
	mtx_destroy(&sc->vge_mtx);

	return (0);
}

static void
vge_discard_rxbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	int i;

	rxd = &sc->vge_cdata.vge_rxdesc[prod];
	rxd->rx_desc->vge_sts = 0;
	rxd->rx_desc->vge_ctl = 0;

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips).  We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
	if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) {
		for (i = VGE_RXCHUNK; i > 0; i--) {
			rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN);
			rxd = rxd->rxd_prev;
		}
		sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK;
	}
}

static int
vge_newbuf(struct vge_softc *sc, int prod)
{
	struct vge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int i, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * This is part of an evil trick to deal with strict-alignment
	 * architectures.  The VIA chip requires RX buffers to be aligned
	 * on 32-bit boundaries, but that will hose strict-alignment
	 * architectures.  To get around this, we leave some empty space
	 * at the start of each buffer and, for strict-alignment hosts,
	 * we copy the buffer back two bytes to achieve word alignment.
	 * This is slightly more efficient than allocating a new buffer,
	 * copying the contents, and discarding the old buffer.
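	 * (The copy-down itself is done by vge_fixup_rx(), which moves the
	 * received data back by ETHER_ALIGN (2) bytes so the IP header that
	 * follows the 14-byte ethernet header ends up 32-bit aligned.)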
1251 */ 1252 m->m_len = m->m_pkthdr.len = MCLBYTES; 1253 m_adj(m, VGE_RX_BUF_ALIGN); 1254 1255 if (bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_rx_tag, 1256 sc->vge_cdata.vge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1257 m_freem(m); 1258 return (ENOBUFS); 1259 } 1260 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1261 1262 rxd = &sc->vge_cdata.vge_rxdesc[prod]; 1263 if (rxd->rx_m != NULL) { 1264 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1265 BUS_DMASYNC_POSTREAD); 1266 bus_dmamap_unload(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap); 1267 } 1268 map = rxd->rx_dmamap; 1269 rxd->rx_dmamap = sc->vge_cdata.vge_rx_sparemap; 1270 sc->vge_cdata.vge_rx_sparemap = map; 1271 bus_dmamap_sync(sc->vge_cdata.vge_rx_tag, rxd->rx_dmamap, 1272 BUS_DMASYNC_PREREAD); 1273 rxd->rx_m = m; 1274 1275 rxd->rx_desc->vge_sts = 0; 1276 rxd->rx_desc->vge_ctl = 0; 1277 rxd->rx_desc->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); 1278 rxd->rx_desc->vge_addrhi = htole32(VGE_ADDR_HI(segs[0].ds_addr) | 1279 (VGE_BUFLEN(segs[0].ds_len) << 16) | VGE_RXDESC_I); 1280 1281 /* 1282 * Note: the manual fails to document the fact that for 1283 * proper operation, the driver needs to replenish the RX 1284 * DMA ring 4 descriptors at a time (rather than one at a 1285 * time, like most chips). We can allocate the new buffers 1286 * but we should not set the OWN bits until we're ready 1287 * to hand back 4 of them in one shot. 1288 */ 1289 if ((prod % VGE_RXCHUNK) == (VGE_RXCHUNK - 1)) { 1290 for (i = VGE_RXCHUNK; i > 0; i--) { 1291 rxd->rx_desc->vge_sts = htole32(VGE_RDSTS_OWN); 1292 rxd = rxd->rxd_prev; 1293 } 1294 sc->vge_cdata.vge_rx_commit += VGE_RXCHUNK; 1295 } 1296 1297 return (0); 1298 } 1299 1300 static int 1301 vge_tx_list_init(struct vge_softc *sc) 1302 { 1303 struct vge_ring_data *rd; 1304 struct vge_txdesc *txd; 1305 int i; 1306 1307 VGE_LOCK_ASSERT(sc); 1308 1309 sc->vge_cdata.vge_tx_prodidx = 0; 1310 sc->vge_cdata.vge_tx_considx = 0; 1311 sc->vge_cdata.vge_tx_cnt = 0; 1312 1313 rd = &sc->vge_rdata; 1314 bzero(rd->vge_tx_ring, VGE_TX_LIST_SZ); 1315 for (i = 0; i < VGE_TX_DESC_CNT; i++) { 1316 txd = &sc->vge_cdata.vge_txdesc[i]; 1317 txd->tx_m = NULL; 1318 txd->tx_desc = &rd->vge_tx_ring[i]; 1319 } 1320 1321 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1322 sc->vge_cdata.vge_tx_ring_map, 1323 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1324 1325 return (0); 1326 } 1327 1328 static int 1329 vge_rx_list_init(struct vge_softc *sc) 1330 { 1331 struct vge_ring_data *rd; 1332 struct vge_rxdesc *rxd; 1333 int i; 1334 1335 VGE_LOCK_ASSERT(sc); 1336 1337 sc->vge_cdata.vge_rx_prodidx = 0; 1338 sc->vge_cdata.vge_head = NULL; 1339 sc->vge_cdata.vge_tail = NULL; 1340 sc->vge_cdata.vge_rx_commit = 0; 1341 1342 rd = &sc->vge_rdata; 1343 bzero(rd->vge_rx_ring, VGE_RX_LIST_SZ); 1344 for (i = 0; i < VGE_RX_DESC_CNT; i++) { 1345 rxd = &sc->vge_cdata.vge_rxdesc[i]; 1346 rxd->rx_m = NULL; 1347 rxd->rx_desc = &rd->vge_rx_ring[i]; 1348 if (i == 0) 1349 rxd->rxd_prev = 1350 &sc->vge_cdata.vge_rxdesc[VGE_RX_DESC_CNT - 1]; 1351 else 1352 rxd->rxd_prev = &sc->vge_cdata.vge_rxdesc[i - 1]; 1353 if (vge_newbuf(sc, i) != 0) 1354 return (ENOBUFS); 1355 } 1356 1357 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1358 sc->vge_cdata.vge_rx_ring_map, 1359 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1360 1361 sc->vge_cdata.vge_rx_commit = 0; 1362 1363 return (0); 1364 } 1365 1366 static void 1367 vge_freebufs(struct vge_softc *sc) 1368 { 1369 struct vge_txdesc *txd; 1370 struct vge_rxdesc *rxd; 1371 if_t ifp; 1372 int i; 

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;
	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		rxd = &sc->vge_cdata.vge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vge_cdata.vge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vge_cdata.vge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}

	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		txd = &sc->vge_cdata.vge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vge_cdata.vge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vge_cdata.vge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}
	}
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * RX handler.  We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static int
vge_rxeof(struct vge_softc *sc, int count)
{
	struct mbuf *m;
	if_t ifp;
	int prod, prog, total_len;
	struct vge_rxdesc *rxd;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	VGE_LOCK_ASSERT(sc);

	ifp = sc->vge_ifp;

	bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag,
	    sc->vge_cdata.vge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	prod = sc->vge_cdata.vge_rx_prodidx;
	for (prog = 0; count > 0 &&
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0;
	    VGE_RX_DESC_INC(prod)) {
		cur_rx = &sc->vge_rdata.vge_rx_ring[prod];
		rxstat = le32toh(cur_rx->vge_sts);
		if ((rxstat & VGE_RDSTS_OWN) != 0)
			break;
		count--;
		prog++;
		rxctl = le32toh(cur_rx->vge_ctl);
		total_len = VGE_RXBYTES(rxstat);
		rxd = &sc->vge_cdata.vge_rxdesc[prod];
		m = rxd->rx_m;

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment.  Either way, we want to
		 * accumulate the buffers.
		 */
		if ((rxstat & VGE_RXPKT_SOF) != 0) {
			if (vge_newbuf(sc, prod) != 0) {
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				VGE_CHAIN_RESET(sc);
				vge_discard_rxbuf(sc, prod);
				continue;
			}
			m->m_len = MCLBYTES - VGE_RX_BUF_ALIGN;
			if (sc->vge_cdata.vge_head == NULL) {
				sc->vge_cdata.vge_head = m;
				sc->vge_cdata.vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_cdata.vge_tail->m_next = m;
				sc->vge_cdata.vge_tail = m;
			}
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 * We also want to receive bad-checksummed frames and
		 * frames with bad length.
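		 * (Those cases show up as the VGE_RDSTS_VIDM, VGE_RDSTS_RLERR
		 * and VGE_RDSTS_CSUMERR status bits, which is exactly what the
		 * test below filters on.)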
1495 */ 1496 if ((rxstat & VGE_RDSTS_RXOK) == 0 && 1497 (rxstat & (VGE_RDSTS_VIDM | VGE_RDSTS_RLERR | 1498 VGE_RDSTS_CSUMERR)) == 0) { 1499 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1500 /* 1501 * If this is part of a multi-fragment packet, 1502 * discard all the pieces. 1503 */ 1504 VGE_CHAIN_RESET(sc); 1505 vge_discard_rxbuf(sc, prod); 1506 continue; 1507 } 1508 1509 if (vge_newbuf(sc, prod) != 0) { 1510 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1511 VGE_CHAIN_RESET(sc); 1512 vge_discard_rxbuf(sc, prod); 1513 continue; 1514 } 1515 1516 /* Chain received mbufs. */ 1517 if (sc->vge_cdata.vge_head != NULL) { 1518 m->m_len = total_len % (MCLBYTES - VGE_RX_BUF_ALIGN); 1519 /* 1520 * Special case: if there's 4 bytes or less 1521 * in this buffer, the mbuf can be discarded: 1522 * the last 4 bytes is the CRC, which we don't 1523 * care about anyway. 1524 */ 1525 if (m->m_len <= ETHER_CRC_LEN) { 1526 sc->vge_cdata.vge_tail->m_len -= 1527 (ETHER_CRC_LEN - m->m_len); 1528 m_freem(m); 1529 } else { 1530 m->m_len -= ETHER_CRC_LEN; 1531 m->m_flags &= ~M_PKTHDR; 1532 sc->vge_cdata.vge_tail->m_next = m; 1533 } 1534 m = sc->vge_cdata.vge_head; 1535 m->m_flags |= M_PKTHDR; 1536 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1537 } else { 1538 m->m_flags |= M_PKTHDR; 1539 m->m_pkthdr.len = m->m_len = 1540 (total_len - ETHER_CRC_LEN); 1541 } 1542 1543 #ifndef __NO_STRICT_ALIGNMENT 1544 vge_fixup_rx(m); 1545 #endif 1546 m->m_pkthdr.rcvif = ifp; 1547 1548 /* Do RX checksumming if enabled */ 1549 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 && 1550 (rxctl & VGE_RDCTL_FRAG) == 0) { 1551 /* Check IP header checksum */ 1552 if ((rxctl & VGE_RDCTL_IPPKT) != 0) 1553 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1554 if ((rxctl & VGE_RDCTL_IPCSUMOK) != 0) 1555 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1556 1557 /* Check TCP/UDP checksum */ 1558 if (rxctl & (VGE_RDCTL_TCPPKT | VGE_RDCTL_UDPPKT) && 1559 rxctl & VGE_RDCTL_PROTOCSUMOK) { 1560 m->m_pkthdr.csum_flags |= 1561 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1562 m->m_pkthdr.csum_data = 0xffff; 1563 } 1564 } 1565 1566 if ((rxstat & VGE_RDSTS_VTAG) != 0) { 1567 /* 1568 * The 32-bit rxctl register is stored in little-endian. 1569 * However, the 16-bit vlan tag is stored in big-endian, 1570 * so we have to byte swap it. 1571 */ 1572 m->m_pkthdr.ether_vtag = 1573 bswap16(rxctl & VGE_RDCTL_VLANID); 1574 m->m_flags |= M_VLANTAG; 1575 } 1576 1577 VGE_UNLOCK(sc); 1578 if_input(ifp, m); 1579 VGE_LOCK(sc); 1580 sc->vge_cdata.vge_head = NULL; 1581 sc->vge_cdata.vge_tail = NULL; 1582 } 1583 1584 if (prog > 0) { 1585 sc->vge_cdata.vge_rx_prodidx = prod; 1586 bus_dmamap_sync(sc->vge_cdata.vge_rx_ring_tag, 1587 sc->vge_cdata.vge_rx_ring_map, 1588 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1589 /* Update residue counter. */ 1590 if (sc->vge_cdata.vge_rx_commit != 0) { 1591 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, 1592 sc->vge_cdata.vge_rx_commit); 1593 sc->vge_cdata.vge_rx_commit = 0; 1594 } 1595 } 1596 return (prog); 1597 } 1598 1599 static void 1600 vge_txeof(struct vge_softc *sc) 1601 { 1602 if_t ifp; 1603 struct vge_tx_desc *cur_tx; 1604 struct vge_txdesc *txd; 1605 uint32_t txstat; 1606 int cons, prod; 1607 1608 VGE_LOCK_ASSERT(sc); 1609 1610 ifp = sc->vge_ifp; 1611 1612 if (sc->vge_cdata.vge_tx_cnt == 0) 1613 return; 1614 1615 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1616 sc->vge_cdata.vge_tx_ring_map, 1617 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1618 1619 /* 1620 * Go through our tx list and free mbufs for those 1621 * frames that have been transmitted. 
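	 * A frame has been transmitted once the chip clears the
	 * VGE_TDSTS_OWN bit in the descriptor status word, which is what
	 * the loop below checks before reclaiming each mbuf.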
1622 */ 1623 cons = sc->vge_cdata.vge_tx_considx; 1624 prod = sc->vge_cdata.vge_tx_prodidx; 1625 for (; cons != prod; VGE_TX_DESC_INC(cons)) { 1626 cur_tx = &sc->vge_rdata.vge_tx_ring[cons]; 1627 txstat = le32toh(cur_tx->vge_sts); 1628 if ((txstat & VGE_TDSTS_OWN) != 0) 1629 break; 1630 sc->vge_cdata.vge_tx_cnt--; 1631 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1632 1633 txd = &sc->vge_cdata.vge_txdesc[cons]; 1634 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1635 BUS_DMASYNC_POSTWRITE); 1636 bus_dmamap_unload(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap); 1637 1638 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1639 __func__)); 1640 m_freem(txd->tx_m); 1641 txd->tx_m = NULL; 1642 txd->tx_desc->vge_frag[0].vge_addrhi = 0; 1643 } 1644 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1645 sc->vge_cdata.vge_tx_ring_map, 1646 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1647 sc->vge_cdata.vge_tx_considx = cons; 1648 if (sc->vge_cdata.vge_tx_cnt == 0) 1649 sc->vge_timer = 0; 1650 } 1651 1652 static void 1653 vge_link_statchg(void *xsc) 1654 { 1655 struct vge_softc *sc; 1656 if_t ifp; 1657 uint8_t physts; 1658 1659 sc = xsc; 1660 ifp = sc->vge_ifp; 1661 VGE_LOCK_ASSERT(sc); 1662 1663 physts = CSR_READ_1(sc, VGE_PHYSTS0); 1664 if ((physts & VGE_PHYSTS_RESETSTS) == 0) { 1665 if ((physts & VGE_PHYSTS_LINK) == 0) { 1666 sc->vge_flags &= ~VGE_FLAG_LINK; 1667 if_link_state_change(sc->vge_ifp, 1668 LINK_STATE_DOWN); 1669 } else { 1670 sc->vge_flags |= VGE_FLAG_LINK; 1671 if_link_state_change(sc->vge_ifp, 1672 LINK_STATE_UP); 1673 CSR_WRITE_1(sc, VGE_CRC2, VGE_CR2_FDX_TXFLOWCTL_ENABLE | 1674 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1675 if ((physts & VGE_PHYSTS_FDX) != 0) { 1676 if ((physts & VGE_PHYSTS_TXFLOWCAP) != 0) 1677 CSR_WRITE_1(sc, VGE_CRS2, 1678 VGE_CR2_FDX_TXFLOWCTL_ENABLE); 1679 if ((physts & VGE_PHYSTS_RXFLOWCAP) != 0) 1680 CSR_WRITE_1(sc, VGE_CRS2, 1681 VGE_CR2_FDX_RXFLOWCTL_ENABLE); 1682 } 1683 if (!if_sendq_empty(ifp)) 1684 vge_start_locked(ifp); 1685 } 1686 } 1687 /* 1688 * Restart MII auto-polling because link state change interrupt 1689 * will disable it. 1690 */ 1691 vge_miipoll_start(sc); 1692 } 1693 1694 #ifdef DEVICE_POLLING 1695 static int 1696 vge_poll (if_t ifp, enum poll_cmd cmd, int count) 1697 { 1698 struct vge_softc *sc = if_getsoftc(ifp); 1699 int rx_npkts = 0; 1700 1701 VGE_LOCK(sc); 1702 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 1703 goto done; 1704 1705 rx_npkts = vge_rxeof(sc, count); 1706 vge_txeof(sc); 1707 1708 if (!if_sendq_empty(ifp)) 1709 vge_start_locked(ifp); 1710 1711 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 1712 uint32_t status; 1713 status = CSR_READ_4(sc, VGE_ISR); 1714 if (status == 0xFFFFFFFF) 1715 goto done; 1716 if (status) 1717 CSR_WRITE_4(sc, VGE_ISR, status); 1718 1719 /* 1720 * XXX check behaviour on receiver stalls. 
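		 * (For now, a TX or RX DMA stall is handled by clearing
		 * IFF_DRV_RUNNING and reinitializing the chip via
		 * vge_init_locked(), as the code below does.)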
1721 */ 1722 1723 if (status & VGE_ISR_TXDMA_STALL || 1724 status & VGE_ISR_RXDMA_STALL) { 1725 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1726 vge_init_locked(sc); 1727 } 1728 1729 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1730 vge_rxeof(sc, count); 1731 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1732 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1733 } 1734 } 1735 done: 1736 VGE_UNLOCK(sc); 1737 return (rx_npkts); 1738 } 1739 #endif /* DEVICE_POLLING */ 1740 1741 static void 1742 vge_intr(void *arg) 1743 { 1744 struct vge_softc *sc; 1745 if_t ifp; 1746 uint32_t status; 1747 1748 sc = arg; 1749 VGE_LOCK(sc); 1750 1751 ifp = sc->vge_ifp; 1752 if ((sc->vge_flags & VGE_FLAG_SUSPENDED) != 0 || 1753 (if_getflags(ifp) & IFF_UP) == 0) { 1754 VGE_UNLOCK(sc); 1755 return; 1756 } 1757 1758 #ifdef DEVICE_POLLING 1759 if (if_getcapenable(ifp) & IFCAP_POLLING) { 1760 status = CSR_READ_4(sc, VGE_ISR); 1761 CSR_WRITE_4(sc, VGE_ISR, status); 1762 if (status != 0xFFFFFFFF && (status & VGE_ISR_LINKSTS) != 0) 1763 vge_link_statchg(sc); 1764 VGE_UNLOCK(sc); 1765 return; 1766 } 1767 #endif 1768 1769 /* Disable interrupts */ 1770 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 1771 status = CSR_READ_4(sc, VGE_ISR); 1772 CSR_WRITE_4(sc, VGE_ISR, status | VGE_ISR_HOLDOFF_RELOAD); 1773 /* If the card has gone away the read returns 0xffff. */ 1774 if (status == 0xFFFFFFFF || (status & VGE_INTRS) == 0) 1775 goto done; 1776 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1777 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) 1778 vge_rxeof(sc, VGE_RX_DESC_CNT); 1779 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { 1780 vge_rxeof(sc, VGE_RX_DESC_CNT); 1781 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); 1782 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); 1783 } 1784 1785 if (status & (VGE_ISR_TXOK0|VGE_ISR_TXOK_HIPRIO)) 1786 vge_txeof(sc); 1787 1788 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) { 1789 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1790 vge_init_locked(sc); 1791 } 1792 1793 if (status & VGE_ISR_LINKSTS) 1794 vge_link_statchg(sc); 1795 } 1796 done: 1797 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1798 /* Re-enable interrupts */ 1799 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 1800 1801 if (!if_sendq_empty(ifp)) 1802 vge_start_locked(ifp); 1803 } 1804 VGE_UNLOCK(sc); 1805 } 1806 1807 static int 1808 vge_encap(struct vge_softc *sc, struct mbuf **m_head) 1809 { 1810 struct vge_txdesc *txd; 1811 struct vge_tx_frag *frag; 1812 struct mbuf *m; 1813 bus_dma_segment_t txsegs[VGE_MAXTXSEGS]; 1814 int error, i, nsegs, padlen; 1815 uint32_t cflags; 1816 1817 VGE_LOCK_ASSERT(sc); 1818 1819 M_ASSERTPKTHDR((*m_head)); 1820 1821 /* Argh. This chip does not autopad short frames. */ 1822 if ((*m_head)->m_pkthdr.len < VGE_MIN_FRAMELEN) { 1823 m = *m_head; 1824 padlen = VGE_MIN_FRAMELEN - m->m_pkthdr.len; 1825 if (M_WRITABLE(m) == 0) { 1826 /* Get a writable copy. */ 1827 m = m_dup(*m_head, M_NOWAIT); 1828 m_freem(*m_head); 1829 if (m == NULL) { 1830 *m_head = NULL; 1831 return (ENOBUFS); 1832 } 1833 *m_head = m; 1834 } 1835 if (M_TRAILINGSPACE(m) < padlen) { 1836 m = m_defrag(m, M_NOWAIT); 1837 if (m == NULL) { 1838 m_freem(*m_head); 1839 *m_head = NULL; 1840 return (ENOBUFS); 1841 } 1842 } 1843 /* 1844 * Manually pad short frames, and zero the pad space 1845 * to avoid leaking data. 
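		 * For example, a 42-byte ARP request is grown to
		 * VGE_MIN_FRAMELEN bytes here, with the added bytes zeroed
		 * rather than left as stale mbuf contents.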
1846 */ 1847 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1848 m->m_pkthdr.len += padlen; 1849 m->m_len = m->m_pkthdr.len; 1850 *m_head = m; 1851 } 1852 1853 txd = &sc->vge_cdata.vge_txdesc[sc->vge_cdata.vge_tx_prodidx]; 1854 1855 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1856 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1857 if (error == EFBIG) { 1858 m = m_collapse(*m_head, M_NOWAIT, VGE_MAXTXSEGS); 1859 if (m == NULL) { 1860 m_freem(*m_head); 1861 *m_head = NULL; 1862 return (ENOMEM); 1863 } 1864 *m_head = m; 1865 error = bus_dmamap_load_mbuf_sg(sc->vge_cdata.vge_tx_tag, 1866 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1867 if (error != 0) { 1868 m_freem(*m_head); 1869 *m_head = NULL; 1870 return (error); 1871 } 1872 } else if (error != 0) 1873 return (error); 1874 bus_dmamap_sync(sc->vge_cdata.vge_tx_tag, txd->tx_dmamap, 1875 BUS_DMASYNC_PREWRITE); 1876 1877 m = *m_head; 1878 cflags = 0; 1879 1880 /* Configure checksum offload. */ 1881 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1882 cflags |= VGE_TDCTL_IPCSUM; 1883 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1884 cflags |= VGE_TDCTL_TCPCSUM; 1885 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1886 cflags |= VGE_TDCTL_UDPCSUM; 1887 1888 /* Configure VLAN. */ 1889 if ((m->m_flags & M_VLANTAG) != 0) 1890 cflags |= m->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG; 1891 txd->tx_desc->vge_sts = htole32(m->m_pkthdr.len << 16); 1892 /* 1893 * XXX 1894 * Velocity family seems to support TSO but no information 1895 * for MSS configuration is available. Also the number of 1896 * fragments supported by a descriptor is too small to hold 1897 * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF, 1898 * VGE_TD_LS_SOF and VGE_TD_LS_EOF could be used to build 1899 * longer chain of buffers but no additional information is 1900 * available. 1901 * 1902 * When telling the chip how many segments there are, we 1903 * must use nsegs + 1 instead of just nsegs. Darned if I 1904 * know why. This also means we can't use the last fragment 1905 * field of Tx descriptor. 1906 */ 1907 txd->tx_desc->vge_ctl = htole32(cflags | ((nsegs + 1) << 28) | 1908 VGE_TD_LS_NORM); 1909 for (i = 0; i < nsegs; i++) { 1910 frag = &txd->tx_desc->vge_frag[i]; 1911 frag->vge_addrlo = htole32(VGE_ADDR_LO(txsegs[i].ds_addr)); 1912 frag->vge_addrhi = htole32(VGE_ADDR_HI(txsegs[i].ds_addr) | 1913 (VGE_BUFLEN(txsegs[i].ds_len) << 16)); 1914 } 1915 1916 sc->vge_cdata.vge_tx_cnt++; 1917 VGE_TX_DESC_INC(sc->vge_cdata.vge_tx_prodidx); 1918 1919 /* 1920 * Finally request interrupt and give the first descriptor 1921 * ownership to hardware. 1922 */ 1923 txd->tx_desc->vge_ctl |= htole32(VGE_TDCTL_TIC); 1924 txd->tx_desc->vge_sts |= htole32(VGE_TDSTS_OWN); 1925 txd->tx_m = m; 1926 1927 return (0); 1928 } 1929 1930 /* 1931 * Main transmit routine. 
1932 */ 1933 1934 static void 1935 vge_start(if_t ifp) 1936 { 1937 struct vge_softc *sc; 1938 1939 sc = if_getsoftc(ifp); 1940 VGE_LOCK(sc); 1941 vge_start_locked(ifp); 1942 VGE_UNLOCK(sc); 1943 } 1944 1945 static void 1946 vge_start_locked(if_t ifp) 1947 { 1948 struct vge_softc *sc; 1949 struct vge_txdesc *txd; 1950 struct mbuf *m_head; 1951 int enq, idx; 1952 1953 sc = if_getsoftc(ifp); 1954 1955 VGE_LOCK_ASSERT(sc); 1956 1957 if ((sc->vge_flags & VGE_FLAG_LINK) == 0 || 1958 (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1959 IFF_DRV_RUNNING) 1960 return; 1961 1962 idx = sc->vge_cdata.vge_tx_prodidx; 1963 VGE_TX_DESC_DEC(idx); 1964 for (enq = 0; !if_sendq_empty(ifp) && 1965 sc->vge_cdata.vge_tx_cnt < VGE_TX_DESC_CNT - 1; ) { 1966 m_head = if_dequeue(ifp); 1967 if (m_head == NULL) 1968 break; 1969 /* 1970 * Pack the data into the transmit ring. If we 1971 * don't have room, set the OACTIVE flag and wait 1972 * for the NIC to drain the ring. 1973 */ 1974 if (vge_encap(sc, &m_head)) { 1975 if (m_head == NULL) 1976 break; 1977 if_sendq_prepend(ifp, m_head); 1978 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1979 break; 1980 } 1981 1982 txd = &sc->vge_cdata.vge_txdesc[idx]; 1983 txd->tx_desc->vge_frag[0].vge_addrhi |= htole32(VGE_TXDESC_Q); 1984 VGE_TX_DESC_INC(idx); 1985 1986 enq++; 1987 /* 1988 * If there's a BPF listener, bounce a copy of this frame 1989 * to him. 1990 */ 1991 ETHER_BPF_MTAP(ifp, m_head); 1992 } 1993 1994 if (enq > 0) { 1995 bus_dmamap_sync(sc->vge_cdata.vge_tx_ring_tag, 1996 sc->vge_cdata.vge_tx_ring_map, 1997 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1998 /* Issue a transmit command. */ 1999 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); 2000 /* 2001 * Set a timeout in case the chip goes out to lunch. 2002 */ 2003 sc->vge_timer = 5; 2004 } 2005 } 2006 2007 static void 2008 vge_init(void *xsc) 2009 { 2010 struct vge_softc *sc = xsc; 2011 2012 VGE_LOCK(sc); 2013 vge_init_locked(sc); 2014 VGE_UNLOCK(sc); 2015 } 2016 2017 static void 2018 vge_init_locked(struct vge_softc *sc) 2019 { 2020 if_t ifp = sc->vge_ifp; 2021 int error, i; 2022 2023 VGE_LOCK_ASSERT(sc); 2024 2025 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 2026 return; 2027 2028 /* 2029 * Cancel pending I/O and free all RX/TX buffers. 2030 */ 2031 vge_stop(sc); 2032 vge_reset(sc); 2033 vge_miipoll_start(sc); 2034 2035 /* 2036 * Initialize the RX and TX descriptors and mbufs. 2037 */ 2038 2039 error = vge_rx_list_init(sc); 2040 if (error != 0) { 2041 device_printf(sc->vge_dev, "no memory for Rx buffers.\n"); 2042 return; 2043 } 2044 vge_tx_list_init(sc); 2045 /* Clear MAC statistics. */ 2046 vge_stats_clear(sc); 2047 /* Set our station address */ 2048 for (i = 0; i < ETHER_ADDR_LEN; i++) 2049 CSR_WRITE_1(sc, VGE_PAR0 + i, if_getlladdr(sc->vge_ifp)[i]); 2050 2051 /* 2052 * Set receive FIFO threshold. Also allow transmission and 2053 * reception of VLAN tagged frames. 
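 * The threshold field is cleared and then set to the 128-byte value
 * below; VLAN tag handling itself is programmed later in this routine
 * via vge_setvlan().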
2054 */
2055 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
2056 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES);
2057
2058 /* Set DMA burst length */
2059 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
2060 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
2061
2062 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
2063
2064 /* Set collision backoff algorithm */
2065 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
2066 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
2067 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
2068
2069 /* Disable LPSEL field in priority resolution */
2070 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
2071
2072 /*
2073 * Load the addresses of the DMA queues into the chip.
2074 * Note that we only use one transmit queue.
2075 */
2076
2077 CSR_WRITE_4(sc, VGE_TXDESC_HIADDR,
2078 VGE_ADDR_HI(sc->vge_rdata.vge_tx_ring_paddr));
2079 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
2080 VGE_ADDR_LO(sc->vge_rdata.vge_tx_ring_paddr));
2081 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
2082
2083 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
2084 VGE_ADDR_LO(sc->vge_rdata.vge_rx_ring_paddr));
2085 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
2086 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
2087
2088 /* Configure interrupt moderation. */
2089 vge_intr_holdoff(sc);
2090
2091 /* Enable and wake up the RX descriptor queue */
2092 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
2093 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
2094
2095 /* Enable the TX descriptor queue */
2096 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
2097
2098 /* Init the cam filter. */
2099 vge_cam_clear(sc);
2100
2101 /* Set up receiver filter. */
2102 vge_rxfilter(sc);
2103 vge_setvlan(sc);
2104
2105 /* Initialize pause timer. */
2106 CSR_WRITE_2(sc, VGE_TX_PAUSE_TIMER, 0xFFFF);
2107 /*
2108 * Initialize flow control parameters.
2109 * TX XON high threshold : 48
2110 * TX pause low threshold : 24
2111 * Disable half-duplex flow control
2112 */
2113 CSR_WRITE_1(sc, VGE_CRC2, 0xFF);
2114 CSR_WRITE_1(sc, VGE_CRS2, VGE_CR2_XON_ENABLE | 0x0B);
2115
2116 /* Enable jumbo frame reception (if desired) */
2117
2118 /* Start the MAC. */
2119 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
2120 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
2121 CSR_WRITE_1(sc, VGE_CRS0,
2122 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
2123
2124 #ifdef DEVICE_POLLING
2125 /*
2126 * Disable interrupts except link state change if we are polling.
2127 */
2128 if (if_getcapenable(ifp) & IFCAP_POLLING) {
2129 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING);
2130 } else /* otherwise ... */
2131 #endif
2132 {
2133 /*
2134 * Enable interrupts.
2135 */
2136 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2137 }
2138 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2139 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2140
2141 sc->vge_flags &= ~VGE_FLAG_LINK;
2142 vge_ifmedia_upd_locked(sc);
2143
2144 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2145 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2146 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc);
2147 }
2148
2149 /*
2150 * Set media options.
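 * vge_ifmedia_upd() is the ifmedia change callback. It wraps
 * vge_ifmedia_upd_locked(), which resets the PHYs, programs the forced
 * MAC mode bits through vge_setmedia() and then applies the selected
 * media with mii_mediachg().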
2151 */ 2152 static int 2153 vge_ifmedia_upd(if_t ifp) 2154 { 2155 struct vge_softc *sc; 2156 int error; 2157 2158 sc = if_getsoftc(ifp); 2159 VGE_LOCK(sc); 2160 error = vge_ifmedia_upd_locked(sc); 2161 VGE_UNLOCK(sc); 2162 2163 return (error); 2164 } 2165 2166 static int 2167 vge_ifmedia_upd_locked(struct vge_softc *sc) 2168 { 2169 struct mii_data *mii; 2170 struct mii_softc *miisc; 2171 int error; 2172 2173 mii = device_get_softc(sc->vge_miibus); 2174 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2175 PHY_RESET(miisc); 2176 vge_setmedia(sc); 2177 error = mii_mediachg(mii); 2178 2179 return (error); 2180 } 2181 2182 /* 2183 * Report current media status. 2184 */ 2185 static void 2186 vge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 2187 { 2188 struct vge_softc *sc; 2189 struct mii_data *mii; 2190 2191 sc = if_getsoftc(ifp); 2192 mii = device_get_softc(sc->vge_miibus); 2193 2194 VGE_LOCK(sc); 2195 if ((if_getflags(ifp) & IFF_UP) == 0) { 2196 VGE_UNLOCK(sc); 2197 return; 2198 } 2199 mii_pollstat(mii); 2200 ifmr->ifm_active = mii->mii_media_active; 2201 ifmr->ifm_status = mii->mii_media_status; 2202 VGE_UNLOCK(sc); 2203 } 2204 2205 static void 2206 vge_setmedia(struct vge_softc *sc) 2207 { 2208 struct mii_data *mii; 2209 struct ifmedia_entry *ife; 2210 2211 mii = device_get_softc(sc->vge_miibus); 2212 ife = mii->mii_media.ifm_cur; 2213 2214 /* 2215 * If the user manually selects a media mode, we need to turn 2216 * on the forced MAC mode bit in the DIAGCTL register. If the 2217 * user happens to choose a full duplex mode, we also need to 2218 * set the 'force full duplex' bit. This applies only to 2219 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC 2220 * mode is disabled, and in 1000baseT mode, full duplex is 2221 * always implied, so we turn on the forced mode bit but leave 2222 * the FDX bit cleared. 
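 *
 * In short, the switch statement below programs DIAGCTL as follows:
 *   IFM_AUTO               clear MACFORCE, clear FDXFORCE
 *   IFM_1000_T             set MACFORCE,   clear FDXFORCE
 *   IFM_100_TX / IFM_10_T  set MACFORCE,   set FDXFORCE only when
 *                          full duplex is selected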
2223 */ 2224 2225 switch (IFM_SUBTYPE(ife->ifm_media)) { 2226 case IFM_AUTO: 2227 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2228 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2229 break; 2230 case IFM_1000_T: 2231 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2232 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2233 break; 2234 case IFM_100_TX: 2235 case IFM_10_T: 2236 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2237 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { 2238 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2239 } else { 2240 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); 2241 } 2242 break; 2243 default: 2244 device_printf(sc->vge_dev, "unknown media type: %x\n", 2245 IFM_SUBTYPE(ife->ifm_media)); 2246 break; 2247 } 2248 } 2249 2250 static int 2251 vge_ioctl(if_t ifp, u_long command, caddr_t data) 2252 { 2253 struct vge_softc *sc = if_getsoftc(ifp); 2254 struct ifreq *ifr = (struct ifreq *) data; 2255 struct mii_data *mii; 2256 int error = 0, mask; 2257 2258 switch (command) { 2259 case SIOCSIFMTU: 2260 VGE_LOCK(sc); 2261 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VGE_JUMBO_MTU) 2262 error = EINVAL; 2263 else if (if_getmtu(ifp) != ifr->ifr_mtu) { 2264 if (ifr->ifr_mtu > ETHERMTU && 2265 (sc->vge_flags & VGE_FLAG_JUMBO) == 0) 2266 error = EINVAL; 2267 else 2268 if_setmtu(ifp, ifr->ifr_mtu); 2269 } 2270 VGE_UNLOCK(sc); 2271 break; 2272 case SIOCSIFFLAGS: 2273 VGE_LOCK(sc); 2274 if ((if_getflags(ifp) & IFF_UP) != 0) { 2275 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && 2276 ((if_getflags(ifp) ^ sc->vge_if_flags) & 2277 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2278 vge_rxfilter(sc); 2279 else 2280 vge_init_locked(sc); 2281 } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 2282 vge_stop(sc); 2283 sc->vge_if_flags = if_getflags(ifp); 2284 VGE_UNLOCK(sc); 2285 break; 2286 case SIOCADDMULTI: 2287 case SIOCDELMULTI: 2288 VGE_LOCK(sc); 2289 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2290 vge_rxfilter(sc); 2291 VGE_UNLOCK(sc); 2292 break; 2293 case SIOCGIFMEDIA: 2294 case SIOCSIFMEDIA: 2295 mii = device_get_softc(sc->vge_miibus); 2296 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2297 break; 2298 case SIOCSIFCAP: 2299 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 2300 #ifdef DEVICE_POLLING 2301 if (mask & IFCAP_POLLING) { 2302 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2303 error = ether_poll_register(vge_poll, ifp); 2304 if (error) 2305 return (error); 2306 VGE_LOCK(sc); 2307 /* Disable interrupts */ 2308 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS_POLLING); 2309 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2310 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2311 if_setcapenablebit(ifp, IFCAP_POLLING, 0); 2312 VGE_UNLOCK(sc); 2313 } else { 2314 error = ether_poll_deregister(ifp); 2315 /* Enable interrupts. 
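 * Restore the full VGE_INTRS mask, ack any pending status and unmask
 * the global interrupt bit via the CRS3 register.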
*/ 2316 VGE_LOCK(sc); 2317 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); 2318 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2319 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); 2320 if_setcapenablebit(ifp, 0, IFCAP_POLLING); 2321 VGE_UNLOCK(sc); 2322 } 2323 } 2324 #endif /* DEVICE_POLLING */ 2325 VGE_LOCK(sc); 2326 if ((mask & IFCAP_TXCSUM) != 0 && 2327 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { 2328 if_togglecapenable(ifp, IFCAP_TXCSUM); 2329 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 2330 if_sethwassistbits(ifp, VGE_CSUM_FEATURES, 0); 2331 else 2332 if_sethwassistbits(ifp, 0, VGE_CSUM_FEATURES); 2333 } 2334 if ((mask & IFCAP_RXCSUM) != 0 && 2335 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) 2336 if_togglecapenable(ifp, IFCAP_RXCSUM); 2337 if ((mask & IFCAP_WOL_UCAST) != 0 && 2338 (if_getcapabilities(ifp) & IFCAP_WOL_UCAST) != 0) 2339 if_togglecapenable(ifp, IFCAP_WOL_UCAST); 2340 if ((mask & IFCAP_WOL_MCAST) != 0 && 2341 (if_getcapabilities(ifp) & IFCAP_WOL_MCAST) != 0) 2342 if_togglecapenable(ifp, IFCAP_WOL_MCAST); 2343 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2344 (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0) 2345 if_togglecapenable(ifp, IFCAP_WOL_MAGIC); 2346 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2347 (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0) 2348 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); 2349 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2350 (IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) { 2351 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 2352 vge_setvlan(sc); 2353 } 2354 VGE_UNLOCK(sc); 2355 VLAN_CAPABILITIES(ifp); 2356 break; 2357 default: 2358 error = ether_ioctl(ifp, command, data); 2359 break; 2360 } 2361 2362 return (error); 2363 } 2364 2365 static void 2366 vge_watchdog(void *arg) 2367 { 2368 struct vge_softc *sc; 2369 if_t ifp; 2370 2371 sc = arg; 2372 VGE_LOCK_ASSERT(sc); 2373 vge_stats_update(sc); 2374 callout_reset(&sc->vge_watchdog, hz, vge_watchdog, sc); 2375 if (sc->vge_timer == 0 || --sc->vge_timer > 0) 2376 return; 2377 2378 ifp = sc->vge_ifp; 2379 if_printf(ifp, "watchdog timeout\n"); 2380 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2381 2382 vge_txeof(sc); 2383 vge_rxeof(sc, VGE_RX_DESC_CNT); 2384 2385 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 2386 vge_init_locked(sc); 2387 } 2388 2389 /* 2390 * Stop the adapter and free any mbufs allocated to the 2391 * RX and TX lists. 2392 */ 2393 static void 2394 vge_stop(struct vge_softc *sc) 2395 { 2396 if_t ifp; 2397 2398 VGE_LOCK_ASSERT(sc); 2399 ifp = sc->vge_ifp; 2400 sc->vge_timer = 0; 2401 callout_stop(&sc->vge_watchdog); 2402 2403 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 2404 2405 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); 2406 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); 2407 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); 2408 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); 2409 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); 2410 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); 2411 2412 vge_stats_update(sc); 2413 VGE_CHAIN_RESET(sc); 2414 vge_txeof(sc); 2415 vge_freebufs(sc); 2416 } 2417 2418 /* 2419 * Device suspend routine. Stop the interface and save some PCI 2420 * settings in case the BIOS doesn't restore them properly on 2421 * resume. 2422 */ 2423 static int 2424 vge_suspend(device_t dev) 2425 { 2426 struct vge_softc *sc; 2427 2428 sc = device_get_softc(dev); 2429 2430 VGE_LOCK(sc); 2431 vge_stop(sc); 2432 vge_setwol(sc); 2433 sc->vge_flags |= VGE_FLAG_SUSPENDED; 2434 VGE_UNLOCK(sc); 2435 2436 return (0); 2437 } 2438 2439 /* 2440 * Device resume routine. 
Restore some PCI settings in case the BIOS 2441 * doesn't, re-enable busmastering, and restart the interface if 2442 * appropriate. 2443 */ 2444 static int 2445 vge_resume(device_t dev) 2446 { 2447 struct vge_softc *sc; 2448 if_t ifp; 2449 uint16_t pmstat; 2450 2451 sc = device_get_softc(dev); 2452 VGE_LOCK(sc); 2453 if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) { 2454 /* Disable PME and clear PME status. */ 2455 pmstat = pci_read_config(sc->vge_dev, 2456 sc->vge_pmcap + PCIR_POWER_STATUS, 2); 2457 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2458 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2459 pci_write_config(sc->vge_dev, 2460 sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2461 } 2462 } 2463 vge_clrwol(sc); 2464 /* Restart MII auto-polling. */ 2465 vge_miipoll_start(sc); 2466 ifp = sc->vge_ifp; 2467 /* Reinitialize interface if necessary. */ 2468 if ((if_getflags(ifp) & IFF_UP) != 0) { 2469 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 2470 vge_init_locked(sc); 2471 } 2472 sc->vge_flags &= ~VGE_FLAG_SUSPENDED; 2473 VGE_UNLOCK(sc); 2474 2475 return (0); 2476 } 2477 2478 /* 2479 * Stop all chip I/O so that the kernel's probe routines don't 2480 * get confused by errant DMAs when rebooting. 2481 */ 2482 static int 2483 vge_shutdown(device_t dev) 2484 { 2485 2486 return (vge_suspend(dev)); 2487 } 2488 2489 #define VGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2490 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2491 2492 static void 2493 vge_sysctl_node(struct vge_softc *sc) 2494 { 2495 struct sysctl_ctx_list *ctx; 2496 struct sysctl_oid_list *child, *parent; 2497 struct sysctl_oid *tree; 2498 struct vge_hw_stats *stats; 2499 2500 stats = &sc->vge_stats; 2501 ctx = device_get_sysctl_ctx(sc->vge_dev); 2502 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vge_dev)); 2503 2504 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_holdoff", 2505 CTLFLAG_RW, &sc->vge_int_holdoff, 0, "interrupt holdoff"); 2506 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_coal_pkt", 2507 CTLFLAG_RW, &sc->vge_rx_coal_pkt, 0, "rx coalescing packet"); 2508 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_coal_pkt", 2509 CTLFLAG_RW, &sc->vge_tx_coal_pkt, 0, "tx coalescing packet"); 2510 2511 /* Pull in device tunables. */ 2512 sc->vge_int_holdoff = VGE_INT_HOLDOFF_DEFAULT; 2513 resource_int_value(device_get_name(sc->vge_dev), 2514 device_get_unit(sc->vge_dev), "int_holdoff", &sc->vge_int_holdoff); 2515 sc->vge_rx_coal_pkt = VGE_RX_COAL_PKT_DEFAULT; 2516 resource_int_value(device_get_name(sc->vge_dev), 2517 device_get_unit(sc->vge_dev), "rx_coal_pkt", &sc->vge_rx_coal_pkt); 2518 sc->vge_tx_coal_pkt = VGE_TX_COAL_PKT_DEFAULT; 2519 resource_int_value(device_get_name(sc->vge_dev), 2520 device_get_unit(sc->vge_dev), "tx_coal_pkt", &sc->vge_tx_coal_pkt); 2521 2522 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", 2523 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VGE statistics"); 2524 parent = SYSCTL_CHILDREN(tree); 2525 2526 /* Rx statistics. 
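 * The read-only nodes created below export the Rx counters that
 * vge_stats_update() accumulates from the hardware MIB.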
*/
2527 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
2528 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
2529 child = SYSCTL_CHILDREN(tree);
2530 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames",
2531 &stats->rx_frames, "frames");
2532 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
2533 &stats->rx_good_frames, "Good frames");
2534 VGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2535 &stats->rx_fifo_oflows, "FIFO overflows");
2536 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts",
2537 &stats->rx_runts, "Too short frames");
2538 VGE_SYSCTL_STAT_ADD32(ctx, child, "runts_errs",
2539 &stats->rx_runts_errs, "Too short frames with errors");
2540 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
2541 &stats->rx_pkts_64, "64 bytes frames");
2542 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
2543 &stats->rx_pkts_65_127, "65 to 127 bytes frames");
2544 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
2545 &stats->rx_pkts_128_255, "128 to 255 bytes frames");
2546 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
2547 &stats->rx_pkts_256_511, "256 to 511 bytes frames");
2548 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
2549 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
2550 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
2551 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
2552 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
2553 &stats->rx_pkts_1519_max, "1519 to max frames");
2554 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max_errs",
2555 &stats->rx_pkts_1519_max_errs, "1519 to max frames with errors");
2556 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo",
2557 &stats->rx_jumbos, "Jumbo frames");
2558 VGE_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2559 &stats->rx_crcerrs, "CRC errors");
2560 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
2561 &stats->rx_pause_frames, "Pause frames");
2562 VGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
2563 &stats->rx_alignerrs, "Alignment errors");
2564 VGE_SYSCTL_STAT_ADD32(ctx, child, "nobufs",
2565 &stats->rx_nobufs, "Frames with no buffer event");
2566 VGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
2567 &stats->rx_symerrs, "Frames with symbol errors");
2568 VGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
2569 &stats->rx_lenerrs, "Frames with mismatched length");
2570
2571 /* Tx statistics.
*/ 2572 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", 2573 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics"); 2574 child = SYSCTL_CHILDREN(tree); 2575 VGE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2576 &stats->tx_good_frames, "Good frames"); 2577 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 2578 &stats->tx_pkts_64, "64 bytes frames"); 2579 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 2580 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 2581 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 2582 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 2583 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 2584 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 2585 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 2586 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 2587 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 2588 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 2589 VGE_SYSCTL_STAT_ADD32(ctx, child, "frames_jumbo", 2590 &stats->tx_jumbos, "Jumbo frames"); 2591 VGE_SYSCTL_STAT_ADD32(ctx, child, "colls", 2592 &stats->tx_colls, "Collisions"); 2593 VGE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2594 &stats->tx_latecolls, "Late collisions"); 2595 VGE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 2596 &stats->tx_pause, "Pause frames"); 2597 #ifdef VGE_ENABLE_SQEERR 2598 VGE_SYSCTL_STAT_ADD32(ctx, child, "sqeerrs", 2599 &stats->tx_sqeerrs, "SQE errors"); 2600 #endif 2601 /* Clear MAC statistics. */ 2602 vge_stats_clear(sc); 2603 } 2604 2605 #undef VGE_SYSCTL_STAT_ADD32 2606 2607 static void 2608 vge_stats_clear(struct vge_softc *sc) 2609 { 2610 int i; 2611 2612 CSR_WRITE_1(sc, VGE_MIBCSR, 2613 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FREEZE); 2614 CSR_WRITE_1(sc, VGE_MIBCSR, 2615 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_CLR); 2616 for (i = VGE_TIMEOUT; i > 0; i--) { 2617 DELAY(1); 2618 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_CLR) == 0) 2619 break; 2620 } 2621 if (i == 0) 2622 device_printf(sc->vge_dev, "MIB clear timed out!\n"); 2623 CSR_WRITE_1(sc, VGE_MIBCSR, CSR_READ_1(sc, VGE_MIBCSR) & 2624 ~VGE_MIBCSR_FREEZE); 2625 } 2626 2627 static void 2628 vge_stats_update(struct vge_softc *sc) 2629 { 2630 struct vge_hw_stats *stats; 2631 if_t ifp; 2632 uint32_t mib[VGE_MIB_CNT], val; 2633 int i; 2634 2635 VGE_LOCK_ASSERT(sc); 2636 2637 stats = &sc->vge_stats; 2638 ifp = sc->vge_ifp; 2639 2640 CSR_WRITE_1(sc, VGE_MIBCSR, 2641 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_FLUSH); 2642 for (i = VGE_TIMEOUT; i > 0; i--) { 2643 DELAY(1); 2644 if ((CSR_READ_1(sc, VGE_MIBCSR) & VGE_MIBCSR_FLUSH) == 0) 2645 break; 2646 } 2647 if (i == 0) { 2648 device_printf(sc->vge_dev, "MIB counter dump timed out!\n"); 2649 vge_stats_clear(sc); 2650 return; 2651 } 2652 2653 bzero(mib, sizeof(mib)); 2654 reset_idx: 2655 /* Set MIB read index to 0. */ 2656 CSR_WRITE_1(sc, VGE_MIBCSR, 2657 CSR_READ_1(sc, VGE_MIBCSR) | VGE_MIBCSR_RINI); 2658 for (i = 0; i < VGE_MIB_CNT; i++) { 2659 val = CSR_READ_4(sc, VGE_MIBDATA); 2660 if (i != VGE_MIB_DATA_IDX(val)) { 2661 /* Reading interrupted. */ 2662 goto reset_idx; 2663 } 2664 mib[i] = val & VGE_MIB_DATA_MASK; 2665 } 2666 2667 /* Rx stats. 
*/
2668 stats->rx_frames += mib[VGE_MIB_RX_FRAMES];
2669 stats->rx_good_frames += mib[VGE_MIB_RX_GOOD_FRAMES];
2670 stats->rx_fifo_oflows += mib[VGE_MIB_RX_FIFO_OVERRUNS];
2671 stats->rx_runts += mib[VGE_MIB_RX_RUNTS];
2672 stats->rx_runts_errs += mib[VGE_MIB_RX_RUNTS_ERRS];
2673 stats->rx_pkts_64 += mib[VGE_MIB_RX_PKTS_64];
2674 stats->rx_pkts_65_127 += mib[VGE_MIB_RX_PKTS_65_127];
2675 stats->rx_pkts_128_255 += mib[VGE_MIB_RX_PKTS_128_255];
2676 stats->rx_pkts_256_511 += mib[VGE_MIB_RX_PKTS_256_511];
2677 stats->rx_pkts_512_1023 += mib[VGE_MIB_RX_PKTS_512_1023];
2678 stats->rx_pkts_1024_1518 += mib[VGE_MIB_RX_PKTS_1024_1518];
2679 stats->rx_pkts_1519_max += mib[VGE_MIB_RX_PKTS_1519_MAX];
2680 stats->rx_pkts_1519_max_errs += mib[VGE_MIB_RX_PKTS_1519_MAX_ERRS];
2681 stats->rx_jumbos += mib[VGE_MIB_RX_JUMBOS];
2682 stats->rx_crcerrs += mib[VGE_MIB_RX_CRCERRS];
2683 stats->rx_pause_frames += mib[VGE_MIB_RX_PAUSE];
2684 stats->rx_alignerrs += mib[VGE_MIB_RX_ALIGNERRS];
2685 stats->rx_nobufs += mib[VGE_MIB_RX_NOBUFS];
2686 stats->rx_symerrs += mib[VGE_MIB_RX_SYMERRS];
2687 stats->rx_lenerrs += mib[VGE_MIB_RX_LENERRS];
2688
2689 /* Tx stats. */
2690 stats->tx_good_frames += mib[VGE_MIB_TX_GOOD_FRAMES];
2691 stats->tx_pkts_64 += mib[VGE_MIB_TX_PKTS_64];
2692 stats->tx_pkts_65_127 += mib[VGE_MIB_TX_PKTS_65_127];
2693 stats->tx_pkts_128_255 += mib[VGE_MIB_TX_PKTS_128_255];
2694 stats->tx_pkts_256_511 += mib[VGE_MIB_TX_PKTS_256_511];
2695 stats->tx_pkts_512_1023 += mib[VGE_MIB_TX_PKTS_512_1023];
2696 stats->tx_pkts_1024_1518 += mib[VGE_MIB_TX_PKTS_1024_1518];
2697 stats->tx_jumbos += mib[VGE_MIB_TX_JUMBOS];
2698 stats->tx_colls += mib[VGE_MIB_TX_COLLS];
2699 stats->tx_pause += mib[VGE_MIB_TX_PAUSE];
2700 #ifdef VGE_ENABLE_SQEERR
2701 stats->tx_sqeerrs += mib[VGE_MIB_TX_SQEERRS];
2702 #endif
2703 stats->tx_latecolls += mib[VGE_MIB_TX_LATECOLLS];
2704
2705 /* Update counters in ifnet. */
2706 if_inc_counter(ifp, IFCOUNTER_OPACKETS, mib[VGE_MIB_TX_GOOD_FRAMES]);
2707
2708 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2709 mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);
2710
2711 if_inc_counter(ifp, IFCOUNTER_OERRORS,
2712 mib[VGE_MIB_TX_COLLS] + mib[VGE_MIB_TX_LATECOLLS]);
2713
2714 if_inc_counter(ifp, IFCOUNTER_IPACKETS, mib[VGE_MIB_RX_GOOD_FRAMES]);
2715
2716 if_inc_counter(ifp, IFCOUNTER_IERRORS,
2717 mib[VGE_MIB_RX_FIFO_OVERRUNS] +
2718 mib[VGE_MIB_RX_RUNTS] +
2719 mib[VGE_MIB_RX_RUNTS_ERRS] +
2720 mib[VGE_MIB_RX_CRCERRS] +
2721 mib[VGE_MIB_RX_ALIGNERRS] +
2722 mib[VGE_MIB_RX_NOBUFS] +
2723 mib[VGE_MIB_RX_SYMERRS] +
2724 mib[VGE_MIB_RX_LENERRS]);
2725 }
2726
2727 static void
2728 vge_intr_holdoff(struct vge_softc *sc)
2729 {
2730 uint8_t intctl;
2731
2732 VGE_LOCK_ASSERT(sc);
2733
2734 /*
2735 * Set Tx interrupt suppression threshold.
2736 * It is possible to use the single-shot timer in the VGE_CRS1
2737 * register in the Tx path so that the driver can suppress most
2738 * Tx completion interrupts. However, this requires an additional
2739 * access to VGE_CRS1 to reload the timer, in addition to
2740 * activating the Tx kick command. Another downside is that we do
2741 * not know in advance what single-shot timer value should be used,
2742 * so reclaiming transmitted mbufs could be delayed a lot, which in
2743 * turn slows down Tx operation.
2744 */
2745 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_TXSUPPTHR);
2746 CSR_WRITE_1(sc, VGE_TXSUPPTHR, sc->vge_tx_coal_pkt);
2747
2748 /* Set Rx interrupt suppression threshold.
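 * As with the Tx threshold above, the register sits behind the page
 * select window, so VGE_CAMCTL is written first to expose it.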
*/
2749 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
2750 CSR_WRITE_1(sc, VGE_RXSUPPTHR, sc->vge_rx_coal_pkt);
2751
2752 intctl = CSR_READ_1(sc, VGE_INTCTL1);
2753 intctl &= ~VGE_INTCTL_SC_RELOAD;
2754 intctl |= VGE_INTCTL_HC_RELOAD;
2755 if (sc->vge_tx_coal_pkt <= 0)
2756 intctl |= VGE_INTCTL_TXINTSUP_DISABLE;
2757 else
2758 intctl &= ~VGE_INTCTL_TXINTSUP_DISABLE;
2759 if (sc->vge_rx_coal_pkt <= 0)
2760 intctl |= VGE_INTCTL_RXINTSUP_DISABLE;
2761 else
2762 intctl &= ~VGE_INTCTL_RXINTSUP_DISABLE;
2763 CSR_WRITE_1(sc, VGE_INTCTL1, intctl);
2764 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_HOLDOFF);
2765 if (sc->vge_int_holdoff > 0) {
2766 /* Set interrupt holdoff timer. */
2767 CSR_WRITE_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
2768 CSR_WRITE_1(sc, VGE_INTHOLDOFF,
2769 VGE_INT_HOLDOFF_USEC(sc->vge_int_holdoff));
2770 /* Enable holdoff timer. */
2771 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
2772 }
2773 }
2774
2775 static void
2776 vge_setlinkspeed(struct vge_softc *sc)
2777 {
2778 struct mii_data *mii;
2779 int aneg, i;
2780
2781 VGE_LOCK_ASSERT(sc);
2782
2783 mii = device_get_softc(sc->vge_miibus);
2784 mii_pollstat(mii);
2785 aneg = 0;
2786 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2787 (IFM_ACTIVE | IFM_AVALID)) {
2788 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2789 case IFM_10_T:
2790 case IFM_100_TX:
2791 return;
2792 case IFM_1000_T:
2793 aneg++;	/* FALLTHROUGH */
2794 default:
2795 break;
2796 }
2797 }
2798 /* Clear forced MAC speed/duplex configuration. */
2799 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2800 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2801 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_100T2CR, 0);
2802 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_ANAR,
2803 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
2804 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2805 BMCR_AUTOEN | BMCR_STARTNEG);
2806 DELAY(1000);
2807 if (aneg != 0) {
2808 /* Poll link state until vge(4) gets a 10/100 link. */
2809 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
2810 mii_pollstat(mii);
2811 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
2812 == (IFM_ACTIVE | IFM_AVALID)) {
2813 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2814 case IFM_10_T:
2815 case IFM_100_TX:
2816 return;
2817 default:
2818 break;
2819 }
2820 }
2821 VGE_UNLOCK(sc);
2822 pause("vgelnk", hz);
2823 VGE_LOCK(sc);
2824 }
2825 if (i == MII_ANEGTICKS_GIGE)
2826 device_printf(sc->vge_dev, "establishing link failed, "
2827 "WOL may not work!\n");
2828 }
2829 /*
2830 * No link; force the MAC into a 100Mbps, full-duplex link.
2831 * This is the last resort and may or may not work.
2832 */
2833 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
2834 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
2835 }
2836
2837 static void
2838 vge_setwol(struct vge_softc *sc)
2839 {
2840 if_t ifp;
2841 uint16_t pmstat;
2842 uint8_t val;
2843
2844 VGE_LOCK_ASSERT(sc);
2845
2846 if ((sc->vge_flags & VGE_FLAG_PMCAP) == 0) {
2847 /* No PME capability, PHY power down. */
2848 vge_miibus_writereg(sc->vge_dev, sc->vge_phyaddr, MII_BMCR,
2849 BMCR_PDOWN);
2850 vge_miipoll_stop(sc);
2851 return;
2852 }
2853
2854 ifp = sc->vge_ifp;
2855
2856 /* Clear WOL on pattern match. */
2857 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL);
2858 /* Disable WOL on magic/unicast packet.
*/ 2859 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2860 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2861 VGE_WOLCFG_PMEOVR); 2862 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) { 2863 vge_setlinkspeed(sc); 2864 val = 0; 2865 if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0) 2866 val |= VGE_WOLCR1_UCAST; 2867 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) 2868 val |= VGE_WOLCR1_MAGIC; 2869 CSR_WRITE_1(sc, VGE_WOLCR1S, val); 2870 val = 0; 2871 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0) 2872 val |= VGE_WOLCFG_SAM | VGE_WOLCFG_SAB; 2873 CSR_WRITE_1(sc, VGE_WOLCFGS, val | VGE_WOLCFG_PMEOVR); 2874 /* Disable MII auto-polling. */ 2875 vge_miipoll_stop(sc); 2876 } 2877 CSR_SETBIT_1(sc, VGE_DIAGCTL, 2878 VGE_DIAGCTL_MACFORCE | VGE_DIAGCTL_FDXFORCE); 2879 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2880 2881 /* Clear WOL status on pattern match. */ 2882 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2883 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2884 2885 val = CSR_READ_1(sc, VGE_PWRSTAT); 2886 val |= VGE_STICKHW_SWPTAG; 2887 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2888 /* Put hardware into sleep. */ 2889 val = CSR_READ_1(sc, VGE_PWRSTAT); 2890 val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1; 2891 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2892 /* Request PME if WOL is requested. */ 2893 pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap + 2894 PCIR_POWER_STATUS, 2); 2895 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2896 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) 2897 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2898 pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS, 2899 pmstat, 2); 2900 } 2901 2902 static void 2903 vge_clrwol(struct vge_softc *sc) 2904 { 2905 uint8_t val; 2906 2907 val = CSR_READ_1(sc, VGE_PWRSTAT); 2908 val &= ~VGE_STICKHW_SWPTAG; 2909 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2910 /* Disable WOL and clear power state indicator. */ 2911 val = CSR_READ_1(sc, VGE_PWRSTAT); 2912 val &= ~(VGE_STICKHW_DS0 | VGE_STICKHW_DS1); 2913 CSR_WRITE_1(sc, VGE_PWRSTAT, val); 2914 2915 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_GMII); 2916 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); 2917 2918 /* Clear WOL on pattern match. */ 2919 CSR_WRITE_1(sc, VGE_WOLCR0C, VGE_WOLCR0_PATTERN_ALL); 2920 /* Disable WOL on magic/unicast packet. */ 2921 CSR_WRITE_1(sc, VGE_WOLCR1C, 0x0F); 2922 CSR_WRITE_1(sc, VGE_WOLCFGC, VGE_WOLCFG_SAB | VGE_WOLCFG_SAM | 2923 VGE_WOLCFG_PMEOVR); 2924 /* Clear WOL status on pattern match. */ 2925 CSR_WRITE_1(sc, VGE_WOLSR0C, 0xFF); 2926 CSR_WRITE_1(sc, VGE_WOLSR1C, 0xFF); 2927 } 2928