1 /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ 2 3 /*- 4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp> 5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 22 23 #include <sys/cdefs.h> 24 __FBSDID("$FreeBSD$"); 25 26 #ifdef HAVE_KERNEL_OPTION_HEADERS 27 #include "opt_device_polling.h" 28 #endif 29 30 #include <sys/param.h> 31 #include <sys/endian.h> 32 #include <sys/systm.h> 33 #include <sys/sockio.h> 34 #include <sys/mbuf.h> 35 #include <sys/malloc.h> 36 #include <sys/module.h> 37 #include <sys/kernel.h> 38 #include <sys/queue.h> 39 #include <sys/socket.h> 40 #include <sys/sysctl.h> 41 #include <sys/taskqueue.h> 42 43 #include <net/if.h> 44 #include <net/if_arp.h> 45 #include <net/ethernet.h> 46 #include <net/if_dl.h> 47 #include <net/if_media.h> 48 #include <net/if_types.h> 49 #include <net/if_vlan_var.h> 50 51 #include <net/bpf.h> 52 53 #include <machine/bus.h> 54 #include <machine/resource.h> 55 #include <sys/bus.h> 56 #include <sys/rman.h> 57 58 #include <dev/mii/mii.h> 59 #include <dev/mii/miivar.h> 60 61 #include <dev/pci/pcireg.h> 62 #include <dev/pci/pcivar.h> 63 64 #include <dev/nfe/if_nfereg.h> 65 #include <dev/nfe/if_nfevar.h> 66 67 MODULE_DEPEND(nfe, pci, 1, 1, 1); 68 MODULE_DEPEND(nfe, ether, 1, 1, 1); 69 MODULE_DEPEND(nfe, miibus, 1, 1, 1); 70 71 /* "device miibus" required. See GENERIC if you get errors here. 
*/ 72 #include "miibus_if.h" 73 74 static int nfe_probe(device_t); 75 static int nfe_attach(device_t); 76 static int nfe_detach(device_t); 77 static int nfe_suspend(device_t); 78 static int nfe_resume(device_t); 79 static int nfe_shutdown(device_t); 80 static int nfe_can_use_msix(struct nfe_softc *); 81 static void nfe_power(struct nfe_softc *); 82 static int nfe_miibus_readreg(device_t, int, int); 83 static int nfe_miibus_writereg(device_t, int, int, int); 84 static void nfe_miibus_statchg(device_t); 85 static void nfe_mac_config(struct nfe_softc *, struct mii_data *); 86 static void nfe_set_intr(struct nfe_softc *); 87 static __inline void nfe_enable_intr(struct nfe_softc *); 88 static __inline void nfe_disable_intr(struct nfe_softc *); 89 static int nfe_ioctl(struct ifnet *, u_long, caddr_t); 90 static void nfe_alloc_msix(struct nfe_softc *, int); 91 static int nfe_intr(void *); 92 static void nfe_int_task(void *, int); 93 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int); 94 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int); 95 static int nfe_newbuf(struct nfe_softc *, int); 96 static int nfe_jnewbuf(struct nfe_softc *, int); 97 static int nfe_rxeof(struct nfe_softc *, int, int *); 98 static int nfe_jrxeof(struct nfe_softc *, int, int *); 99 static void nfe_txeof(struct nfe_softc *); 100 static int nfe_encap(struct nfe_softc *, struct mbuf **); 101 static void nfe_setmulti(struct nfe_softc *); 102 static void nfe_tx_task(void *, int); 103 static void nfe_start(struct ifnet *); 104 static void nfe_watchdog(struct ifnet *); 105 static void nfe_init(void *); 106 static void nfe_init_locked(void *); 107 static void nfe_stop(struct ifnet *); 108 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 109 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 110 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 111 static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 112 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 113 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 114 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 115 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 116 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 117 static int nfe_ifmedia_upd(struct ifnet *); 118 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 119 static void nfe_tick(void *); 120 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 121 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *); 122 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int); 123 124 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 125 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS); 126 static void nfe_sysctl_node(struct nfe_softc *); 127 static void nfe_stats_clear(struct nfe_softc *); 128 static void nfe_stats_update(struct nfe_softc *); 129 static void nfe_set_linkspeed(struct nfe_softc *); 130 static void nfe_set_wol(struct nfe_softc *); 131 132 #ifdef NFE_DEBUG 133 static int nfedebug = 0; 134 #define DPRINTF(sc, ...) do { \ 135 if (nfedebug) \ 136 device_printf((sc)->nfe_dev, __VA_ARGS__); \ 137 } while (0) 138 #define DPRINTFN(sc, n, ...) do { \ 139 if (nfedebug >= (n)) \ 140 device_printf((sc)->nfe_dev, __VA_ARGS__); \ 141 } while (0) 142 #else 143 #define DPRINTF(sc, ...) 144 #define DPRINTFN(sc, n, ...) 
145 #endif 146 147 #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) 148 #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) 149 #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) 150 151 /* Tunables. */ 152 static int msi_disable = 0; 153 static int msix_disable = 0; 154 static int jumbo_disable = 0; 155 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable); 156 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable); 157 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable); 158 159 static device_method_t nfe_methods[] = { 160 /* Device interface */ 161 DEVMETHOD(device_probe, nfe_probe), 162 DEVMETHOD(device_attach, nfe_attach), 163 DEVMETHOD(device_detach, nfe_detach), 164 DEVMETHOD(device_suspend, nfe_suspend), 165 DEVMETHOD(device_resume, nfe_resume), 166 DEVMETHOD(device_shutdown, nfe_shutdown), 167 168 /* bus interface */ 169 DEVMETHOD(bus_print_child, bus_generic_print_child), 170 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 171 172 /* MII interface */ 173 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 174 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 175 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 176 177 { NULL, NULL } 178 }; 179 180 static driver_t nfe_driver = { 181 "nfe", 182 nfe_methods, 183 sizeof(struct nfe_softc) 184 }; 185 186 static devclass_t nfe_devclass; 187 188 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); 189 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); 190 191 static struct nfe_type nfe_devs[] = { 192 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 193 "NVIDIA nForce MCP Networking Adapter"}, 194 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 195 "NVIDIA nForce2 MCP2 Networking Adapter"}, 196 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, 197 "NVIDIA nForce2 400 MCP4 Networking Adapter"}, 198 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, 199 "NVIDIA nForce2 400 MCP5 Networking Adapter"}, 200 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 201 "NVIDIA nForce3 MCP3 Networking Adapter"}, 202 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, 203 "NVIDIA nForce3 250 MCP6 Networking Adapter"}, 204 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 205 "NVIDIA nForce3 MCP7 Networking Adapter"}, 206 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, 207 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, 208 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, 209 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, 210 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 211 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */ 212 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 213 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */ 214 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, 215 "NVIDIA nForce 430 MCP12 Networking Adapter"}, 216 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, 217 "NVIDIA nForce 430 MCP13 Networking Adapter"}, 218 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 219 "NVIDIA nForce MCP55 Networking Adapter"}, 220 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 221 "NVIDIA nForce MCP55 Networking Adapter"}, 222 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 223 "NVIDIA nForce MCP61 Networking Adapter"}, 224 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 225 "NVIDIA nForce MCP61 Networking Adapter"}, 226 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 227 "NVIDIA nForce MCP61 Networking Adapter"}, 228 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 229 "NVIDIA nForce MCP61 Networking Adapter"}, 230 {PCI_VENDOR_NVIDIA, 
PCI_PRODUCT_NVIDIA_MCP65_LAN1, 231 "NVIDIA nForce MCP65 Networking Adapter"}, 232 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 233 "NVIDIA nForce MCP65 Networking Adapter"}, 234 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 235 "NVIDIA nForce MCP65 Networking Adapter"}, 236 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 237 "NVIDIA nForce MCP65 Networking Adapter"}, 238 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 239 "NVIDIA nForce MCP67 Networking Adapter"}, 240 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 241 "NVIDIA nForce MCP67 Networking Adapter"}, 242 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 243 "NVIDIA nForce MCP67 Networking Adapter"}, 244 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 245 "NVIDIA nForce MCP67 Networking Adapter"}, 246 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 247 "NVIDIA nForce MCP73 Networking Adapter"}, 248 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 249 "NVIDIA nForce MCP73 Networking Adapter"}, 250 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 251 "NVIDIA nForce MCP73 Networking Adapter"}, 252 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 253 "NVIDIA nForce MCP73 Networking Adapter"}, 254 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, 255 "NVIDIA nForce MCP77 Networking Adapter"}, 256 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, 257 "NVIDIA nForce MCP77 Networking Adapter"}, 258 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, 259 "NVIDIA nForce MCP77 Networking Adapter"}, 260 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, 261 "NVIDIA nForce MCP77 Networking Adapter"}, 262 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, 263 "NVIDIA nForce MCP79 Networking Adapter"}, 264 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, 265 "NVIDIA nForce MCP79 Networking Adapter"}, 266 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, 267 "NVIDIA nForce MCP79 Networking Adapter"}, 268 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, 269 "NVIDIA nForce MCP79 Networking Adapter"}, 270 {0, 0, NULL} 271 }; 272 273 274 /* Probe for supported hardware ID's */ 275 static int 276 nfe_probe(device_t dev) 277 { 278 struct nfe_type *t; 279 280 t = nfe_devs; 281 /* Check for matching PCI DEVICE ID's */ 282 while (t->name != NULL) { 283 if ((pci_get_vendor(dev) == t->vid_id) && 284 (pci_get_device(dev) == t->dev_id)) { 285 device_set_desc(dev, t->name); 286 return (BUS_PROBE_DEFAULT); 287 } 288 t++; 289 } 290 291 return (ENXIO); 292 } 293 294 static void 295 nfe_alloc_msix(struct nfe_softc *sc, int count) 296 { 297 int rid; 298 299 rid = PCIR_BAR(2); 300 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, 301 &rid, RF_ACTIVE); 302 if (sc->nfe_msix_res == NULL) { 303 device_printf(sc->nfe_dev, 304 "couldn't allocate MSIX table resource\n"); 305 return; 306 } 307 rid = PCIR_BAR(3); 308 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev, 309 SYS_RES_MEMORY, &rid, RF_ACTIVE); 310 if (sc->nfe_msix_pba_res == NULL) { 311 device_printf(sc->nfe_dev, 312 "couldn't allocate MSIX PBA resource\n"); 313 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), 314 sc->nfe_msix_res); 315 sc->nfe_msix_res = NULL; 316 return; 317 } 318 319 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) { 320 if (count == NFE_MSI_MESSAGES) { 321 if (bootverbose) 322 device_printf(sc->nfe_dev, 323 "Using %d MSIX messages\n", count); 324 sc->nfe_msix = 1; 325 } else { 326 if (bootverbose) 327 device_printf(sc->nfe_dev, 328 "couldn't allocate MSIX\n"); 329 pci_release_msi(sc->nfe_dev); 330 
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(3), sc->nfe_msix_pba_res);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(2), sc->nfe_msix_res);
			sc->nfe_msix_pba_res = NULL;
			sc->nfe_msix_res = NULL;
		}
	}
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	bus_addr_t dma_addr_max;
	int error = 0, i, msic, reg, rid;

	sc = device_get_softc(dev);
	sc->nfe_dev = dev;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->nfe_res[0] == NULL) {
		device_printf(dev, "couldn't map memory resources\n");
		mtx_destroy(&sc->nfe_mtx);
		return (ENXIO);
	}

	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t v, width;

		v = pci_read_config(dev, reg + 0x08, 2);
		/* Change max. read request size to 4096. */
		v &= ~(7 << 12);
		v |= (5 << 12);
		pci_write_config(dev, reg + 0x08, v, 2);

		v = pci_read_config(dev, reg + 0x0c, 2);
		/* link capability */
		v = (v >> 4) & 0x0f;
		width = pci_read_config(dev, reg + 0x12, 2);
		/* negotiated link width */
		width = (width >> 4) & 0x3f;
		if (v != width)
			device_printf(sc->nfe_dev,
			    "warning, negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	if (nfe_can_use_msix(sc) == 0) {
		device_printf(sc->nfe_dev,
		    "MSI/MSI-X capability black-listed, will use INTx\n");
		msix_disable = 1;
		msi_disable = 1;
	}

	/* Allocate interrupt */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 &&
		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
			nfe_alloc_msix(sc, msic);
		if (msi_disable == 0 && sc->nfe_msix == 0 &&
		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == NFE_MSI_MESSAGES) {
				if (bootverbose)
					device_printf(dev,
					    "Using %d MSI messages\n", msic);
				sc->nfe_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}

	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
		rid = 0;
		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->nfe_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->nfe_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
		/* Map interrupts to vector 0. */
		if (sc->nfe_msix != 0) {
			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
		} else if (sc->nfe_msi != 0) {
			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
		}
	}

	/* Set IRQ status/mask register.
*/ 444 sc->nfe_irq_status = NFE_IRQ_STATUS; 445 sc->nfe_irq_mask = NFE_IRQ_MASK; 446 sc->nfe_intrs = NFE_IRQ_WANTED; 447 sc->nfe_nointrs = 0; 448 if (sc->nfe_msix != 0) { 449 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS; 450 sc->nfe_nointrs = NFE_IRQ_WANTED; 451 } else if (sc->nfe_msi != 0) { 452 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK; 453 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED; 454 } 455 456 sc->nfe_devid = pci_get_device(dev); 457 sc->nfe_revid = pci_get_revid(dev); 458 sc->nfe_flags = 0; 459 460 switch (sc->nfe_devid) { 461 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 462 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 463 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 464 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 465 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; 466 break; 467 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 468 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 469 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1; 470 break; 471 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 472 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 473 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 474 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 475 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 476 NFE_MIB_V1; 477 break; 478 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 479 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 480 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 481 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2; 482 break; 483 484 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 485 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 486 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 487 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 488 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 489 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 490 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 491 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 492 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 493 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 494 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 495 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 496 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | 497 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; 498 break; 499 case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 500 case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 501 case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 502 case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 503 /* XXX flow control */ 504 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | 505 NFE_CORRECT_MACADDR | NFE_MIB_V3; 506 break; 507 case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 508 case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 509 case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 510 case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 511 /* XXX flow control */ 512 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 513 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; 514 break; 515 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 516 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 517 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 518 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 519 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | 520 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | 521 NFE_MIB_V2; 522 break; 523 } 524 525 nfe_power(sc); 526 /* Check for reversed ethernet address */ 527 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) 528 sc->nfe_flags |= NFE_CORRECT_MACADDR; 529 nfe_get_macaddr(sc, sc->eaddr); 530 /* 531 * Allocate the parent bus DMA tag appropriate for PCI. 
	 */
	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
		dma_addr_max = NFE_DMA_MAXADDR;
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    dma_addr_max,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error)
		goto fail;

	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);

	/*
	 * Allocate Tx and Rx rings.
	 */
	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
		goto fail;

	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
		goto fail;

	nfe_alloc_jrx_ring(sc, &sc->jrxq);
	/* Create sysctl node. */
	nfe_sysctl_node(sc);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_hwassist = 0;
	ifp->if_capabilities = 0;
	ifp->if_init = nfe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->nfe_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
	}
	ifp->if_capenable = ifp->if_capabilities;

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
			    IFCAP_VLAN_HWTSO;
	}

	if (pci_find_extcap(dev, PCIY_PMG, &reg) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
608 */ 609 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 610 611 #ifdef DEVICE_POLLING 612 ifp->if_capabilities |= IFCAP_POLLING; 613 #endif 614 615 /* Do MII setup */ 616 error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd, 617 nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 618 MIIF_DOPAUSE); 619 if (error != 0) { 620 device_printf(dev, "attaching PHYs failed\n"); 621 goto fail; 622 } 623 ether_ifattach(ifp, sc->eaddr); 624 625 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); 626 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, 627 taskqueue_thread_enqueue, &sc->nfe_tq); 628 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", 629 device_get_nameunit(sc->nfe_dev)); 630 error = 0; 631 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 632 error = bus_setup_intr(dev, sc->nfe_irq[0], 633 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 634 &sc->nfe_intrhand[0]); 635 } else { 636 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 637 error = bus_setup_intr(dev, sc->nfe_irq[i], 638 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 639 &sc->nfe_intrhand[i]); 640 if (error != 0) 641 break; 642 } 643 } 644 if (error) { 645 device_printf(dev, "couldn't set up irq\n"); 646 taskqueue_free(sc->nfe_tq); 647 sc->nfe_tq = NULL; 648 ether_ifdetach(ifp); 649 goto fail; 650 } 651 652 fail: 653 if (error) 654 nfe_detach(dev); 655 656 return (error); 657 } 658 659 660 static int 661 nfe_detach(device_t dev) 662 { 663 struct nfe_softc *sc; 664 struct ifnet *ifp; 665 uint8_t eaddr[ETHER_ADDR_LEN]; 666 int i, rid; 667 668 sc = device_get_softc(dev); 669 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); 670 ifp = sc->nfe_ifp; 671 672 #ifdef DEVICE_POLLING 673 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 674 ether_poll_deregister(ifp); 675 #endif 676 if (device_is_attached(dev)) { 677 NFE_LOCK(sc); 678 nfe_stop(ifp); 679 ifp->if_flags &= ~IFF_UP; 680 NFE_UNLOCK(sc); 681 callout_drain(&sc->nfe_stat_ch); 682 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task); 683 ether_ifdetach(ifp); 684 } 685 686 if (ifp) { 687 /* restore ethernet address */ 688 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 689 for (i = 0; i < ETHER_ADDR_LEN; i++) { 690 eaddr[i] = sc->eaddr[5 - i]; 691 } 692 } else 693 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN); 694 nfe_set_macaddr(sc, eaddr); 695 if_free(ifp); 696 } 697 if (sc->nfe_miibus) 698 device_delete_child(dev, sc->nfe_miibus); 699 bus_generic_detach(dev); 700 if (sc->nfe_tq != NULL) { 701 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task); 702 taskqueue_free(sc->nfe_tq); 703 sc->nfe_tq = NULL; 704 } 705 706 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 707 if (sc->nfe_intrhand[i] != NULL) { 708 bus_teardown_intr(dev, sc->nfe_irq[i], 709 sc->nfe_intrhand[i]); 710 sc->nfe_intrhand[i] = NULL; 711 } 712 } 713 714 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 715 if (sc->nfe_irq[0] != NULL) 716 bus_release_resource(dev, SYS_RES_IRQ, 0, 717 sc->nfe_irq[0]); 718 } else { 719 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 720 if (sc->nfe_irq[i] != NULL) { 721 bus_release_resource(dev, SYS_RES_IRQ, rid, 722 sc->nfe_irq[i]); 723 sc->nfe_irq[i] = NULL; 724 } 725 } 726 pci_release_msi(dev); 727 } 728 if (sc->nfe_msix_pba_res != NULL) { 729 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3), 730 sc->nfe_msix_pba_res); 731 sc->nfe_msix_pba_res = NULL; 732 } 733 if (sc->nfe_msix_res != NULL) { 734 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), 735 sc->nfe_msix_res); 736 sc->nfe_msix_res = NULL; 737 } 738 if (sc->nfe_res[0] 
!= NULL) { 739 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 740 sc->nfe_res[0]); 741 sc->nfe_res[0] = NULL; 742 } 743 744 nfe_free_tx_ring(sc, &sc->txq); 745 nfe_free_rx_ring(sc, &sc->rxq); 746 nfe_free_jrx_ring(sc, &sc->jrxq); 747 748 if (sc->nfe_parent_tag) { 749 bus_dma_tag_destroy(sc->nfe_parent_tag); 750 sc->nfe_parent_tag = NULL; 751 } 752 753 mtx_destroy(&sc->nfe_mtx); 754 755 return (0); 756 } 757 758 759 static int 760 nfe_suspend(device_t dev) 761 { 762 struct nfe_softc *sc; 763 764 sc = device_get_softc(dev); 765 766 NFE_LOCK(sc); 767 nfe_stop(sc->nfe_ifp); 768 nfe_set_wol(sc); 769 sc->nfe_suspended = 1; 770 NFE_UNLOCK(sc); 771 772 return (0); 773 } 774 775 776 static int 777 nfe_resume(device_t dev) 778 { 779 struct nfe_softc *sc; 780 struct ifnet *ifp; 781 782 sc = device_get_softc(dev); 783 784 NFE_LOCK(sc); 785 nfe_power(sc); 786 ifp = sc->nfe_ifp; 787 if (ifp->if_flags & IFF_UP) 788 nfe_init_locked(sc); 789 sc->nfe_suspended = 0; 790 NFE_UNLOCK(sc); 791 792 return (0); 793 } 794 795 796 static int 797 nfe_can_use_msix(struct nfe_softc *sc) 798 { 799 static struct msix_blacklist { 800 char *maker; 801 char *product; 802 } msix_blacklists[] = { 803 { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" } 804 }; 805 806 struct msix_blacklist *mblp; 807 char *maker, *product; 808 int count, n, use_msix; 809 810 /* 811 * Search base board manufacturer and product name table 812 * to see this system has a known MSI/MSI-X issue. 813 */ 814 maker = getenv("smbios.planar.maker"); 815 product = getenv("smbios.planar.product"); 816 use_msix = 1; 817 if (maker != NULL && product != NULL) { 818 count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]); 819 mblp = msix_blacklists; 820 for (n = 0; n < count; n++) { 821 if (strcmp(maker, mblp->maker) == 0 && 822 strcmp(product, mblp->product) == 0) { 823 use_msix = 0; 824 break; 825 } 826 mblp++; 827 } 828 } 829 if (maker != NULL) 830 freeenv(maker); 831 if (product != NULL) 832 freeenv(product); 833 834 return (use_msix); 835 } 836 837 838 /* Take PHY/NIC out of powerdown, from Linux */ 839 static void 840 nfe_power(struct nfe_softc *sc) 841 { 842 uint32_t pwr; 843 844 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) 845 return; 846 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); 847 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); 848 DELAY(100); 849 NFE_WRITE(sc, NFE_MAC_RESET, 0); 850 DELAY(100); 851 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); 852 pwr = NFE_READ(sc, NFE_PWR2_CTL); 853 pwr &= ~NFE_PWR2_WAKEUP_MASK; 854 if (sc->nfe_revid >= 0xa3 && 855 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || 856 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) 857 pwr |= NFE_PWR2_REVA3; 858 NFE_WRITE(sc, NFE_PWR2_CTL, pwr); 859 } 860 861 862 static void 863 nfe_miibus_statchg(device_t dev) 864 { 865 struct nfe_softc *sc; 866 struct mii_data *mii; 867 struct ifnet *ifp; 868 uint32_t rxctl, txctl; 869 870 sc = device_get_softc(dev); 871 872 mii = device_get_softc(sc->nfe_miibus); 873 ifp = sc->nfe_ifp; 874 875 sc->nfe_link = 0; 876 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 877 (IFM_ACTIVE | IFM_AVALID)) { 878 switch (IFM_SUBTYPE(mii->mii_media_active)) { 879 case IFM_10_T: 880 case IFM_100_TX: 881 case IFM_1000_T: 882 sc->nfe_link = 1; 883 break; 884 default: 885 break; 886 } 887 } 888 889 nfe_mac_config(sc, mii); 890 txctl = NFE_READ(sc, NFE_TX_CTL); 891 rxctl = NFE_READ(sc, NFE_RX_CTL); 892 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 893 txctl |= NFE_TX_START; 894 rxctl |= 
NFE_RX_START; 895 } else { 896 txctl &= ~NFE_TX_START; 897 rxctl &= ~NFE_RX_START; 898 } 899 NFE_WRITE(sc, NFE_TX_CTL, txctl); 900 NFE_WRITE(sc, NFE_RX_CTL, rxctl); 901 } 902 903 904 static void 905 nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii) 906 { 907 uint32_t link, misc, phy, seed; 908 uint32_t val; 909 910 NFE_LOCK_ASSERT(sc); 911 912 phy = NFE_READ(sc, NFE_PHY_IFACE); 913 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 914 915 seed = NFE_READ(sc, NFE_RNDSEED); 916 seed &= ~NFE_SEED_MASK; 917 918 misc = NFE_MISC1_MAGIC; 919 link = NFE_MEDIA_SET; 920 921 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) { 922 phy |= NFE_PHY_HDX; /* half-duplex */ 923 misc |= NFE_MISC1_HDX; 924 } 925 926 switch (IFM_SUBTYPE(mii->mii_media_active)) { 927 case IFM_1000_T: /* full-duplex only */ 928 link |= NFE_MEDIA_1000T; 929 seed |= NFE_SEED_1000T; 930 phy |= NFE_PHY_1000T; 931 break; 932 case IFM_100_TX: 933 link |= NFE_MEDIA_100TX; 934 seed |= NFE_SEED_100TX; 935 phy |= NFE_PHY_100TX; 936 break; 937 case IFM_10_T: 938 link |= NFE_MEDIA_10T; 939 seed |= NFE_SEED_10T; 940 break; 941 } 942 943 if ((phy & 0x10000000) != 0) { 944 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 945 val = NFE_R1_MAGIC_1000; 946 else 947 val = NFE_R1_MAGIC_10_100; 948 } else 949 val = NFE_R1_MAGIC_DEFAULT; 950 NFE_WRITE(sc, NFE_SETUP_R1, val); 951 952 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 953 954 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 955 NFE_WRITE(sc, NFE_MISC1, misc); 956 NFE_WRITE(sc, NFE_LINKSPEED, link); 957 958 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 959 /* It seems all hardwares supports Rx pause frames. */ 960 val = NFE_READ(sc, NFE_RXFILTER); 961 if ((IFM_OPTIONS(mii->mii_media_active) & 962 IFM_ETH_RXPAUSE) != 0) 963 val |= NFE_PFF_RX_PAUSE; 964 else 965 val &= ~NFE_PFF_RX_PAUSE; 966 NFE_WRITE(sc, NFE_RXFILTER, val); 967 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 968 val = NFE_READ(sc, NFE_MISC1); 969 if ((IFM_OPTIONS(mii->mii_media_active) & 970 IFM_ETH_TXPAUSE) != 0) { 971 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 972 NFE_TX_PAUSE_FRAME_ENABLE); 973 val |= NFE_MISC1_TX_PAUSE; 974 } else { 975 val &= ~NFE_MISC1_TX_PAUSE; 976 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 977 NFE_TX_PAUSE_FRAME_DISABLE); 978 } 979 NFE_WRITE(sc, NFE_MISC1, val); 980 } 981 } else { 982 /* disable rx/tx pause frames */ 983 val = NFE_READ(sc, NFE_RXFILTER); 984 val &= ~NFE_PFF_RX_PAUSE; 985 NFE_WRITE(sc, NFE_RXFILTER, val); 986 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 987 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 988 NFE_TX_PAUSE_FRAME_DISABLE); 989 val = NFE_READ(sc, NFE_MISC1); 990 val &= ~NFE_MISC1_TX_PAUSE; 991 NFE_WRITE(sc, NFE_MISC1, val); 992 } 993 } 994 } 995 996 997 static int 998 nfe_miibus_readreg(device_t dev, int phy, int reg) 999 { 1000 struct nfe_softc *sc = device_get_softc(dev); 1001 uint32_t val; 1002 int ntries; 1003 1004 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1005 1006 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 1007 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 1008 DELAY(100); 1009 } 1010 1011 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 1012 1013 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 1014 DELAY(100); 1015 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 1016 break; 1017 } 1018 if (ntries == NFE_TIMEOUT) { 1019 DPRINTFN(sc, 2, "timeout waiting for PHY\n"); 1020 return 0; 1021 } 1022 1023 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 1024 DPRINTFN(sc, 2, "could not read PHY\n"); 1025 return 0; 1026 } 1027 1028 val = NFE_READ(sc, 
NFE_PHY_DATA); 1029 if (val != 0xffffffff && val != 0) 1030 sc->mii_phyaddr = phy; 1031 1032 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 1033 1034 return (val); 1035 } 1036 1037 1038 static int 1039 nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 1040 { 1041 struct nfe_softc *sc = device_get_softc(dev); 1042 uint32_t ctl; 1043 int ntries; 1044 1045 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1046 1047 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 1048 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 1049 DELAY(100); 1050 } 1051 1052 NFE_WRITE(sc, NFE_PHY_DATA, val); 1053 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 1054 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 1055 1056 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 1057 DELAY(100); 1058 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 1059 break; 1060 } 1061 #ifdef NFE_DEBUG 1062 if (nfedebug >= 2 && ntries == NFE_TIMEOUT) 1063 device_printf(sc->nfe_dev, "could not write to PHY\n"); 1064 #endif 1065 return (0); 1066 } 1067 1068 struct nfe_dmamap_arg { 1069 bus_addr_t nfe_busaddr; 1070 }; 1071 1072 static int 1073 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1074 { 1075 struct nfe_dmamap_arg ctx; 1076 struct nfe_rx_data *data; 1077 void *desc; 1078 int i, error, descsize; 1079 1080 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1081 desc = ring->desc64; 1082 descsize = sizeof (struct nfe_desc64); 1083 } else { 1084 desc = ring->desc32; 1085 descsize = sizeof (struct nfe_desc32); 1086 } 1087 1088 ring->cur = ring->next = 0; 1089 1090 error = bus_dma_tag_create(sc->nfe_parent_tag, 1091 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1092 BUS_SPACE_MAXADDR, /* lowaddr */ 1093 BUS_SPACE_MAXADDR, /* highaddr */ 1094 NULL, NULL, /* filter, filterarg */ 1095 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1096 NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 1097 0, /* flags */ 1098 NULL, NULL, /* lockfunc, lockarg */ 1099 &ring->rx_desc_tag); 1100 if (error != 0) { 1101 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1102 goto fail; 1103 } 1104 1105 /* allocate memory to desc */ 1106 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | 1107 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); 1108 if (error != 0) { 1109 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1110 goto fail; 1111 } 1112 if (sc->nfe_flags & NFE_40BIT_ADDR) 1113 ring->desc64 = desc; 1114 else 1115 ring->desc32 = desc; 1116 1117 /* map desc to device visible address space */ 1118 ctx.nfe_busaddr = 0; 1119 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, 1120 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1121 if (error != 0) { 1122 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1123 goto fail; 1124 } 1125 ring->physaddr = ctx.nfe_busaddr; 1126 1127 error = bus_dma_tag_create(sc->nfe_parent_tag, 1128 1, 0, /* alignment, boundary */ 1129 BUS_SPACE_MAXADDR, /* lowaddr */ 1130 BUS_SPACE_MAXADDR, /* highaddr */ 1131 NULL, NULL, /* filter, filterarg */ 1132 MCLBYTES, 1, /* maxsize, nsegments */ 1133 MCLBYTES, /* maxsegsize */ 1134 0, /* flags */ 1135 NULL, NULL, /* lockfunc, lockarg */ 1136 &ring->rx_data_tag); 1137 if (error != 0) { 1138 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); 1139 goto fail; 1140 } 1141 1142 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); 1143 if (error != 0) { 1144 device_printf(sc->nfe_dev, 1145 "could not create Rx DMA spare map\n"); 1146 goto fail; 1147 } 1148 1149 /* 1150 * 
Pre-allocate Rx buffers and populate Rx ring. 1151 */ 1152 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1153 data = &sc->rxq.data[i]; 1154 data->rx_data_map = NULL; 1155 data->m = NULL; 1156 error = bus_dmamap_create(ring->rx_data_tag, 0, 1157 &data->rx_data_map); 1158 if (error != 0) { 1159 device_printf(sc->nfe_dev, 1160 "could not create Rx DMA map\n"); 1161 goto fail; 1162 } 1163 } 1164 1165 fail: 1166 return (error); 1167 } 1168 1169 1170 static void 1171 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1172 { 1173 struct nfe_dmamap_arg ctx; 1174 struct nfe_rx_data *data; 1175 void *desc; 1176 int i, error, descsize; 1177 1178 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1179 return; 1180 if (jumbo_disable != 0) { 1181 device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); 1182 sc->nfe_jumbo_disable = 1; 1183 return; 1184 } 1185 1186 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1187 desc = ring->jdesc64; 1188 descsize = sizeof (struct nfe_desc64); 1189 } else { 1190 desc = ring->jdesc32; 1191 descsize = sizeof (struct nfe_desc32); 1192 } 1193 1194 ring->jcur = ring->jnext = 0; 1195 1196 /* Create DMA tag for jumbo Rx ring. */ 1197 error = bus_dma_tag_create(sc->nfe_parent_tag, 1198 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1199 BUS_SPACE_MAXADDR, /* lowaddr */ 1200 BUS_SPACE_MAXADDR, /* highaddr */ 1201 NULL, NULL, /* filter, filterarg */ 1202 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1203 1, /* nsegments */ 1204 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 1205 0, /* flags */ 1206 NULL, NULL, /* lockfunc, lockarg */ 1207 &ring->jrx_desc_tag); 1208 if (error != 0) { 1209 device_printf(sc->nfe_dev, 1210 "could not create jumbo ring DMA tag\n"); 1211 goto fail; 1212 } 1213 1214 /* Create DMA tag for jumbo Rx buffers. */ 1215 error = bus_dma_tag_create(sc->nfe_parent_tag, 1216 1, 0, /* alignment, boundary */ 1217 BUS_SPACE_MAXADDR, /* lowaddr */ 1218 BUS_SPACE_MAXADDR, /* highaddr */ 1219 NULL, NULL, /* filter, filterarg */ 1220 MJUM9BYTES, /* maxsize */ 1221 1, /* nsegments */ 1222 MJUM9BYTES, /* maxsegsize */ 1223 0, /* flags */ 1224 NULL, NULL, /* lockfunc, lockarg */ 1225 &ring->jrx_data_tag); 1226 if (error != 0) { 1227 device_printf(sc->nfe_dev, 1228 "could not create jumbo Rx buffer DMA tag\n"); 1229 goto fail; 1230 } 1231 1232 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 1233 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | 1234 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); 1235 if (error != 0) { 1236 device_printf(sc->nfe_dev, 1237 "could not allocate DMA'able memory for jumbo Rx ring\n"); 1238 goto fail; 1239 } 1240 if (sc->nfe_flags & NFE_40BIT_ADDR) 1241 ring->jdesc64 = desc; 1242 else 1243 ring->jdesc32 = desc; 1244 1245 ctx.nfe_busaddr = 0; 1246 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, 1247 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1248 if (error != 0) { 1249 device_printf(sc->nfe_dev, 1250 "could not load DMA'able memory for jumbo Rx ring\n"); 1251 goto fail; 1252 } 1253 ring->jphysaddr = ctx.nfe_busaddr; 1254 1255 /* Create DMA maps for jumbo Rx buffers. 
*/ 1256 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); 1257 if (error != 0) { 1258 device_printf(sc->nfe_dev, 1259 "could not create jumbo Rx DMA spare map\n"); 1260 goto fail; 1261 } 1262 1263 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1264 data = &sc->jrxq.jdata[i]; 1265 data->rx_data_map = NULL; 1266 data->m = NULL; 1267 error = bus_dmamap_create(ring->jrx_data_tag, 0, 1268 &data->rx_data_map); 1269 if (error != 0) { 1270 device_printf(sc->nfe_dev, 1271 "could not create jumbo Rx DMA map\n"); 1272 goto fail; 1273 } 1274 } 1275 1276 return; 1277 1278 fail: 1279 /* 1280 * Running without jumbo frame support is ok for most cases 1281 * so don't fail on creating dma tag/map for jumbo frame. 1282 */ 1283 nfe_free_jrx_ring(sc, ring); 1284 device_printf(sc->nfe_dev, "disabling jumbo frame support due to " 1285 "resource shortage\n"); 1286 sc->nfe_jumbo_disable = 1; 1287 } 1288 1289 1290 static int 1291 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1292 { 1293 void *desc; 1294 size_t descsize; 1295 int i; 1296 1297 ring->cur = ring->next = 0; 1298 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1299 desc = ring->desc64; 1300 descsize = sizeof (struct nfe_desc64); 1301 } else { 1302 desc = ring->desc32; 1303 descsize = sizeof (struct nfe_desc32); 1304 } 1305 bzero(desc, descsize * NFE_RX_RING_COUNT); 1306 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1307 if (nfe_newbuf(sc, i) != 0) 1308 return (ENOBUFS); 1309 } 1310 1311 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 1312 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1313 1314 return (0); 1315 } 1316 1317 1318 static int 1319 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1320 { 1321 void *desc; 1322 size_t descsize; 1323 int i; 1324 1325 ring->jcur = ring->jnext = 0; 1326 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1327 desc = ring->jdesc64; 1328 descsize = sizeof (struct nfe_desc64); 1329 } else { 1330 desc = ring->jdesc32; 1331 descsize = sizeof (struct nfe_desc32); 1332 } 1333 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT); 1334 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1335 if (nfe_jnewbuf(sc, i) != 0) 1336 return (ENOBUFS); 1337 } 1338 1339 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, 1340 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1341 1342 return (0); 1343 } 1344 1345 1346 static void 1347 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1348 { 1349 struct nfe_rx_data *data; 1350 void *desc; 1351 int i, descsize; 1352 1353 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1354 desc = ring->desc64; 1355 descsize = sizeof (struct nfe_desc64); 1356 } else { 1357 desc = ring->desc32; 1358 descsize = sizeof (struct nfe_desc32); 1359 } 1360 1361 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1362 data = &ring->data[i]; 1363 if (data->rx_data_map != NULL) { 1364 bus_dmamap_destroy(ring->rx_data_tag, 1365 data->rx_data_map); 1366 data->rx_data_map = NULL; 1367 } 1368 if (data->m != NULL) { 1369 m_freem(data->m); 1370 data->m = NULL; 1371 } 1372 } 1373 if (ring->rx_data_tag != NULL) { 1374 if (ring->rx_spare_map != NULL) { 1375 bus_dmamap_destroy(ring->rx_data_tag, 1376 ring->rx_spare_map); 1377 ring->rx_spare_map = NULL; 1378 } 1379 bus_dma_tag_destroy(ring->rx_data_tag); 1380 ring->rx_data_tag = NULL; 1381 } 1382 1383 if (desc != NULL) { 1384 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); 1385 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); 1386 ring->desc64 = NULL; 1387 ring->desc32 = NULL; 1388 ring->rx_desc_map = NULL; 1389 } 1390 if 
(ring->rx_desc_tag != NULL) { 1391 bus_dma_tag_destroy(ring->rx_desc_tag); 1392 ring->rx_desc_tag = NULL; 1393 } 1394 } 1395 1396 1397 static void 1398 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1399 { 1400 struct nfe_rx_data *data; 1401 void *desc; 1402 int i, descsize; 1403 1404 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1405 return; 1406 1407 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1408 desc = ring->jdesc64; 1409 descsize = sizeof (struct nfe_desc64); 1410 } else { 1411 desc = ring->jdesc32; 1412 descsize = sizeof (struct nfe_desc32); 1413 } 1414 1415 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1416 data = &ring->jdata[i]; 1417 if (data->rx_data_map != NULL) { 1418 bus_dmamap_destroy(ring->jrx_data_tag, 1419 data->rx_data_map); 1420 data->rx_data_map = NULL; 1421 } 1422 if (data->m != NULL) { 1423 m_freem(data->m); 1424 data->m = NULL; 1425 } 1426 } 1427 if (ring->jrx_data_tag != NULL) { 1428 if (ring->jrx_spare_map != NULL) { 1429 bus_dmamap_destroy(ring->jrx_data_tag, 1430 ring->jrx_spare_map); 1431 ring->jrx_spare_map = NULL; 1432 } 1433 bus_dma_tag_destroy(ring->jrx_data_tag); 1434 ring->jrx_data_tag = NULL; 1435 } 1436 1437 if (desc != NULL) { 1438 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); 1439 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); 1440 ring->jdesc64 = NULL; 1441 ring->jdesc32 = NULL; 1442 ring->jrx_desc_map = NULL; 1443 } 1444 1445 if (ring->jrx_desc_tag != NULL) { 1446 bus_dma_tag_destroy(ring->jrx_desc_tag); 1447 ring->jrx_desc_tag = NULL; 1448 } 1449 } 1450 1451 1452 static int 1453 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1454 { 1455 struct nfe_dmamap_arg ctx; 1456 int i, error; 1457 void *desc; 1458 int descsize; 1459 1460 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1461 desc = ring->desc64; 1462 descsize = sizeof (struct nfe_desc64); 1463 } else { 1464 desc = ring->desc32; 1465 descsize = sizeof (struct nfe_desc32); 1466 } 1467 1468 ring->queued = 0; 1469 ring->cur = ring->next = 0; 1470 1471 error = bus_dma_tag_create(sc->nfe_parent_tag, 1472 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1473 BUS_SPACE_MAXADDR, /* lowaddr */ 1474 BUS_SPACE_MAXADDR, /* highaddr */ 1475 NULL, NULL, /* filter, filterarg */ 1476 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1477 NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 1478 0, /* flags */ 1479 NULL, NULL, /* lockfunc, lockarg */ 1480 &ring->tx_desc_tag); 1481 if (error != 0) { 1482 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1483 goto fail; 1484 } 1485 1486 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | 1487 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); 1488 if (error != 0) { 1489 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1490 goto fail; 1491 } 1492 if (sc->nfe_flags & NFE_40BIT_ADDR) 1493 ring->desc64 = desc; 1494 else 1495 ring->desc32 = desc; 1496 1497 ctx.nfe_busaddr = 0; 1498 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, 1499 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1500 if (error != 0) { 1501 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1502 goto fail; 1503 } 1504 ring->physaddr = ctx.nfe_busaddr; 1505 1506 error = bus_dma_tag_create(sc->nfe_parent_tag, 1507 1, 0, 1508 BUS_SPACE_MAXADDR, 1509 BUS_SPACE_MAXADDR, 1510 NULL, NULL, 1511 NFE_TSO_MAXSIZE, 1512 NFE_MAX_SCATTER, 1513 NFE_TSO_MAXSGSIZE, 1514 0, 1515 NULL, NULL, 1516 &ring->tx_data_tag); 1517 if (error != 0) { 1518 device_printf(sc->nfe_dev, "could not create 
Tx DMA tag\n"); 1519 goto fail; 1520 } 1521 1522 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1523 error = bus_dmamap_create(ring->tx_data_tag, 0, 1524 &ring->data[i].tx_data_map); 1525 if (error != 0) { 1526 device_printf(sc->nfe_dev, 1527 "could not create Tx DMA map\n"); 1528 goto fail; 1529 } 1530 } 1531 1532 fail: 1533 return (error); 1534 } 1535 1536 1537 static void 1538 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1539 { 1540 void *desc; 1541 size_t descsize; 1542 1543 sc->nfe_force_tx = 0; 1544 ring->queued = 0; 1545 ring->cur = ring->next = 0; 1546 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1547 desc = ring->desc64; 1548 descsize = sizeof (struct nfe_desc64); 1549 } else { 1550 desc = ring->desc32; 1551 descsize = sizeof (struct nfe_desc32); 1552 } 1553 bzero(desc, descsize * NFE_TX_RING_COUNT); 1554 1555 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1556 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1557 } 1558 1559 1560 static void 1561 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1562 { 1563 struct nfe_tx_data *data; 1564 void *desc; 1565 int i, descsize; 1566 1567 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1568 desc = ring->desc64; 1569 descsize = sizeof (struct nfe_desc64); 1570 } else { 1571 desc = ring->desc32; 1572 descsize = sizeof (struct nfe_desc32); 1573 } 1574 1575 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1576 data = &ring->data[i]; 1577 1578 if (data->m != NULL) { 1579 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, 1580 BUS_DMASYNC_POSTWRITE); 1581 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); 1582 m_freem(data->m); 1583 data->m = NULL; 1584 } 1585 if (data->tx_data_map != NULL) { 1586 bus_dmamap_destroy(ring->tx_data_tag, 1587 data->tx_data_map); 1588 data->tx_data_map = NULL; 1589 } 1590 } 1591 1592 if (ring->tx_data_tag != NULL) { 1593 bus_dma_tag_destroy(ring->tx_data_tag); 1594 ring->tx_data_tag = NULL; 1595 } 1596 1597 if (desc != NULL) { 1598 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1599 BUS_DMASYNC_POSTWRITE); 1600 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); 1601 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); 1602 ring->desc64 = NULL; 1603 ring->desc32 = NULL; 1604 ring->tx_desc_map = NULL; 1605 bus_dma_tag_destroy(ring->tx_desc_tag); 1606 ring->tx_desc_tag = NULL; 1607 } 1608 } 1609 1610 #ifdef DEVICE_POLLING 1611 static poll_handler_t nfe_poll; 1612 1613 1614 static int 1615 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1616 { 1617 struct nfe_softc *sc = ifp->if_softc; 1618 uint32_t r; 1619 int rx_npkts = 0; 1620 1621 NFE_LOCK(sc); 1622 1623 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1624 NFE_UNLOCK(sc); 1625 return (rx_npkts); 1626 } 1627 1628 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1629 rx_npkts = nfe_jrxeof(sc, count, &rx_npkts); 1630 else 1631 rx_npkts = nfe_rxeof(sc, count, &rx_npkts); 1632 nfe_txeof(sc); 1633 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1634 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1635 1636 if (cmd == POLL_AND_CHECK_STATUS) { 1637 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1638 NFE_UNLOCK(sc); 1639 return (rx_npkts); 1640 } 1641 NFE_WRITE(sc, sc->nfe_irq_status, r); 1642 1643 if (r & NFE_IRQ_LINK) { 1644 NFE_READ(sc, NFE_PHY_STATUS); 1645 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1646 DPRINTF(sc, "link state changed\n"); 1647 } 1648 } 1649 NFE_UNLOCK(sc); 1650 return (rx_npkts); 1651 } 1652 #endif /* DEVICE_POLLING */ 1653 1654 static void 1655 nfe_set_intr(struct nfe_softc *sc) 1656 { 1657 1658 if 
(sc->nfe_msi != 0)
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}


/* In MSI-X mode, a write to the mask registers behaves as XOR. */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to enable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}


static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to disable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}


static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, init, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;
	init = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
			    (sc->nfe_jumbo_disable != 0)) &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				NFE_LOCK(sc);
				ifp->if_mtu = ifr->ifr_mtu;
				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
					nfe_init_locked(sc);
				NFE_UNLOCK(sc);
			}
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
1727 */ 1728 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && 1729 ((ifp->if_flags ^ sc->nfe_if_flags) & 1730 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1731 nfe_setmulti(sc); 1732 else 1733 nfe_init_locked(sc); 1734 } else { 1735 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1736 nfe_stop(ifp); 1737 } 1738 sc->nfe_if_flags = ifp->if_flags; 1739 NFE_UNLOCK(sc); 1740 error = 0; 1741 break; 1742 case SIOCADDMULTI: 1743 case SIOCDELMULTI: 1744 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1745 NFE_LOCK(sc); 1746 nfe_setmulti(sc); 1747 NFE_UNLOCK(sc); 1748 error = 0; 1749 } 1750 break; 1751 case SIOCSIFMEDIA: 1752 case SIOCGIFMEDIA: 1753 mii = device_get_softc(sc->nfe_miibus); 1754 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1755 break; 1756 case SIOCSIFCAP: 1757 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1758 #ifdef DEVICE_POLLING 1759 if ((mask & IFCAP_POLLING) != 0) { 1760 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1761 error = ether_poll_register(nfe_poll, ifp); 1762 if (error) 1763 break; 1764 NFE_LOCK(sc); 1765 nfe_disable_intr(sc); 1766 ifp->if_capenable |= IFCAP_POLLING; 1767 NFE_UNLOCK(sc); 1768 } else { 1769 error = ether_poll_deregister(ifp); 1770 /* Enable interrupt even in error case */ 1771 NFE_LOCK(sc); 1772 nfe_enable_intr(sc); 1773 ifp->if_capenable &= ~IFCAP_POLLING; 1774 NFE_UNLOCK(sc); 1775 } 1776 } 1777 #endif /* DEVICE_POLLING */ 1778 if ((mask & IFCAP_WOL_MAGIC) != 0 && 1779 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 1780 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1781 if ((mask & IFCAP_TXCSUM) != 0 && 1782 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 1783 ifp->if_capenable ^= IFCAP_TXCSUM; 1784 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1785 ifp->if_hwassist |= NFE_CSUM_FEATURES; 1786 else 1787 ifp->if_hwassist &= ~NFE_CSUM_FEATURES; 1788 } 1789 if ((mask & IFCAP_RXCSUM) != 0 && 1790 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { 1791 ifp->if_capenable ^= IFCAP_RXCSUM; 1792 init++; 1793 } 1794 if ((mask & IFCAP_TSO4) != 0 && 1795 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 1796 ifp->if_capenable ^= IFCAP_TSO4; 1797 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 1798 ifp->if_hwassist |= CSUM_TSO; 1799 else 1800 ifp->if_hwassist &= ~CSUM_TSO; 1801 } 1802 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1803 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 1804 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1805 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1806 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 1807 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1808 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1809 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; 1810 init++; 1811 } 1812 /* 1813 * XXX 1814 * It seems that VLAN stripping requires Rx checksum offload. 1815 * Unfortunately FreeBSD has no way to disable only Rx side 1816 * VLAN stripping. So when we know Rx checksum offload is 1817 * disabled turn entire hardware VLAN assist off. 
1818 */ 1819 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) { 1820 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 1821 init++; 1822 ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING | 1823 IFCAP_VLAN_HWTSO); 1824 } 1825 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1826 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1827 nfe_init(sc); 1828 } 1829 VLAN_CAPABILITIES(ifp); 1830 break; 1831 default: 1832 error = ether_ioctl(ifp, cmd, data); 1833 break; 1834 } 1835 1836 return (error); 1837 } 1838 1839 1840 static int 1841 nfe_intr(void *arg) 1842 { 1843 struct nfe_softc *sc; 1844 uint32_t status; 1845 1846 sc = (struct nfe_softc *)arg; 1847 1848 status = NFE_READ(sc, sc->nfe_irq_status); 1849 if (status == 0 || status == 0xffffffff) 1850 return (FILTER_STRAY); 1851 nfe_disable_intr(sc); 1852 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1853 1854 return (FILTER_HANDLED); 1855 } 1856 1857 1858 static void 1859 nfe_int_task(void *arg, int pending) 1860 { 1861 struct nfe_softc *sc = arg; 1862 struct ifnet *ifp = sc->nfe_ifp; 1863 uint32_t r; 1864 int domore; 1865 1866 NFE_LOCK(sc); 1867 1868 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1869 nfe_enable_intr(sc); 1870 NFE_UNLOCK(sc); 1871 return; /* not for us */ 1872 } 1873 NFE_WRITE(sc, sc->nfe_irq_status, r); 1874 1875 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); 1876 1877 #ifdef DEVICE_POLLING 1878 if (ifp->if_capenable & IFCAP_POLLING) { 1879 NFE_UNLOCK(sc); 1880 return; 1881 } 1882 #endif 1883 1884 if (r & NFE_IRQ_LINK) { 1885 NFE_READ(sc, NFE_PHY_STATUS); 1886 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1887 DPRINTF(sc, "link state changed\n"); 1888 } 1889 1890 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1891 NFE_UNLOCK(sc); 1892 nfe_enable_intr(sc); 1893 return; 1894 } 1895 1896 domore = 0; 1897 /* check Rx ring */ 1898 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1899 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL); 1900 else 1901 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL); 1902 /* check Tx ring */ 1903 nfe_txeof(sc); 1904 1905 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1906 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1907 1908 NFE_UNLOCK(sc); 1909 1910 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { 1911 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1912 return; 1913 } 1914 1915 /* Reenable interrupts. */ 1916 nfe_enable_intr(sc); 1917 } 1918 1919 1920 static __inline void 1921 nfe_discard_rxbuf(struct nfe_softc *sc, int idx) 1922 { 1923 struct nfe_desc32 *desc32; 1924 struct nfe_desc64 *desc64; 1925 struct nfe_rx_data *data; 1926 struct mbuf *m; 1927 1928 data = &sc->rxq.data[idx]; 1929 m = data->m; 1930 1931 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1932 desc64 = &sc->rxq.desc64[idx]; 1933 /* VLAN packet may have overwritten it. 
*/ 1934 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1935 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1936 desc64->length = htole16(m->m_len); 1937 desc64->flags = htole16(NFE_RX_READY); 1938 } else { 1939 desc32 = &sc->rxq.desc32[idx]; 1940 desc32->length = htole16(m->m_len); 1941 desc32->flags = htole16(NFE_RX_READY); 1942 } 1943 } 1944 1945 1946 static __inline void 1947 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) 1948 { 1949 struct nfe_desc32 *desc32; 1950 struct nfe_desc64 *desc64; 1951 struct nfe_rx_data *data; 1952 struct mbuf *m; 1953 1954 data = &sc->jrxq.jdata[idx]; 1955 m = data->m; 1956 1957 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1958 desc64 = &sc->jrxq.jdesc64[idx]; 1959 /* VLAN packet may have overwritten it. */ 1960 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1961 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1962 desc64->length = htole16(m->m_len); 1963 desc64->flags = htole16(NFE_RX_READY); 1964 } else { 1965 desc32 = &sc->jrxq.jdesc32[idx]; 1966 desc32->length = htole16(m->m_len); 1967 desc32->flags = htole16(NFE_RX_READY); 1968 } 1969 } 1970 1971 1972 static int 1973 nfe_newbuf(struct nfe_softc *sc, int idx) 1974 { 1975 struct nfe_rx_data *data; 1976 struct nfe_desc32 *desc32; 1977 struct nfe_desc64 *desc64; 1978 struct mbuf *m; 1979 bus_dma_segment_t segs[1]; 1980 bus_dmamap_t map; 1981 int nsegs; 1982 1983 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1984 if (m == NULL) 1985 return (ENOBUFS); 1986 1987 m->m_len = m->m_pkthdr.len = MCLBYTES; 1988 m_adj(m, ETHER_ALIGN); 1989 1990 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, 1991 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 1992 m_freem(m); 1993 return (ENOBUFS); 1994 } 1995 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1996 1997 data = &sc->rxq.data[idx]; 1998 if (data->m != NULL) { 1999 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2000 BUS_DMASYNC_POSTREAD); 2001 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); 2002 } 2003 map = data->rx_data_map; 2004 data->rx_data_map = sc->rxq.rx_spare_map; 2005 sc->rxq.rx_spare_map = map; 2006 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 2007 BUS_DMASYNC_PREREAD); 2008 data->paddr = segs[0].ds_addr; 2009 data->m = m; 2010 /* update mapping address in h/w descriptor */ 2011 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2012 desc64 = &sc->rxq.desc64[idx]; 2013 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2014 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2015 desc64->length = htole16(segs[0].ds_len); 2016 desc64->flags = htole16(NFE_RX_READY); 2017 } else { 2018 desc32 = &sc->rxq.desc32[idx]; 2019 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2020 desc32->length = htole16(segs[0].ds_len); 2021 desc32->flags = htole16(NFE_RX_READY); 2022 } 2023 2024 return (0); 2025 } 2026 2027 2028 static int 2029 nfe_jnewbuf(struct nfe_softc *sc, int idx) 2030 { 2031 struct nfe_rx_data *data; 2032 struct nfe_desc32 *desc32; 2033 struct nfe_desc64 *desc64; 2034 struct mbuf *m; 2035 bus_dma_segment_t segs[1]; 2036 bus_dmamap_t map; 2037 int nsegs; 2038 2039 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 2040 if (m == NULL) 2041 return (ENOBUFS); 2042 if ((m->m_flags & M_EXT) == 0) { 2043 m_freem(m); 2044 return (ENOBUFS); 2045 } 2046 m->m_pkthdr.len = m->m_len = MJUM9BYTES; 2047 m_adj(m, ETHER_ALIGN); 2048 2049 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, 2050 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 
2051 m_freem(m); 2052 return (ENOBUFS); 2053 } 2054 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2055 2056 data = &sc->jrxq.jdata[idx]; 2057 if (data->m != NULL) { 2058 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2059 BUS_DMASYNC_POSTREAD); 2060 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); 2061 } 2062 map = data->rx_data_map; 2063 data->rx_data_map = sc->jrxq.jrx_spare_map; 2064 sc->jrxq.jrx_spare_map = map; 2065 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 2066 BUS_DMASYNC_PREREAD); 2067 data->paddr = segs[0].ds_addr; 2068 data->m = m; 2069 /* update mapping address in h/w descriptor */ 2070 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2071 desc64 = &sc->jrxq.jdesc64[idx]; 2072 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2073 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2074 desc64->length = htole16(segs[0].ds_len); 2075 desc64->flags = htole16(NFE_RX_READY); 2076 } else { 2077 desc32 = &sc->jrxq.jdesc32[idx]; 2078 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2079 desc32->length = htole16(segs[0].ds_len); 2080 desc32->flags = htole16(NFE_RX_READY); 2081 } 2082 2083 return (0); 2084 } 2085 2086 2087 static int 2088 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp) 2089 { 2090 struct ifnet *ifp = sc->nfe_ifp; 2091 struct nfe_desc32 *desc32; 2092 struct nfe_desc64 *desc64; 2093 struct nfe_rx_data *data; 2094 struct mbuf *m; 2095 uint16_t flags; 2096 int len, prog, rx_npkts; 2097 uint32_t vtag = 0; 2098 2099 rx_npkts = 0; 2100 NFE_LOCK_ASSERT(sc); 2101 2102 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2103 BUS_DMASYNC_POSTREAD); 2104 2105 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { 2106 if (count <= 0) 2107 break; 2108 count--; 2109 2110 data = &sc->rxq.data[sc->rxq.cur]; 2111 2112 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2113 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 2114 vtag = le32toh(desc64->physaddr[1]); 2115 flags = le16toh(desc64->flags); 2116 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2117 } else { 2118 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 2119 flags = le16toh(desc32->flags); 2120 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2121 } 2122 2123 if (flags & NFE_RX_READY) 2124 break; 2125 prog++; 2126 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2127 if (!(flags & NFE_RX_VALID_V1)) { 2128 ifp->if_ierrors++; 2129 nfe_discard_rxbuf(sc, sc->rxq.cur); 2130 continue; 2131 } 2132 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2133 flags &= ~NFE_RX_ERROR; 2134 len--; /* fix buffer length */ 2135 } 2136 } else { 2137 if (!(flags & NFE_RX_VALID_V2)) { 2138 ifp->if_ierrors++; 2139 nfe_discard_rxbuf(sc, sc->rxq.cur); 2140 continue; 2141 } 2142 2143 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2144 flags &= ~NFE_RX_ERROR; 2145 len--; /* fix buffer length */ 2146 } 2147 } 2148 2149 if (flags & NFE_RX_ERROR) { 2150 ifp->if_ierrors++; 2151 nfe_discard_rxbuf(sc, sc->rxq.cur); 2152 continue; 2153 } 2154 2155 m = data->m; 2156 if (nfe_newbuf(sc, sc->rxq.cur) != 0) { 2157 ifp->if_iqdrops++; 2158 nfe_discard_rxbuf(sc, sc->rxq.cur); 2159 continue; 2160 } 2161 2162 if ((vtag & NFE_RX_VTAG) != 0 && 2163 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2164 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2165 m->m_flags |= M_VLANTAG; 2166 } 2167 2168 m->m_pkthdr.len = m->m_len = len; 2169 m->m_pkthdr.rcvif = ifp; 2170 2171 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2172 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2173 
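				/*
				 * The controller reported a good IP header checksum.
				 * When the TCP/UDP bit is also set, csum_data is set
				 * to 0xffff below so the stack treats the payload
				 * checksum (pseudo-header included) as verified.
				 */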
m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2174 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2175 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2176 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2177 m->m_pkthdr.csum_flags |= 2178 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2179 m->m_pkthdr.csum_data = 0xffff; 2180 } 2181 } 2182 } 2183 2184 ifp->if_ipackets++; 2185 2186 NFE_UNLOCK(sc); 2187 (*ifp->if_input)(ifp, m); 2188 NFE_LOCK(sc); 2189 rx_npkts++; 2190 } 2191 2192 if (prog > 0) 2193 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2194 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2195 2196 if (rx_npktsp != NULL) 2197 *rx_npktsp = rx_npkts; 2198 return (count > 0 ? 0 : EAGAIN); 2199 } 2200 2201 2202 static int 2203 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp) 2204 { 2205 struct ifnet *ifp = sc->nfe_ifp; 2206 struct nfe_desc32 *desc32; 2207 struct nfe_desc64 *desc64; 2208 struct nfe_rx_data *data; 2209 struct mbuf *m; 2210 uint16_t flags; 2211 int len, prog, rx_npkts; 2212 uint32_t vtag = 0; 2213 2214 rx_npkts = 0; 2215 NFE_LOCK_ASSERT(sc); 2216 2217 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2218 BUS_DMASYNC_POSTREAD); 2219 2220 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), 2221 vtag = 0) { 2222 if (count <= 0) 2223 break; 2224 count--; 2225 2226 data = &sc->jrxq.jdata[sc->jrxq.jcur]; 2227 2228 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2229 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; 2230 vtag = le32toh(desc64->physaddr[1]); 2231 flags = le16toh(desc64->flags); 2232 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2233 } else { 2234 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; 2235 flags = le16toh(desc32->flags); 2236 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2237 } 2238 2239 if (flags & NFE_RX_READY) 2240 break; 2241 prog++; 2242 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2243 if (!(flags & NFE_RX_VALID_V1)) { 2244 ifp->if_ierrors++; 2245 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2246 continue; 2247 } 2248 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2249 flags &= ~NFE_RX_ERROR; 2250 len--; /* fix buffer length */ 2251 } 2252 } else { 2253 if (!(flags & NFE_RX_VALID_V2)) { 2254 ifp->if_ierrors++; 2255 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2256 continue; 2257 } 2258 2259 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2260 flags &= ~NFE_RX_ERROR; 2261 len--; /* fix buffer length */ 2262 } 2263 } 2264 2265 if (flags & NFE_RX_ERROR) { 2266 ifp->if_ierrors++; 2267 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2268 continue; 2269 } 2270 2271 m = data->m; 2272 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { 2273 ifp->if_iqdrops++; 2274 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2275 continue; 2276 } 2277 2278 if ((vtag & NFE_RX_VTAG) != 0 && 2279 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2280 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2281 m->m_flags |= M_VLANTAG; 2282 } 2283 2284 m->m_pkthdr.len = m->m_len = len; 2285 m->m_pkthdr.rcvif = ifp; 2286 2287 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2288 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2289 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2290 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2291 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2292 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2293 m->m_pkthdr.csum_flags |= 2294 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2295 m->m_pkthdr.csum_data = 0xffff; 2296 } 2297 } 2298 } 2299 2300 ifp->if_ipackets++; 2301 2302 NFE_UNLOCK(sc); 2303 (*ifp->if_input)(ifp, m); 2304 NFE_LOCK(sc); 2305 rx_npkts++; 2306 } 2307 2308 if (prog > 0) 2309 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, 
sc->jrxq.jrx_desc_map, 2310 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2311 2312 if (rx_npktsp != NULL) 2313 *rx_npktsp = rx_npkts; 2314 return (count > 0 ? 0 : EAGAIN); 2315 } 2316 2317 2318 static void 2319 nfe_txeof(struct nfe_softc *sc) 2320 { 2321 struct ifnet *ifp = sc->nfe_ifp; 2322 struct nfe_desc32 *desc32; 2323 struct nfe_desc64 *desc64; 2324 struct nfe_tx_data *data = NULL; 2325 uint16_t flags; 2326 int cons, prog; 2327 2328 NFE_LOCK_ASSERT(sc); 2329 2330 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2331 BUS_DMASYNC_POSTREAD); 2332 2333 prog = 0; 2334 for (cons = sc->txq.next; cons != sc->txq.cur; 2335 NFE_INC(cons, NFE_TX_RING_COUNT)) { 2336 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2337 desc64 = &sc->txq.desc64[cons]; 2338 flags = le16toh(desc64->flags); 2339 } else { 2340 desc32 = &sc->txq.desc32[cons]; 2341 flags = le16toh(desc32->flags); 2342 } 2343 2344 if (flags & NFE_TX_VALID) 2345 break; 2346 2347 prog++; 2348 sc->txq.queued--; 2349 data = &sc->txq.data[cons]; 2350 2351 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2352 if ((flags & NFE_TX_LASTFRAG_V1) == 0) 2353 continue; 2354 if ((flags & NFE_TX_ERROR_V1) != 0) { 2355 device_printf(sc->nfe_dev, 2356 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); 2357 2358 ifp->if_oerrors++; 2359 } else 2360 ifp->if_opackets++; 2361 } else { 2362 if ((flags & NFE_TX_LASTFRAG_V2) == 0) 2363 continue; 2364 if ((flags & NFE_TX_ERROR_V2) != 0) { 2365 device_printf(sc->nfe_dev, 2366 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); 2367 ifp->if_oerrors++; 2368 } else 2369 ifp->if_opackets++; 2370 } 2371 2372 /* last fragment of the mbuf chain transmitted */ 2373 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); 2374 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, 2375 BUS_DMASYNC_POSTWRITE); 2376 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); 2377 m_freem(data->m); 2378 data->m = NULL; 2379 } 2380 2381 if (prog > 0) { 2382 sc->nfe_force_tx = 0; 2383 sc->txq.next = cons; 2384 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2385 if (sc->txq.queued == 0) 2386 sc->nfe_watchdog_timer = 0; 2387 } 2388 } 2389 2390 static int 2391 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) 2392 { 2393 struct nfe_desc32 *desc32 = NULL; 2394 struct nfe_desc64 *desc64 = NULL; 2395 bus_dmamap_t map; 2396 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 2397 int error, i, nsegs, prod, si; 2398 uint32_t tso_segsz; 2399 uint16_t cflags, flags; 2400 struct mbuf *m; 2401 2402 prod = si = sc->txq.cur; 2403 map = sc->txq.data[prod].tx_data_map; 2404 2405 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, 2406 &nsegs, BUS_DMA_NOWAIT); 2407 if (error == EFBIG) { 2408 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER); 2409 if (m == NULL) { 2410 m_freem(*m_head); 2411 *m_head = NULL; 2412 return (ENOBUFS); 2413 } 2414 *m_head = m; 2415 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, 2416 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2417 if (error != 0) { 2418 m_freem(*m_head); 2419 *m_head = NULL; 2420 return (ENOBUFS); 2421 } 2422 } else if (error != 0) 2423 return (error); 2424 if (nsegs == 0) { 2425 m_freem(*m_head); 2426 *m_head = NULL; 2427 return (EIO); 2428 } 2429 2430 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { 2431 bus_dmamap_unload(sc->txq.tx_data_tag, map); 2432 return (ENOBUFS); 2433 } 2434 2435 m = *m_head; 2436 cflags = flags = 0; 2437 tso_segsz = 0; 2438 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2439 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz << 2440 NFE_TX_TSO_SHIFT; 
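		/*
		 * The MSS is kept pre-shifted here; it is split further below
		 * into the first descriptor's length field (low 16 bits) and
		 * flags field (remaining bits).
		 */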
2441 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); 2442 cflags |= NFE_TX_TSO; 2443 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { 2444 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2445 cflags |= NFE_TX_IP_CSUM; 2446 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2447 cflags |= NFE_TX_TCP_UDP_CSUM; 2448 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2449 cflags |= NFE_TX_TCP_UDP_CSUM; 2450 } 2451 2452 for (i = 0; i < nsegs; i++) { 2453 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2454 desc64 = &sc->txq.desc64[prod]; 2455 desc64->physaddr[0] = 2456 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 2457 desc64->physaddr[1] = 2458 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2459 desc64->vtag = 0; 2460 desc64->length = htole16(segs[i].ds_len - 1); 2461 desc64->flags = htole16(flags); 2462 } else { 2463 desc32 = &sc->txq.desc32[prod]; 2464 desc32->physaddr = 2465 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2466 desc32->length = htole16(segs[i].ds_len - 1); 2467 desc32->flags = htole16(flags); 2468 } 2469 2470 /* 2471 * Setting of the valid bit in the first descriptor is 2472 * deferred until the whole chain is fully setup. 2473 */ 2474 flags |= NFE_TX_VALID; 2475 2476 sc->txq.queued++; 2477 NFE_INC(prod, NFE_TX_RING_COUNT); 2478 } 2479 2480 /* 2481 * the whole mbuf chain has been DMA mapped, fix last/first descriptor. 2482 * csum flags, vtag and TSO belong to the first fragment only. 2483 */ 2484 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2485 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 2486 desc64 = &sc->txq.desc64[si]; 2487 if ((m->m_flags & M_VLANTAG) != 0) 2488 desc64->vtag = htole32(NFE_TX_VTAG | 2489 m->m_pkthdr.ether_vtag); 2490 if (tso_segsz != 0) { 2491 /* 2492 * XXX 2493 * The following indicates the descriptor element 2494 * is a 32bit quantity. 2495 */ 2496 desc64->length |= htole16((uint16_t)tso_segsz); 2497 desc64->flags |= htole16(tso_segsz >> 16); 2498 } 2499 /* 2500 * finally, set the valid/checksum/TSO bit in the first 2501 * descriptor. 2502 */ 2503 desc64->flags |= htole16(NFE_TX_VALID | cflags); 2504 } else { 2505 if (sc->nfe_flags & NFE_JUMBO_SUP) 2506 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); 2507 else 2508 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); 2509 desc32 = &sc->txq.desc32[si]; 2510 if (tso_segsz != 0) { 2511 /* 2512 * XXX 2513 * The following indicates the descriptor element 2514 * is a 32bit quantity. 2515 */ 2516 desc32->length |= htole16((uint16_t)tso_segsz); 2517 desc32->flags |= htole16(tso_segsz >> 16); 2518 } 2519 /* 2520 * finally, set the valid/checksum/TSO bit in the first 2521 * descriptor. 
2522 */ 2523 desc32->flags |= htole16(NFE_TX_VALID | cflags); 2524 } 2525 2526 sc->txq.cur = prod; 2527 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; 2528 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; 2529 sc->txq.data[prod].tx_data_map = map; 2530 sc->txq.data[prod].m = m; 2531 2532 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); 2533 2534 return (0); 2535 } 2536 2537 2538 static void 2539 nfe_setmulti(struct nfe_softc *sc) 2540 { 2541 struct ifnet *ifp = sc->nfe_ifp; 2542 struct ifmultiaddr *ifma; 2543 int i; 2544 uint32_t filter; 2545 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2546 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 2547 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2548 }; 2549 2550 NFE_LOCK_ASSERT(sc); 2551 2552 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2553 bzero(addr, ETHER_ADDR_LEN); 2554 bzero(mask, ETHER_ADDR_LEN); 2555 goto done; 2556 } 2557 2558 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2559 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2560 2561 if_maddr_rlock(ifp); 2562 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2563 u_char *addrp; 2564 2565 if (ifma->ifma_addr->sa_family != AF_LINK) 2566 continue; 2567 2568 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2569 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2570 u_int8_t mcaddr = addrp[i]; 2571 addr[i] &= mcaddr; 2572 mask[i] &= ~mcaddr; 2573 } 2574 } 2575 if_maddr_runlock(ifp); 2576 2577 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2578 mask[i] |= addr[i]; 2579 } 2580 2581 done: 2582 addr[0] |= 0x01; /* make sure multicast bit is set */ 2583 2584 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2585 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2586 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2587 addr[5] << 8 | addr[4]); 2588 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2589 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2590 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2591 mask[5] << 8 | mask[4]); 2592 2593 filter = NFE_READ(sc, NFE_RXFILTER); 2594 filter &= NFE_PFF_RX_PAUSE; 2595 filter |= NFE_RXFILTER_MAGIC; 2596 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M; 2597 NFE_WRITE(sc, NFE_RXFILTER, filter); 2598 } 2599 2600 2601 static void 2602 nfe_tx_task(void *arg, int pending) 2603 { 2604 struct ifnet *ifp; 2605 2606 ifp = (struct ifnet *)arg; 2607 nfe_start(ifp); 2608 } 2609 2610 2611 static void 2612 nfe_start(struct ifnet *ifp) 2613 { 2614 struct nfe_softc *sc = ifp->if_softc; 2615 struct mbuf *m0; 2616 int enq; 2617 2618 NFE_LOCK(sc); 2619 2620 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2621 IFF_DRV_RUNNING || sc->nfe_link == 0) { 2622 NFE_UNLOCK(sc); 2623 return; 2624 } 2625 2626 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 2627 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 2628 if (m0 == NULL) 2629 break; 2630 2631 if (nfe_encap(sc, &m0) != 0) { 2632 if (m0 == NULL) 2633 break; 2634 IFQ_DRV_PREPEND(&ifp->if_snd, m0); 2635 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2636 break; 2637 } 2638 enq++; 2639 ETHER_BPF_MTAP(ifp, m0); 2640 } 2641 2642 if (enq > 0) { 2643 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2644 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2645 2646 /* kick Tx */ 2647 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2648 2649 /* 2650 * Set a timeout in case the chip goes out to lunch. 
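		 * The timer is armed for five ticks of nfe_tick() (roughly five
		 * seconds) and is cleared again in nfe_txeof() once the ring drains.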
2651 */ 2652 sc->nfe_watchdog_timer = 5; 2653 } 2654 2655 NFE_UNLOCK(sc); 2656 } 2657 2658 2659 static void 2660 nfe_watchdog(struct ifnet *ifp) 2661 { 2662 struct nfe_softc *sc = ifp->if_softc; 2663 2664 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer) 2665 return; 2666 2667 /* Check if we've lost Tx completion interrupt. */ 2668 nfe_txeof(sc); 2669 if (sc->txq.queued == 0) { 2670 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2671 "-- recovering\n"); 2672 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2673 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 2674 return; 2675 } 2676 /* Check if we've lost start Tx command. */ 2677 sc->nfe_force_tx++; 2678 if (sc->nfe_force_tx <= 3) { 2679 /* 2680 * If this is the case for watchdog timeout, the following 2681 * code should go to nfe_txeof(). 2682 */ 2683 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2684 return; 2685 } 2686 sc->nfe_force_tx = 0; 2687 2688 if_printf(ifp, "watchdog timeout\n"); 2689 2690 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2691 ifp->if_oerrors++; 2692 nfe_init_locked(sc); 2693 } 2694 2695 2696 static void 2697 nfe_init(void *xsc) 2698 { 2699 struct nfe_softc *sc = xsc; 2700 2701 NFE_LOCK(sc); 2702 nfe_init_locked(sc); 2703 NFE_UNLOCK(sc); 2704 } 2705 2706 2707 static void 2708 nfe_init_locked(void *xsc) 2709 { 2710 struct nfe_softc *sc = xsc; 2711 struct ifnet *ifp = sc->nfe_ifp; 2712 struct mii_data *mii; 2713 uint32_t val; 2714 int error; 2715 2716 NFE_LOCK_ASSERT(sc); 2717 2718 mii = device_get_softc(sc->nfe_miibus); 2719 2720 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2721 return; 2722 2723 nfe_stop(ifp); 2724 2725 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 2726 2727 nfe_init_tx_ring(sc, &sc->txq); 2728 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) 2729 error = nfe_init_jrx_ring(sc, &sc->jrxq); 2730 else 2731 error = nfe_init_rx_ring(sc, &sc->rxq); 2732 if (error != 0) { 2733 device_printf(sc->nfe_dev, 2734 "initialization failed: no memory for rx buffers\n"); 2735 nfe_stop(ifp); 2736 return; 2737 } 2738 2739 val = 0; 2740 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) 2741 val |= NFE_MAC_ADDR_INORDER; 2742 NFE_WRITE(sc, NFE_TX_UNK, val); 2743 NFE_WRITE(sc, NFE_STATUS, 0); 2744 2745 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) 2746 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); 2747 2748 sc->rxtxctl = NFE_RXTX_BIT2; 2749 if (sc->nfe_flags & NFE_40BIT_ADDR) 2750 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 2751 else if (sc->nfe_flags & NFE_JUMBO_SUP) 2752 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 2753 2754 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2755 sc->rxtxctl |= NFE_RXTX_RXCSUM; 2756 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2757 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; 2758 2759 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 2760 DELAY(10); 2761 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2762 2763 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2764 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 2765 else 2766 NFE_WRITE(sc, NFE_VTAG_CTL, 0); 2767 2768 NFE_WRITE(sc, NFE_SETUP_R6, 0); 2769 2770 /* set MAC address */ 2771 nfe_set_macaddr(sc, IF_LLADDR(ifp)); 2772 2773 /* tell MAC where rings are in memory */ 2774 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) { 2775 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2776 NFE_ADDR_HI(sc->jrxq.jphysaddr)); 2777 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 2778 NFE_ADDR_LO(sc->jrxq.jphysaddr)); 2779 } else { 2780 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2781 NFE_ADDR_HI(sc->rxq.physaddr)); 2782 NFE_WRITE(sc, 
NFE_RX_RING_ADDR_LO, 2783 NFE_ADDR_LO(sc->rxq.physaddr)); 2784 } 2785 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr)); 2786 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 2787 2788 NFE_WRITE(sc, NFE_RING_SIZE, 2789 (NFE_RX_RING_COUNT - 1) << 16 | 2790 (NFE_TX_RING_COUNT - 1)); 2791 2792 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize); 2793 2794 /* force MAC to wakeup */ 2795 val = NFE_READ(sc, NFE_PWR_STATE); 2796 if ((val & NFE_PWR_WAKEUP) == 0) 2797 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP); 2798 DELAY(10); 2799 val = NFE_READ(sc, NFE_PWR_STATE); 2800 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID); 2801 2802 #if 1 2803 /* configure interrupts coalescing/mitigation */ 2804 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 2805 #else 2806 /* no interrupt mitigation: one interrupt per packet */ 2807 NFE_WRITE(sc, NFE_IMTIMER, 970); 2808 #endif 2809 2810 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100); 2811 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 2812 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 2813 2814 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 2815 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 2816 2817 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 2818 /* Disable WOL. */ 2819 NFE_WRITE(sc, NFE_WOL_CTL, 0); 2820 2821 sc->rxtxctl &= ~NFE_RXTX_BIT2; 2822 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2823 DELAY(10); 2824 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 2825 2826 /* set Rx filter */ 2827 nfe_setmulti(sc); 2828 2829 /* enable Rx */ 2830 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 2831 2832 /* enable Tx */ 2833 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 2834 2835 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 2836 2837 /* Clear hardware stats. */ 2838 nfe_stats_clear(sc); 2839 2840 #ifdef DEVICE_POLLING 2841 if (ifp->if_capenable & IFCAP_POLLING) 2842 nfe_disable_intr(sc); 2843 else 2844 #endif 2845 nfe_set_intr(sc); 2846 nfe_enable_intr(sc); /* enable interrupts */ 2847 2848 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2849 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2850 2851 sc->nfe_link = 0; 2852 mii_mediachg(mii); 2853 2854 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2855 } 2856 2857 2858 static void 2859 nfe_stop(struct ifnet *ifp) 2860 { 2861 struct nfe_softc *sc = ifp->if_softc; 2862 struct nfe_rx_ring *rx_ring; 2863 struct nfe_jrx_ring *jrx_ring; 2864 struct nfe_tx_ring *tx_ring; 2865 struct nfe_rx_data *rdata; 2866 struct nfe_tx_data *tdata; 2867 int i; 2868 2869 NFE_LOCK_ASSERT(sc); 2870 2871 sc->nfe_watchdog_timer = 0; 2872 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2873 2874 callout_stop(&sc->nfe_stat_ch); 2875 2876 /* abort Tx */ 2877 NFE_WRITE(sc, NFE_TX_CTL, 0); 2878 2879 /* disable Rx */ 2880 NFE_WRITE(sc, NFE_RX_CTL, 0); 2881 2882 /* disable interrupts */ 2883 nfe_disable_intr(sc); 2884 2885 sc->nfe_link = 0; 2886 2887 /* free Rx and Tx mbufs still in the queues. 
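	 * Only the loaded mbufs and their DMA maps are released here; the
	 * descriptor rings themselves stay allocated and are torn down
	 * separately by the nfe_free_*_ring() routines.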
*/ 2888 rx_ring = &sc->rxq; 2889 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2890 rdata = &rx_ring->data[i]; 2891 if (rdata->m != NULL) { 2892 bus_dmamap_sync(rx_ring->rx_data_tag, 2893 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2894 bus_dmamap_unload(rx_ring->rx_data_tag, 2895 rdata->rx_data_map); 2896 m_freem(rdata->m); 2897 rdata->m = NULL; 2898 } 2899 } 2900 2901 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) { 2902 jrx_ring = &sc->jrxq; 2903 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 2904 rdata = &jrx_ring->jdata[i]; 2905 if (rdata->m != NULL) { 2906 bus_dmamap_sync(jrx_ring->jrx_data_tag, 2907 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2908 bus_dmamap_unload(jrx_ring->jrx_data_tag, 2909 rdata->rx_data_map); 2910 m_freem(rdata->m); 2911 rdata->m = NULL; 2912 } 2913 } 2914 } 2915 2916 tx_ring = &sc->txq; 2917 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 2918 tdata = &tx_ring->data[i]; 2919 if (tdata->m != NULL) { 2920 bus_dmamap_sync(tx_ring->tx_data_tag, 2921 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE); 2922 bus_dmamap_unload(tx_ring->tx_data_tag, 2923 tdata->tx_data_map); 2924 m_freem(tdata->m); 2925 tdata->m = NULL; 2926 } 2927 } 2928 /* Update hardware stats. */ 2929 nfe_stats_update(sc); 2930 } 2931 2932 2933 static int 2934 nfe_ifmedia_upd(struct ifnet *ifp) 2935 { 2936 struct nfe_softc *sc = ifp->if_softc; 2937 struct mii_data *mii; 2938 2939 NFE_LOCK(sc); 2940 mii = device_get_softc(sc->nfe_miibus); 2941 mii_mediachg(mii); 2942 NFE_UNLOCK(sc); 2943 2944 return (0); 2945 } 2946 2947 2948 static void 2949 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2950 { 2951 struct nfe_softc *sc; 2952 struct mii_data *mii; 2953 2954 sc = ifp->if_softc; 2955 2956 NFE_LOCK(sc); 2957 mii = device_get_softc(sc->nfe_miibus); 2958 mii_pollstat(mii); 2959 NFE_UNLOCK(sc); 2960 2961 ifmr->ifm_active = mii->mii_media_active; 2962 ifmr->ifm_status = mii->mii_media_status; 2963 } 2964 2965 2966 static void 2967 nfe_tick(void *xsc) 2968 { 2969 struct nfe_softc *sc; 2970 struct mii_data *mii; 2971 struct ifnet *ifp; 2972 2973 sc = (struct nfe_softc *)xsc; 2974 2975 NFE_LOCK_ASSERT(sc); 2976 2977 ifp = sc->nfe_ifp; 2978 2979 mii = device_get_softc(sc->nfe_miibus); 2980 mii_tick(mii); 2981 nfe_stats_update(sc); 2982 nfe_watchdog(ifp); 2983 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2984 } 2985 2986 2987 static int 2988 nfe_shutdown(device_t dev) 2989 { 2990 2991 return (nfe_suspend(dev)); 2992 } 2993 2994 2995 static void 2996 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 2997 { 2998 uint32_t val; 2999 3000 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 3001 val = NFE_READ(sc, NFE_MACADDR_LO); 3002 addr[0] = (val >> 8) & 0xff; 3003 addr[1] = (val & 0xff); 3004 3005 val = NFE_READ(sc, NFE_MACADDR_HI); 3006 addr[2] = (val >> 24) & 0xff; 3007 addr[3] = (val >> 16) & 0xff; 3008 addr[4] = (val >> 8) & 0xff; 3009 addr[5] = (val & 0xff); 3010 } else { 3011 val = NFE_READ(sc, NFE_MACADDR_LO); 3012 addr[5] = (val >> 8) & 0xff; 3013 addr[4] = (val & 0xff); 3014 3015 val = NFE_READ(sc, NFE_MACADDR_HI); 3016 addr[3] = (val >> 24) & 0xff; 3017 addr[2] = (val >> 16) & 0xff; 3018 addr[1] = (val >> 8) & 0xff; 3019 addr[0] = (val & 0xff); 3020 } 3021 } 3022 3023 3024 static void 3025 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr) 3026 { 3027 3028 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); 3029 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 | 3030 addr[1] << 8 | addr[0]); 3031 } 3032 3033 3034 /* 3035 * Map a single buffer address.
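 * bus_dmamap_load() callback: it records the single segment's bus address
 * in the caller-supplied struct nfe_dmamap_arg (used, for example, to
 * learn the physical addresses of the descriptor rings).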
3036 */ 3037 3038 static void 3039 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3040 { 3041 struct nfe_dmamap_arg *ctx; 3042 3043 if (error != 0) 3044 return; 3045 3046 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 3047 3048 ctx = (struct nfe_dmamap_arg *)arg; 3049 ctx->nfe_busaddr = segs[0].ds_addr; 3050 } 3051 3052 3053 static int 3054 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3055 { 3056 int error, value; 3057 3058 if (!arg1) 3059 return (EINVAL); 3060 value = *(int *)arg1; 3061 error = sysctl_handle_int(oidp, &value, 0, req); 3062 if (error || !req->newptr) 3063 return (error); 3064 if (value < low || value > high) 3065 return (EINVAL); 3066 *(int *)arg1 = value; 3067 3068 return (0); 3069 } 3070 3071 3072 static int 3073 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) 3074 { 3075 3076 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, 3077 NFE_PROC_MAX)); 3078 } 3079 3080 3081 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 3082 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 3083 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 3084 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) 3085 3086 static void 3087 nfe_sysctl_node(struct nfe_softc *sc) 3088 { 3089 struct sysctl_ctx_list *ctx; 3090 struct sysctl_oid_list *child, *parent; 3091 struct sysctl_oid *tree; 3092 struct nfe_hw_stats *stats; 3093 int error; 3094 3095 stats = &sc->nfe_stats; 3096 ctx = device_get_sysctl_ctx(sc->nfe_dev); 3097 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev)); 3098 SYSCTL_ADD_PROC(ctx, child, 3099 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 3100 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", 3101 "max number of Rx events to process"); 3102 3103 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3104 error = resource_int_value(device_get_name(sc->nfe_dev), 3105 device_get_unit(sc->nfe_dev), "process_limit", 3106 &sc->nfe_process_limit); 3107 if (error == 0) { 3108 if (sc->nfe_process_limit < NFE_PROC_MIN || 3109 sc->nfe_process_limit > NFE_PROC_MAX) { 3110 device_printf(sc->nfe_dev, 3111 "process_limit value out of range; " 3112 "using default: %d\n", NFE_PROC_DEFAULT); 3113 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3114 } 3115 } 3116 3117 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3118 return; 3119 3120 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 3121 NULL, "NFE statistics"); 3122 parent = SYSCTL_CHILDREN(tree); 3123 3124 /* Rx statistics. 
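	 * Exported under dev.nfe.<unit>.stats.rx; the counters themselves are
	 * accumulated from the MAC's MIB registers in nfe_stats_update().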
*/ 3125 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 3126 NULL, "Rx MAC statistics"); 3127 child = SYSCTL_CHILDREN(tree); 3128 3129 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors", 3130 &stats->rx_frame_errors, "Framing Errors"); 3131 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes", 3132 &stats->rx_extra_bytes, "Extra Bytes"); 3133 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3134 &stats->rx_late_cols, "Late Collisions"); 3135 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts", 3136 &stats->rx_runts, "Runts"); 3137 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos", 3138 &stats->rx_jumbos, "Jumbos"); 3139 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns", 3140 &stats->rx_fifo_overuns, "FIFO Overruns"); 3141 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors", 3142 &stats->rx_crc_errors, "CRC Errors"); 3143 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae", 3144 &stats->rx_fae, "Frame Alignment Errors"); 3145 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors", 3146 &stats->rx_len_errors, "Length Errors"); 3147 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3148 &stats->rx_unicast, "Unicast Frames"); 3149 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3150 &stats->rx_multicast, "Multicast Frames"); 3151 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3152 &stats->rx_broadcast, "Broadcast Frames"); 3153 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3154 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3155 &stats->rx_octets, "Octets"); 3156 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3157 &stats->rx_pause, "Pause frames"); 3158 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops", 3159 &stats->rx_drops, "Drop frames"); 3160 } 3161 3162 /* Tx statistics. */ 3163 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 3164 NULL, "Tx MAC statistics"); 3165 child = SYSCTL_CHILDREN(tree); 3166 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3167 &stats->tx_octets, "Octets"); 3168 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits", 3169 &stats->tx_zero_rexmits, "Zero Retransmits"); 3170 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits", 3171 &stats->tx_one_rexmits, "One Retransmits"); 3172 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits", 3173 &stats->tx_multi_rexmits, "Multiple Retransmits"); 3174 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3175 &stats->tx_late_cols, "Late Collisions"); 3176 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns", 3177 &stats->tx_fifo_underuns, "FIFO Underruns"); 3178 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts", 3179 &stats->tx_carrier_losts, "Carrier Losts"); 3180 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals", 3181 &stats->tx_excess_deferals, "Excess Deferrals"); 3182 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors", 3183 &stats->tx_retry_errors, "Retry Errors"); 3184 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3185 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals", 3186 &stats->tx_deferals, "Deferrals"); 3187 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames", 3188 &stats->tx_frames, "Frames"); 3189 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3190 &stats->tx_pause, "Pause Frames"); 3191 } 3192 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3193 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3194 &stats->tx_unicast, "Unicast Frames"); 3195 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3196 &stats->tx_multicast, "Multicast Frames"); 3197 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3198 &stats->tx_broadcast, "Broadcast Frames"); 3199 } 3200 } 3201 3202 #undef NFE_SYSCTL_STAT_ADD32 3203 #undef NFE_SYSCTL_STAT_ADD64 3204 3205 static void 3206 nfe_stats_clear(struct nfe_softc *sc) 3207 { 3208 int i, mib_cnt; 3209 3210 if
((sc->nfe_flags & NFE_MIB_V1) != 0) 3211 mib_cnt = NFE_NUM_MIB_STATV1; 3212 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0) 3213 mib_cnt = NFE_NUM_MIB_STATV2; 3214 else 3215 return; 3216 3217 for (i = 0; i < mib_cnt; i += sizeof(uint32_t)) 3218 NFE_READ(sc, NFE_TX_OCTET + i); 3219 3220 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3221 NFE_READ(sc, NFE_TX_UNICAST); 3222 NFE_READ(sc, NFE_TX_MULTICAST); 3223 NFE_READ(sc, NFE_TX_BROADCAST); 3224 } 3225 } 3226 3227 static void 3228 nfe_stats_update(struct nfe_softc *sc) 3229 { 3230 struct nfe_hw_stats *stats; 3231 3232 NFE_LOCK_ASSERT(sc); 3233 3234 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3235 return; 3236 3237 stats = &sc->nfe_stats; 3238 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET); 3239 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT); 3240 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT); 3241 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT); 3242 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL); 3243 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN); 3244 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST); 3245 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL); 3246 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR); 3247 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR); 3248 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES); 3249 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL); 3250 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT); 3251 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO); 3252 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN); 3253 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR); 3254 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE); 3255 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR); 3256 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST); 3257 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST); 3258 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST); 3259 3260 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3261 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL); 3262 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME); 3263 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET); 3264 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE); 3265 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE); 3266 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP); 3267 } 3268 3269 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3270 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST); 3271 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST); 3272 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST); 3273 } 3274 } 3275 3276 3277 static void 3278 nfe_set_linkspeed(struct nfe_softc *sc) 3279 { 3280 struct mii_softc *miisc; 3281 struct mii_data *mii; 3282 int aneg, i, phyno; 3283 3284 NFE_LOCK_ASSERT(sc); 3285 3286 mii = device_get_softc(sc->nfe_miibus); 3287 mii_pollstat(mii); 3288 aneg = 0; 3289 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 3290 (IFM_ACTIVE | IFM_AVALID)) { 3291 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3292 case IFM_10_T: 3293 case IFM_100_TX: 3294 return; 3295 case IFM_1000_T: 3296 aneg++; 3297 break; 3298 default: 3299 break; 3300 } 3301 } 3302 phyno = 0; 3303 if (mii->mii_instance) { 3304 miisc = LIST_FIRST(&mii->mii_phys); 3305 phyno = miisc->mii_phy; 3306 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3307 mii_phy_reset(miisc); 3308 } else 3309 return; 3310 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0); 3311 nfe_miibus_writereg(sc->nfe_dev, phyno, 3312 MII_ANAR, ANAR_TX_FD | ANAR_TX |
ANAR_10_FD | ANAR_10 | ANAR_CSMA); 3313 nfe_miibus_writereg(sc->nfe_dev, phyno, 3314 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 3315 DELAY(1000); 3316 if (aneg != 0) { 3317 /* 3318 * Poll link state until nfe(4) gets a 10/100Mbps link. 3319 */ 3320 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 3321 mii_pollstat(mii); 3322 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 3323 == (IFM_ACTIVE | IFM_AVALID)) { 3324 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3325 case IFM_10_T: 3326 case IFM_100_TX: 3327 nfe_mac_config(sc, mii); 3328 return; 3329 default: 3330 break; 3331 } 3332 } 3333 NFE_UNLOCK(sc); 3334 pause("nfelnk", hz); 3335 NFE_LOCK(sc); 3336 } 3337 if (i == MII_ANEGTICKS_GIGE) 3338 device_printf(sc->nfe_dev, 3339 "establishing a link failed, WOL may not work!\n"); 3340 } 3341 /* 3342 * No link, force MAC to have 100Mbps, full-duplex link. 3343 * This is the last resort and may or may not work. 3344 */ 3345 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 3346 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 3347 nfe_mac_config(sc, mii); 3348 } 3349 3350 3351 static void 3352 nfe_set_wol(struct nfe_softc *sc) 3353 { 3354 struct ifnet *ifp; 3355 uint32_t wolctl; 3356 int pmc; 3357 uint16_t pmstat; 3358 3359 NFE_LOCK_ASSERT(sc); 3360 3361 if (pci_find_extcap(sc->nfe_dev, PCIY_PMG, &pmc) != 0) 3362 return; 3363 ifp = sc->nfe_ifp; 3364 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 3365 wolctl = NFE_WOL_MAGIC; 3366 else 3367 wolctl = 0; 3368 NFE_WRITE(sc, NFE_WOL_CTL, wolctl); 3369 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 3370 nfe_set_linkspeed(sc); 3371 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0) 3372 NFE_WRITE(sc, NFE_PWR2_CTL, 3373 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS); 3374 /* Enable RX. */ 3375 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0); 3376 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0); 3377 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) | 3378 NFE_RX_START); 3379 } 3380 /* Request PME if WOL is requested. */ 3381 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2); 3382 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 3383 if ((ifp->if_capenable & IFCAP_WOL) != 0) 3384 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3385 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 3386 } 3387