/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>

MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here.
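 *
 * A custom kernel config typically needs both of these lines (as in
 * GENERIC):
 *	device	miibus
 *	device	nfe
 * Alternatively the driver is usually available as the if_nfe module,
 * which pulls in miibus via the MODULE_DEPEND() declarations above.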
 */
#include "miibus_if.h"

static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int  nfe_shutdown(device_t);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_link_task(void *, int);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int  nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int  nfe_newbuf(struct nfe_softc *, int);
static int  nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int, int *);
static int  nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_tx_task(void *, int);
static void nfe_start(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);

#ifdef NFE_DEBUG
static int nfedebug = 0;
#define	DPRINTF(sc, ...)	do {				\
	if (nfedebug)						\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#define	DPRINTFN(sc, n, ...)	do {				\
	if (nfedebug >= (n))					\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#else
#define	DPRINTF(sc, ...)
#define	DPRINTFN(sc, n, ...)
#endif

#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

/* Tunables.
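 *
 * These are boot-time tunables read from the loader environment; for
 * example (illustrative values; all three default to 0, leaving MSI,
 * MSI-X and jumbo frame support enabled):
 *	hw.nfe.msi_disable="1"
 *	hw.nfe.msix_disable="1"
 *	hw.nfe.jumbo_disable="1"
 * in /boot/loader.conf.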
*/ 149 static int msi_disable = 0; 150 static int msix_disable = 0; 151 static int jumbo_disable = 0; 152 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable); 153 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable); 154 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable); 155 156 static device_method_t nfe_methods[] = { 157 /* Device interface */ 158 DEVMETHOD(device_probe, nfe_probe), 159 DEVMETHOD(device_attach, nfe_attach), 160 DEVMETHOD(device_detach, nfe_detach), 161 DEVMETHOD(device_suspend, nfe_suspend), 162 DEVMETHOD(device_resume, nfe_resume), 163 DEVMETHOD(device_shutdown, nfe_shutdown), 164 165 /* bus interface */ 166 DEVMETHOD(bus_print_child, bus_generic_print_child), 167 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 168 169 /* MII interface */ 170 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 171 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 172 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 173 174 { NULL, NULL } 175 }; 176 177 static driver_t nfe_driver = { 178 "nfe", 179 nfe_methods, 180 sizeof(struct nfe_softc) 181 }; 182 183 static devclass_t nfe_devclass; 184 185 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); 186 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); 187 188 static struct nfe_type nfe_devs[] = { 189 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 190 "NVIDIA nForce MCP Networking Adapter"}, 191 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 192 "NVIDIA nForce2 MCP2 Networking Adapter"}, 193 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, 194 "NVIDIA nForce2 400 MCP4 Networking Adapter"}, 195 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, 196 "NVIDIA nForce2 400 MCP5 Networking Adapter"}, 197 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 198 "NVIDIA nForce3 MCP3 Networking Adapter"}, 199 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, 200 "NVIDIA nForce3 250 MCP6 Networking Adapter"}, 201 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 202 "NVIDIA nForce3 MCP7 Networking Adapter"}, 203 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, 204 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, 205 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, 206 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, 207 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 208 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */ 209 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 210 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */ 211 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, 212 "NVIDIA nForce 430 MCP12 Networking Adapter"}, 213 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, 214 "NVIDIA nForce 430 MCP13 Networking Adapter"}, 215 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 216 "NVIDIA nForce MCP55 Networking Adapter"}, 217 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 218 "NVIDIA nForce MCP55 Networking Adapter"}, 219 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 220 "NVIDIA nForce MCP61 Networking Adapter"}, 221 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 222 "NVIDIA nForce MCP61 Networking Adapter"}, 223 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 224 "NVIDIA nForce MCP61 Networking Adapter"}, 225 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 226 "NVIDIA nForce MCP61 Networking Adapter"}, 227 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 228 "NVIDIA nForce MCP65 Networking Adapter"}, 229 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 230 "NVIDIA nForce MCP65 Networking Adapter"}, 231 {PCI_VENDOR_NVIDIA, 
PCI_PRODUCT_NVIDIA_MCP65_LAN3, 232 "NVIDIA nForce MCP65 Networking Adapter"}, 233 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 234 "NVIDIA nForce MCP65 Networking Adapter"}, 235 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 236 "NVIDIA nForce MCP67 Networking Adapter"}, 237 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 238 "NVIDIA nForce MCP67 Networking Adapter"}, 239 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 240 "NVIDIA nForce MCP67 Networking Adapter"}, 241 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 242 "NVIDIA nForce MCP67 Networking Adapter"}, 243 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 244 "NVIDIA nForce MCP73 Networking Adapter"}, 245 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 246 "NVIDIA nForce MCP73 Networking Adapter"}, 247 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 248 "NVIDIA nForce MCP73 Networking Adapter"}, 249 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 250 "NVIDIA nForce MCP73 Networking Adapter"}, 251 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, 252 "NVIDIA nForce MCP77 Networking Adapter"}, 253 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, 254 "NVIDIA nForce MCP77 Networking Adapter"}, 255 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, 256 "NVIDIA nForce MCP77 Networking Adapter"}, 257 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, 258 "NVIDIA nForce MCP77 Networking Adapter"}, 259 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, 260 "NVIDIA nForce MCP79 Networking Adapter"}, 261 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, 262 "NVIDIA nForce MCP79 Networking Adapter"}, 263 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, 264 "NVIDIA nForce MCP79 Networking Adapter"}, 265 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, 266 "NVIDIA nForce MCP79 Networking Adapter"}, 267 {0, 0, NULL} 268 }; 269 270 271 /* Probe for supported hardware ID's */ 272 static int 273 nfe_probe(device_t dev) 274 { 275 struct nfe_type *t; 276 277 t = nfe_devs; 278 /* Check for matching PCI DEVICE ID's */ 279 while (t->name != NULL) { 280 if ((pci_get_vendor(dev) == t->vid_id) && 281 (pci_get_device(dev) == t->dev_id)) { 282 device_set_desc(dev, t->name); 283 return (BUS_PROBE_DEFAULT); 284 } 285 t++; 286 } 287 288 return (ENXIO); 289 } 290 291 static void 292 nfe_alloc_msix(struct nfe_softc *sc, int count) 293 { 294 int rid; 295 296 rid = PCIR_BAR(2); 297 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, 298 &rid, RF_ACTIVE); 299 if (sc->nfe_msix_res == NULL) { 300 device_printf(sc->nfe_dev, 301 "couldn't allocate MSIX table resource\n"); 302 return; 303 } 304 rid = PCIR_BAR(3); 305 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev, 306 SYS_RES_MEMORY, &rid, RF_ACTIVE); 307 if (sc->nfe_msix_pba_res == NULL) { 308 device_printf(sc->nfe_dev, 309 "couldn't allocate MSIX PBA resource\n"); 310 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), 311 sc->nfe_msix_res); 312 sc->nfe_msix_res = NULL; 313 return; 314 } 315 316 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) { 317 if (count == NFE_MSI_MESSAGES) { 318 if (bootverbose) 319 device_printf(sc->nfe_dev, 320 "Using %d MSIX messages\n", count); 321 sc->nfe_msix = 1; 322 } else { 323 if (bootverbose) 324 device_printf(sc->nfe_dev, 325 "couldn't allocate MSIX\n"); 326 pci_release_msi(sc->nfe_dev); 327 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, 328 PCIR_BAR(3), sc->nfe_msix_pba_res); 329 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, 330 PCIR_BAR(2), sc->nfe_msix_res); 331 sc->nfe_msix_pba_res = NULL; 332 
sc->nfe_msix_res = NULL; 333 } 334 } 335 } 336 337 static int 338 nfe_attach(device_t dev) 339 { 340 struct nfe_softc *sc; 341 struct ifnet *ifp; 342 bus_addr_t dma_addr_max; 343 int error = 0, i, msic, reg, rid; 344 345 sc = device_get_softc(dev); 346 sc->nfe_dev = dev; 347 348 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 349 MTX_DEF); 350 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); 351 TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc); 352 353 pci_enable_busmaster(dev); 354 355 rid = PCIR_BAR(0); 356 sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 357 RF_ACTIVE); 358 if (sc->nfe_res[0] == NULL) { 359 device_printf(dev, "couldn't map memory resources\n"); 360 mtx_destroy(&sc->nfe_mtx); 361 return (ENXIO); 362 } 363 364 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 365 uint16_t v, width; 366 367 v = pci_read_config(dev, reg + 0x08, 2); 368 /* Change max. read request size to 4096. */ 369 v &= ~(7 << 12); 370 v |= (5 << 12); 371 pci_write_config(dev, reg + 0x08, v, 2); 372 373 v = pci_read_config(dev, reg + 0x0c, 2); 374 /* link capability */ 375 v = (v >> 4) & 0x0f; 376 width = pci_read_config(dev, reg + 0x12, 2); 377 /* negotiated link width */ 378 width = (width >> 4) & 0x3f; 379 if (v != width) 380 device_printf(sc->nfe_dev, 381 "warning, negotiated width of link(x%d) != " 382 "max. width of link(x%d)\n", width, v); 383 } 384 385 /* Allocate interrupt */ 386 if (msix_disable == 0 || msi_disable == 0) { 387 if (msix_disable == 0 && 388 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES) 389 nfe_alloc_msix(sc, msic); 390 if (msi_disable == 0 && sc->nfe_msix == 0 && 391 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES && 392 pci_alloc_msi(dev, &msic) == 0) { 393 if (msic == NFE_MSI_MESSAGES) { 394 if (bootverbose) 395 device_printf(dev, 396 "Using %d MSI messages\n", msic); 397 sc->nfe_msi = 1; 398 } else 399 pci_release_msi(dev); 400 } 401 } 402 403 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) { 404 rid = 0; 405 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 406 RF_SHAREABLE | RF_ACTIVE); 407 if (sc->nfe_irq[0] == NULL) { 408 device_printf(dev, "couldn't allocate IRQ resources\n"); 409 error = ENXIO; 410 goto fail; 411 } 412 } else { 413 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 414 sc->nfe_irq[i] = bus_alloc_resource_any(dev, 415 SYS_RES_IRQ, &rid, RF_ACTIVE); 416 if (sc->nfe_irq[i] == NULL) { 417 device_printf(dev, 418 "couldn't allocate IRQ resources for " 419 "message %d\n", rid); 420 error = ENXIO; 421 goto fail; 422 } 423 } 424 /* Map interrupts to vector 0. */ 425 if (sc->nfe_msix != 0) { 426 NFE_WRITE(sc, NFE_MSIX_MAP0, 0); 427 NFE_WRITE(sc, NFE_MSIX_MAP1, 0); 428 } else if (sc->nfe_msi != 0) { 429 NFE_WRITE(sc, NFE_MSI_MAP0, 0); 430 NFE_WRITE(sc, NFE_MSI_MAP1, 0); 431 } 432 } 433 434 /* Set IRQ status/mask register. 
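	 * The values below are the plain INTx defaults; when MSI-X or MSI was
	 * allocated above, the status/mask register offsets and the
	 * enable/disable values are overridden just below (MSI-X mask writes
	 * behave like an XOR, see nfe_enable_intr()).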
*/ 435 sc->nfe_irq_status = NFE_IRQ_STATUS; 436 sc->nfe_irq_mask = NFE_IRQ_MASK; 437 sc->nfe_intrs = NFE_IRQ_WANTED; 438 sc->nfe_nointrs = 0; 439 if (sc->nfe_msix != 0) { 440 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS; 441 sc->nfe_nointrs = NFE_IRQ_WANTED; 442 } else if (sc->nfe_msi != 0) { 443 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK; 444 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED; 445 } 446 447 sc->nfe_devid = pci_get_device(dev); 448 sc->nfe_revid = pci_get_revid(dev); 449 sc->nfe_flags = 0; 450 451 switch (sc->nfe_devid) { 452 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 453 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 454 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 455 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 456 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; 457 break; 458 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 459 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 460 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1; 461 break; 462 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 463 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 464 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 465 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 466 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 467 NFE_MIB_V1; 468 break; 469 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 470 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 471 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 472 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2; 473 break; 474 475 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 476 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 477 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 478 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 479 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 480 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 481 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 482 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 483 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 484 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 485 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 486 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 487 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | 488 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; 489 break; 490 case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 491 case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 492 case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 493 case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 494 /* XXX flow control */ 495 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | 496 NFE_CORRECT_MACADDR | NFE_MIB_V3; 497 break; 498 case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 499 case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 500 case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 501 case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 502 /* XXX flow control */ 503 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 504 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; 505 break; 506 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 507 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 508 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 509 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 510 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | 511 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | 512 NFE_MIB_V2; 513 break; 514 } 515 516 nfe_power(sc); 517 /* Check for reversed ethernet address */ 518 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) 519 sc->nfe_flags |= NFE_CORRECT_MACADDR; 520 nfe_get_macaddr(sc, sc->eaddr); 521 /* 522 * Allocate the parent bus DMA tag appropriate for PCI. 
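	 * Controllers with NFE_40BIT_ADDR can address memory above 4GB
	 * (up to NFE_DMA_MAXADDR); all other parts are limited to 32-bit
	 * DMA addresses.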
523 */ 524 dma_addr_max = BUS_SPACE_MAXADDR_32BIT; 525 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) 526 dma_addr_max = NFE_DMA_MAXADDR; 527 error = bus_dma_tag_create( 528 bus_get_dma_tag(sc->nfe_dev), /* parent */ 529 1, 0, /* alignment, boundary */ 530 dma_addr_max, /* lowaddr */ 531 BUS_SPACE_MAXADDR, /* highaddr */ 532 NULL, NULL, /* filter, filterarg */ 533 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ 534 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 535 0, /* flags */ 536 NULL, NULL, /* lockfunc, lockarg */ 537 &sc->nfe_parent_tag); 538 if (error) 539 goto fail; 540 541 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER); 542 if (ifp == NULL) { 543 device_printf(dev, "can not if_alloc()\n"); 544 error = ENOSPC; 545 goto fail; 546 } 547 TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp); 548 549 /* 550 * Allocate Tx and Rx rings. 551 */ 552 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0) 553 goto fail; 554 555 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0) 556 goto fail; 557 558 nfe_alloc_jrx_ring(sc, &sc->jrxq); 559 /* Create sysctl node. */ 560 nfe_sysctl_node(sc); 561 562 ifp->if_softc = sc; 563 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 564 ifp->if_mtu = ETHERMTU; 565 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 566 ifp->if_ioctl = nfe_ioctl; 567 ifp->if_start = nfe_start; 568 ifp->if_hwassist = 0; 569 ifp->if_capabilities = 0; 570 ifp->if_watchdog = NULL; 571 ifp->if_init = nfe_init; 572 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1); 573 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1; 574 IFQ_SET_READY(&ifp->if_snd); 575 576 if (sc->nfe_flags & NFE_HW_CSUM) { 577 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4; 578 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO; 579 } 580 ifp->if_capenable = ifp->if_capabilities; 581 582 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 583 /* VLAN capability setup. */ 584 ifp->if_capabilities |= IFCAP_VLAN_MTU; 585 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) { 586 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 587 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0) 588 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 589 } 590 ifp->if_capenable = ifp->if_capabilities; 591 592 /* 593 * Tell the upper layer(s) we support long frames. 594 * Must appear after the call to ether_ifattach() because 595 * ether_ifattach() sets ifi_hdrlen to the default value. 
596 */ 597 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 598 599 #ifdef DEVICE_POLLING 600 ifp->if_capabilities |= IFCAP_POLLING; 601 #endif 602 603 /* Do MII setup */ 604 if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, 605 nfe_ifmedia_sts)) { 606 device_printf(dev, "MII without any phy!\n"); 607 error = ENXIO; 608 goto fail; 609 } 610 ether_ifattach(ifp, sc->eaddr); 611 612 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); 613 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, 614 taskqueue_thread_enqueue, &sc->nfe_tq); 615 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", 616 device_get_nameunit(sc->nfe_dev)); 617 error = 0; 618 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 619 error = bus_setup_intr(dev, sc->nfe_irq[0], 620 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 621 &sc->nfe_intrhand[0]); 622 } else { 623 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 624 error = bus_setup_intr(dev, sc->nfe_irq[i], 625 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 626 &sc->nfe_intrhand[i]); 627 if (error != 0) 628 break; 629 } 630 } 631 if (error) { 632 device_printf(dev, "couldn't set up irq\n"); 633 taskqueue_free(sc->nfe_tq); 634 sc->nfe_tq = NULL; 635 ether_ifdetach(ifp); 636 goto fail; 637 } 638 639 fail: 640 if (error) 641 nfe_detach(dev); 642 643 return (error); 644 } 645 646 647 static int 648 nfe_detach(device_t dev) 649 { 650 struct nfe_softc *sc; 651 struct ifnet *ifp; 652 uint8_t eaddr[ETHER_ADDR_LEN]; 653 int i, rid; 654 655 sc = device_get_softc(dev); 656 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); 657 ifp = sc->nfe_ifp; 658 659 #ifdef DEVICE_POLLING 660 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 661 ether_poll_deregister(ifp); 662 #endif 663 if (device_is_attached(dev)) { 664 NFE_LOCK(sc); 665 nfe_stop(ifp); 666 ifp->if_flags &= ~IFF_UP; 667 NFE_UNLOCK(sc); 668 callout_drain(&sc->nfe_stat_ch); 669 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task); 670 taskqueue_drain(taskqueue_swi, &sc->nfe_link_task); 671 ether_ifdetach(ifp); 672 } 673 674 if (ifp) { 675 /* restore ethernet address */ 676 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 677 for (i = 0; i < ETHER_ADDR_LEN; i++) { 678 eaddr[i] = sc->eaddr[5 - i]; 679 } 680 } else 681 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN); 682 nfe_set_macaddr(sc, eaddr); 683 if_free(ifp); 684 } 685 if (sc->nfe_miibus) 686 device_delete_child(dev, sc->nfe_miibus); 687 bus_generic_detach(dev); 688 if (sc->nfe_tq != NULL) { 689 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task); 690 taskqueue_free(sc->nfe_tq); 691 sc->nfe_tq = NULL; 692 } 693 694 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 695 if (sc->nfe_intrhand[i] != NULL) { 696 bus_teardown_intr(dev, sc->nfe_irq[i], 697 sc->nfe_intrhand[i]); 698 sc->nfe_intrhand[i] = NULL; 699 } 700 } 701 702 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 703 if (sc->nfe_irq[0] != NULL) 704 bus_release_resource(dev, SYS_RES_IRQ, 0, 705 sc->nfe_irq[0]); 706 } else { 707 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 708 if (sc->nfe_irq[i] != NULL) { 709 bus_release_resource(dev, SYS_RES_IRQ, rid, 710 sc->nfe_irq[i]); 711 sc->nfe_irq[i] = NULL; 712 } 713 } 714 pci_release_msi(dev); 715 } 716 if (sc->nfe_msix_pba_res != NULL) { 717 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3), 718 sc->nfe_msix_pba_res); 719 sc->nfe_msix_pba_res = NULL; 720 } 721 if (sc->nfe_msix_res != NULL) { 722 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), 723 sc->nfe_msix_res); 724 sc->nfe_msix_res = NULL; 725 } 726 if (sc->nfe_res[0] != NULL) { 727 
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 728 sc->nfe_res[0]); 729 sc->nfe_res[0] = NULL; 730 } 731 732 nfe_free_tx_ring(sc, &sc->txq); 733 nfe_free_rx_ring(sc, &sc->rxq); 734 nfe_free_jrx_ring(sc, &sc->jrxq); 735 736 if (sc->nfe_parent_tag) { 737 bus_dma_tag_destroy(sc->nfe_parent_tag); 738 sc->nfe_parent_tag = NULL; 739 } 740 741 mtx_destroy(&sc->nfe_mtx); 742 743 return (0); 744 } 745 746 747 static int 748 nfe_suspend(device_t dev) 749 { 750 struct nfe_softc *sc; 751 752 sc = device_get_softc(dev); 753 754 NFE_LOCK(sc); 755 nfe_stop(sc->nfe_ifp); 756 sc->nfe_suspended = 1; 757 NFE_UNLOCK(sc); 758 759 return (0); 760 } 761 762 763 static int 764 nfe_resume(device_t dev) 765 { 766 struct nfe_softc *sc; 767 struct ifnet *ifp; 768 769 sc = device_get_softc(dev); 770 771 NFE_LOCK(sc); 772 ifp = sc->nfe_ifp; 773 if (ifp->if_flags & IFF_UP) 774 nfe_init_locked(sc); 775 sc->nfe_suspended = 0; 776 NFE_UNLOCK(sc); 777 778 return (0); 779 } 780 781 782 /* Take PHY/NIC out of powerdown, from Linux */ 783 static void 784 nfe_power(struct nfe_softc *sc) 785 { 786 uint32_t pwr; 787 788 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) 789 return; 790 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); 791 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); 792 DELAY(100); 793 NFE_WRITE(sc, NFE_MAC_RESET, 0); 794 DELAY(100); 795 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); 796 pwr = NFE_READ(sc, NFE_PWR2_CTL); 797 pwr &= ~NFE_PWR2_WAKEUP_MASK; 798 if (sc->nfe_revid >= 0xa3 && 799 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || 800 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) 801 pwr |= NFE_PWR2_REVA3; 802 NFE_WRITE(sc, NFE_PWR2_CTL, pwr); 803 } 804 805 806 static void 807 nfe_miibus_statchg(device_t dev) 808 { 809 struct nfe_softc *sc; 810 811 sc = device_get_softc(dev); 812 taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task); 813 } 814 815 816 static void 817 nfe_link_task(void *arg, int pending) 818 { 819 struct nfe_softc *sc; 820 struct mii_data *mii; 821 struct ifnet *ifp; 822 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 823 uint32_t gmask, rxctl, txctl, val; 824 825 sc = (struct nfe_softc *)arg; 826 827 NFE_LOCK(sc); 828 829 mii = device_get_softc(sc->nfe_miibus); 830 ifp = sc->nfe_ifp; 831 if (mii == NULL || ifp == NULL) { 832 NFE_UNLOCK(sc); 833 return; 834 } 835 836 if (mii->mii_media_status & IFM_ACTIVE) { 837 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 838 sc->nfe_link = 1; 839 } else 840 sc->nfe_link = 0; 841 842 phy = NFE_READ(sc, NFE_PHY_IFACE); 843 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 844 845 seed = NFE_READ(sc, NFE_RNDSEED); 846 seed &= ~NFE_SEED_MASK; 847 848 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) { 849 phy |= NFE_PHY_HDX; /* half-duplex */ 850 misc |= NFE_MISC1_HDX; 851 } 852 853 switch (IFM_SUBTYPE(mii->mii_media_active)) { 854 case IFM_1000_T: /* full-duplex only */ 855 link |= NFE_MEDIA_1000T; 856 seed |= NFE_SEED_1000T; 857 phy |= NFE_PHY_1000T; 858 break; 859 case IFM_100_TX: 860 link |= NFE_MEDIA_100TX; 861 seed |= NFE_SEED_100TX; 862 phy |= NFE_PHY_100TX; 863 break; 864 case IFM_10_T: 865 link |= NFE_MEDIA_10T; 866 seed |= NFE_SEED_10T; 867 break; 868 } 869 870 if ((phy & 0x10000000) != 0) { 871 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 872 val = NFE_R1_MAGIC_1000; 873 else 874 val = NFE_R1_MAGIC_10_100; 875 } else 876 val = NFE_R1_MAGIC_DEFAULT; 877 NFE_WRITE(sc, NFE_SETUP_R1, val); 878 879 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? 
*/ 880 881 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 882 NFE_WRITE(sc, NFE_MISC1, misc); 883 NFE_WRITE(sc, NFE_LINKSPEED, link); 884 885 gmask = mii->mii_media_active & IFM_GMASK; 886 if ((gmask & IFM_FDX) != 0) { 887 /* It seems all hardwares supports Rx pause frames. */ 888 val = NFE_READ(sc, NFE_RXFILTER); 889 if ((gmask & IFM_FLAG0) != 0) 890 val |= NFE_PFF_RX_PAUSE; 891 else 892 val &= ~NFE_PFF_RX_PAUSE; 893 NFE_WRITE(sc, NFE_RXFILTER, val); 894 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 895 val = NFE_READ(sc, NFE_MISC1); 896 if ((gmask & IFM_FLAG1) != 0) { 897 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 898 NFE_TX_PAUSE_FRAME_ENABLE); 899 val |= NFE_MISC1_TX_PAUSE; 900 } else { 901 val &= ~NFE_MISC1_TX_PAUSE; 902 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 903 NFE_TX_PAUSE_FRAME_DISABLE); 904 } 905 NFE_WRITE(sc, NFE_MISC1, val); 906 } 907 } else { 908 /* disable rx/tx pause frames */ 909 val = NFE_READ(sc, NFE_RXFILTER); 910 val &= ~NFE_PFF_RX_PAUSE; 911 NFE_WRITE(sc, NFE_RXFILTER, val); 912 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 913 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 914 NFE_TX_PAUSE_FRAME_DISABLE); 915 val = NFE_READ(sc, NFE_MISC1); 916 val &= ~NFE_MISC1_TX_PAUSE; 917 NFE_WRITE(sc, NFE_MISC1, val); 918 } 919 } 920 921 txctl = NFE_READ(sc, NFE_TX_CTL); 922 rxctl = NFE_READ(sc, NFE_RX_CTL); 923 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 924 txctl |= NFE_TX_START; 925 rxctl |= NFE_RX_START; 926 } else { 927 txctl &= ~NFE_TX_START; 928 rxctl &= ~NFE_RX_START; 929 } 930 NFE_WRITE(sc, NFE_TX_CTL, txctl); 931 NFE_WRITE(sc, NFE_RX_CTL, rxctl); 932 933 NFE_UNLOCK(sc); 934 } 935 936 937 static int 938 nfe_miibus_readreg(device_t dev, int phy, int reg) 939 { 940 struct nfe_softc *sc = device_get_softc(dev); 941 uint32_t val; 942 int ntries; 943 944 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 945 946 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 947 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 948 DELAY(100); 949 } 950 951 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 952 953 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 954 DELAY(100); 955 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 956 break; 957 } 958 if (ntries == NFE_TIMEOUT) { 959 DPRINTFN(sc, 2, "timeout waiting for PHY\n"); 960 return 0; 961 } 962 963 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 964 DPRINTFN(sc, 2, "could not read PHY\n"); 965 return 0; 966 } 967 968 val = NFE_READ(sc, NFE_PHY_DATA); 969 if (val != 0xffffffff && val != 0) 970 sc->mii_phyaddr = phy; 971 972 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 973 974 return (val); 975 } 976 977 978 static int 979 nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 980 { 981 struct nfe_softc *sc = device_get_softc(dev); 982 uint32_t ctl; 983 int ntries; 984 985 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 986 987 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 988 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 989 DELAY(100); 990 } 991 992 NFE_WRITE(sc, NFE_PHY_DATA, val); 993 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 994 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 995 996 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 997 DELAY(100); 998 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 999 break; 1000 } 1001 #ifdef NFE_DEBUG 1002 if (nfedebug >= 2 && ntries == NFE_TIMEOUT) 1003 device_printf(sc->nfe_dev, "could not write to PHY\n"); 1004 #endif 1005 return (0); 1006 } 1007 1008 struct nfe_dmamap_arg { 1009 bus_addr_t nfe_busaddr; 1010 }; 1011 1012 static int 1013 nfe_alloc_rx_ring(struct nfe_softc *sc, struct 
nfe_rx_ring *ring) 1014 { 1015 struct nfe_dmamap_arg ctx; 1016 struct nfe_rx_data *data; 1017 void *desc; 1018 int i, error, descsize; 1019 1020 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1021 desc = ring->desc64; 1022 descsize = sizeof (struct nfe_desc64); 1023 } else { 1024 desc = ring->desc32; 1025 descsize = sizeof (struct nfe_desc32); 1026 } 1027 1028 ring->cur = ring->next = 0; 1029 1030 error = bus_dma_tag_create(sc->nfe_parent_tag, 1031 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1032 BUS_SPACE_MAXADDR, /* lowaddr */ 1033 BUS_SPACE_MAXADDR, /* highaddr */ 1034 NULL, NULL, /* filter, filterarg */ 1035 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1036 NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 1037 0, /* flags */ 1038 NULL, NULL, /* lockfunc, lockarg */ 1039 &ring->rx_desc_tag); 1040 if (error != 0) { 1041 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1042 goto fail; 1043 } 1044 1045 /* allocate memory to desc */ 1046 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | 1047 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); 1048 if (error != 0) { 1049 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1050 goto fail; 1051 } 1052 if (sc->nfe_flags & NFE_40BIT_ADDR) 1053 ring->desc64 = desc; 1054 else 1055 ring->desc32 = desc; 1056 1057 /* map desc to device visible address space */ 1058 ctx.nfe_busaddr = 0; 1059 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, 1060 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1061 if (error != 0) { 1062 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1063 goto fail; 1064 } 1065 ring->physaddr = ctx.nfe_busaddr; 1066 1067 error = bus_dma_tag_create(sc->nfe_parent_tag, 1068 1, 0, /* alignment, boundary */ 1069 BUS_SPACE_MAXADDR, /* lowaddr */ 1070 BUS_SPACE_MAXADDR, /* highaddr */ 1071 NULL, NULL, /* filter, filterarg */ 1072 MCLBYTES, 1, /* maxsize, nsegments */ 1073 MCLBYTES, /* maxsegsize */ 1074 0, /* flags */ 1075 NULL, NULL, /* lockfunc, lockarg */ 1076 &ring->rx_data_tag); 1077 if (error != 0) { 1078 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); 1079 goto fail; 1080 } 1081 1082 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); 1083 if (error != 0) { 1084 device_printf(sc->nfe_dev, 1085 "could not create Rx DMA spare map\n"); 1086 goto fail; 1087 } 1088 1089 /* 1090 * Pre-allocate Rx buffers and populate Rx ring. 1091 */ 1092 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1093 data = &sc->rxq.data[i]; 1094 data->rx_data_map = NULL; 1095 data->m = NULL; 1096 error = bus_dmamap_create(ring->rx_data_tag, 0, 1097 &data->rx_data_map); 1098 if (error != 0) { 1099 device_printf(sc->nfe_dev, 1100 "could not create Rx DMA map\n"); 1101 goto fail; 1102 } 1103 } 1104 1105 fail: 1106 return (error); 1107 } 1108 1109 1110 static void 1111 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1112 { 1113 struct nfe_dmamap_arg ctx; 1114 struct nfe_rx_data *data; 1115 void *desc; 1116 int i, error, descsize; 1117 1118 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1119 return; 1120 if (jumbo_disable != 0) { 1121 device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); 1122 sc->nfe_jumbo_disable = 1; 1123 return; 1124 } 1125 1126 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1127 desc = ring->jdesc64; 1128 descsize = sizeof (struct nfe_desc64); 1129 } else { 1130 desc = ring->jdesc32; 1131 descsize = sizeof (struct nfe_desc32); 1132 } 1133 1134 ring->jcur = ring->jnext = 0; 1135 1136 /* Create DMA tag for jumbo Rx ring. 
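	 * The jumbo path keeps its own descriptor ring; its buffers (below)
	 * are 9k MJUM9BYTES clusters mapped as a single segment so a whole
	 * jumbo frame fits in one descriptor.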
*/ 1137 error = bus_dma_tag_create(sc->nfe_parent_tag, 1138 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1139 BUS_SPACE_MAXADDR, /* lowaddr */ 1140 BUS_SPACE_MAXADDR, /* highaddr */ 1141 NULL, NULL, /* filter, filterarg */ 1142 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1143 1, /* nsegments */ 1144 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 1145 0, /* flags */ 1146 NULL, NULL, /* lockfunc, lockarg */ 1147 &ring->jrx_desc_tag); 1148 if (error != 0) { 1149 device_printf(sc->nfe_dev, 1150 "could not create jumbo ring DMA tag\n"); 1151 goto fail; 1152 } 1153 1154 /* Create DMA tag for jumbo Rx buffers. */ 1155 error = bus_dma_tag_create(sc->nfe_parent_tag, 1156 1, 0, /* alignment, boundary */ 1157 BUS_SPACE_MAXADDR, /* lowaddr */ 1158 BUS_SPACE_MAXADDR, /* highaddr */ 1159 NULL, NULL, /* filter, filterarg */ 1160 MJUM9BYTES, /* maxsize */ 1161 1, /* nsegments */ 1162 MJUM9BYTES, /* maxsegsize */ 1163 0, /* flags */ 1164 NULL, NULL, /* lockfunc, lockarg */ 1165 &ring->jrx_data_tag); 1166 if (error != 0) { 1167 device_printf(sc->nfe_dev, 1168 "could not create jumbo Rx buffer DMA tag\n"); 1169 goto fail; 1170 } 1171 1172 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 1173 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | 1174 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); 1175 if (error != 0) { 1176 device_printf(sc->nfe_dev, 1177 "could not allocate DMA'able memory for jumbo Rx ring\n"); 1178 goto fail; 1179 } 1180 if (sc->nfe_flags & NFE_40BIT_ADDR) 1181 ring->jdesc64 = desc; 1182 else 1183 ring->jdesc32 = desc; 1184 1185 ctx.nfe_busaddr = 0; 1186 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, 1187 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1188 if (error != 0) { 1189 device_printf(sc->nfe_dev, 1190 "could not load DMA'able memory for jumbo Rx ring\n"); 1191 goto fail; 1192 } 1193 ring->jphysaddr = ctx.nfe_busaddr; 1194 1195 /* Create DMA maps for jumbo Rx buffers. */ 1196 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); 1197 if (error != 0) { 1198 device_printf(sc->nfe_dev, 1199 "could not create jumbo Rx DMA spare map\n"); 1200 goto fail; 1201 } 1202 1203 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1204 data = &sc->jrxq.jdata[i]; 1205 data->rx_data_map = NULL; 1206 data->m = NULL; 1207 error = bus_dmamap_create(ring->jrx_data_tag, 0, 1208 &data->rx_data_map); 1209 if (error != 0) { 1210 device_printf(sc->nfe_dev, 1211 "could not create jumbo Rx DMA map\n"); 1212 goto fail; 1213 } 1214 } 1215 1216 return; 1217 1218 fail: 1219 /* 1220 * Running without jumbo frame support is ok for most cases 1221 * so don't fail on creating dma tag/map for jumbo frame. 
1222 */ 1223 nfe_free_jrx_ring(sc, ring); 1224 device_printf(sc->nfe_dev, "disabling jumbo frame support due to " 1225 "resource shortage\n"); 1226 sc->nfe_jumbo_disable = 1; 1227 } 1228 1229 1230 static int 1231 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1232 { 1233 void *desc; 1234 size_t descsize; 1235 int i; 1236 1237 ring->cur = ring->next = 0; 1238 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1239 desc = ring->desc64; 1240 descsize = sizeof (struct nfe_desc64); 1241 } else { 1242 desc = ring->desc32; 1243 descsize = sizeof (struct nfe_desc32); 1244 } 1245 bzero(desc, descsize * NFE_RX_RING_COUNT); 1246 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1247 if (nfe_newbuf(sc, i) != 0) 1248 return (ENOBUFS); 1249 } 1250 1251 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 1252 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1253 1254 return (0); 1255 } 1256 1257 1258 static int 1259 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1260 { 1261 void *desc; 1262 size_t descsize; 1263 int i; 1264 1265 ring->jcur = ring->jnext = 0; 1266 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1267 desc = ring->jdesc64; 1268 descsize = sizeof (struct nfe_desc64); 1269 } else { 1270 desc = ring->jdesc32; 1271 descsize = sizeof (struct nfe_desc32); 1272 } 1273 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT); 1274 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1275 if (nfe_jnewbuf(sc, i) != 0) 1276 return (ENOBUFS); 1277 } 1278 1279 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, 1280 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1281 1282 return (0); 1283 } 1284 1285 1286 static void 1287 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1288 { 1289 struct nfe_rx_data *data; 1290 void *desc; 1291 int i, descsize; 1292 1293 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1294 desc = ring->desc64; 1295 descsize = sizeof (struct nfe_desc64); 1296 } else { 1297 desc = ring->desc32; 1298 descsize = sizeof (struct nfe_desc32); 1299 } 1300 1301 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1302 data = &ring->data[i]; 1303 if (data->rx_data_map != NULL) { 1304 bus_dmamap_destroy(ring->rx_data_tag, 1305 data->rx_data_map); 1306 data->rx_data_map = NULL; 1307 } 1308 if (data->m != NULL) { 1309 m_freem(data->m); 1310 data->m = NULL; 1311 } 1312 } 1313 if (ring->rx_data_tag != NULL) { 1314 if (ring->rx_spare_map != NULL) { 1315 bus_dmamap_destroy(ring->rx_data_tag, 1316 ring->rx_spare_map); 1317 ring->rx_spare_map = NULL; 1318 } 1319 bus_dma_tag_destroy(ring->rx_data_tag); 1320 ring->rx_data_tag = NULL; 1321 } 1322 1323 if (desc != NULL) { 1324 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); 1325 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); 1326 ring->desc64 = NULL; 1327 ring->desc32 = NULL; 1328 ring->rx_desc_map = NULL; 1329 } 1330 if (ring->rx_desc_tag != NULL) { 1331 bus_dma_tag_destroy(ring->rx_desc_tag); 1332 ring->rx_desc_tag = NULL; 1333 } 1334 } 1335 1336 1337 static void 1338 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1339 { 1340 struct nfe_rx_data *data; 1341 void *desc; 1342 int i, descsize; 1343 1344 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1345 return; 1346 1347 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1348 desc = ring->jdesc64; 1349 descsize = sizeof (struct nfe_desc64); 1350 } else { 1351 desc = ring->jdesc32; 1352 descsize = sizeof (struct nfe_desc32); 1353 } 1354 1355 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1356 data = &ring->jdata[i]; 1357 if (data->rx_data_map != NULL) { 1358 bus_dmamap_destroy(ring->jrx_data_tag, 
1359 data->rx_data_map); 1360 data->rx_data_map = NULL; 1361 } 1362 if (data->m != NULL) { 1363 m_freem(data->m); 1364 data->m = NULL; 1365 } 1366 } 1367 if (ring->jrx_data_tag != NULL) { 1368 if (ring->jrx_spare_map != NULL) { 1369 bus_dmamap_destroy(ring->jrx_data_tag, 1370 ring->jrx_spare_map); 1371 ring->jrx_spare_map = NULL; 1372 } 1373 bus_dma_tag_destroy(ring->jrx_data_tag); 1374 ring->jrx_data_tag = NULL; 1375 } 1376 1377 if (desc != NULL) { 1378 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); 1379 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); 1380 ring->jdesc64 = NULL; 1381 ring->jdesc32 = NULL; 1382 ring->jrx_desc_map = NULL; 1383 } 1384 1385 if (ring->jrx_desc_tag != NULL) { 1386 bus_dma_tag_destroy(ring->jrx_desc_tag); 1387 ring->jrx_desc_tag = NULL; 1388 } 1389 } 1390 1391 1392 static int 1393 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1394 { 1395 struct nfe_dmamap_arg ctx; 1396 int i, error; 1397 void *desc; 1398 int descsize; 1399 1400 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1401 desc = ring->desc64; 1402 descsize = sizeof (struct nfe_desc64); 1403 } else { 1404 desc = ring->desc32; 1405 descsize = sizeof (struct nfe_desc32); 1406 } 1407 1408 ring->queued = 0; 1409 ring->cur = ring->next = 0; 1410 1411 error = bus_dma_tag_create(sc->nfe_parent_tag, 1412 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1413 BUS_SPACE_MAXADDR, /* lowaddr */ 1414 BUS_SPACE_MAXADDR, /* highaddr */ 1415 NULL, NULL, /* filter, filterarg */ 1416 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1417 NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 1418 0, /* flags */ 1419 NULL, NULL, /* lockfunc, lockarg */ 1420 &ring->tx_desc_tag); 1421 if (error != 0) { 1422 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1423 goto fail; 1424 } 1425 1426 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | 1427 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); 1428 if (error != 0) { 1429 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1430 goto fail; 1431 } 1432 if (sc->nfe_flags & NFE_40BIT_ADDR) 1433 ring->desc64 = desc; 1434 else 1435 ring->desc32 = desc; 1436 1437 ctx.nfe_busaddr = 0; 1438 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, 1439 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1440 if (error != 0) { 1441 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1442 goto fail; 1443 } 1444 ring->physaddr = ctx.nfe_busaddr; 1445 1446 error = bus_dma_tag_create(sc->nfe_parent_tag, 1447 1, 0, 1448 BUS_SPACE_MAXADDR, 1449 BUS_SPACE_MAXADDR, 1450 NULL, NULL, 1451 NFE_TSO_MAXSIZE, 1452 NFE_MAX_SCATTER, 1453 NFE_TSO_MAXSGSIZE, 1454 0, 1455 NULL, NULL, 1456 &ring->tx_data_tag); 1457 if (error != 0) { 1458 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); 1459 goto fail; 1460 } 1461 1462 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1463 error = bus_dmamap_create(ring->tx_data_tag, 0, 1464 &ring->data[i].tx_data_map); 1465 if (error != 0) { 1466 device_printf(sc->nfe_dev, 1467 "could not create Tx DMA map\n"); 1468 goto fail; 1469 } 1470 } 1471 1472 fail: 1473 return (error); 1474 } 1475 1476 1477 static void 1478 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1479 { 1480 void *desc; 1481 size_t descsize; 1482 1483 sc->nfe_force_tx = 0; 1484 ring->queued = 0; 1485 ring->cur = ring->next = 0; 1486 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1487 desc = ring->desc64; 1488 descsize = sizeof (struct nfe_desc64); 1489 } else { 1490 desc = ring->desc32; 1491 
descsize = sizeof (struct nfe_desc32); 1492 } 1493 bzero(desc, descsize * NFE_TX_RING_COUNT); 1494 1495 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1496 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1497 } 1498 1499 1500 static void 1501 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1502 { 1503 struct nfe_tx_data *data; 1504 void *desc; 1505 int i, descsize; 1506 1507 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1508 desc = ring->desc64; 1509 descsize = sizeof (struct nfe_desc64); 1510 } else { 1511 desc = ring->desc32; 1512 descsize = sizeof (struct nfe_desc32); 1513 } 1514 1515 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1516 data = &ring->data[i]; 1517 1518 if (data->m != NULL) { 1519 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, 1520 BUS_DMASYNC_POSTWRITE); 1521 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); 1522 m_freem(data->m); 1523 data->m = NULL; 1524 } 1525 if (data->tx_data_map != NULL) { 1526 bus_dmamap_destroy(ring->tx_data_tag, 1527 data->tx_data_map); 1528 data->tx_data_map = NULL; 1529 } 1530 } 1531 1532 if (ring->tx_data_tag != NULL) { 1533 bus_dma_tag_destroy(ring->tx_data_tag); 1534 ring->tx_data_tag = NULL; 1535 } 1536 1537 if (desc != NULL) { 1538 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1539 BUS_DMASYNC_POSTWRITE); 1540 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); 1541 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); 1542 ring->desc64 = NULL; 1543 ring->desc32 = NULL; 1544 ring->tx_desc_map = NULL; 1545 bus_dma_tag_destroy(ring->tx_desc_tag); 1546 ring->tx_desc_tag = NULL; 1547 } 1548 } 1549 1550 #ifdef DEVICE_POLLING 1551 static poll_handler_t nfe_poll; 1552 1553 1554 static int 1555 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1556 { 1557 struct nfe_softc *sc = ifp->if_softc; 1558 uint32_t r; 1559 int rx_npkts = 0; 1560 1561 NFE_LOCK(sc); 1562 1563 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1564 NFE_UNLOCK(sc); 1565 return (rx_npkts); 1566 } 1567 1568 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1569 rx_npkts = nfe_jrxeof(sc, count, &rx_npkts); 1570 else 1571 rx_npkts = nfe_rxeof(sc, count, &rx_npkts); 1572 nfe_txeof(sc); 1573 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1574 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1575 1576 if (cmd == POLL_AND_CHECK_STATUS) { 1577 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1578 NFE_UNLOCK(sc); 1579 return (rx_npkts); 1580 } 1581 NFE_WRITE(sc, sc->nfe_irq_status, r); 1582 1583 if (r & NFE_IRQ_LINK) { 1584 NFE_READ(sc, NFE_PHY_STATUS); 1585 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1586 DPRINTF(sc, "link state changed\n"); 1587 } 1588 } 1589 NFE_UNLOCK(sc); 1590 return (rx_npkts); 1591 } 1592 #endif /* DEVICE_POLLING */ 1593 1594 static void 1595 nfe_set_intr(struct nfe_softc *sc) 1596 { 1597 1598 if (sc->nfe_msi != 0) 1599 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1600 } 1601 1602 1603 /* In MSIX, a write to mask reegisters behaves as XOR. */ 1604 static __inline void 1605 nfe_enable_intr(struct nfe_softc *sc) 1606 { 1607 1608 if (sc->nfe_msix != 0) { 1609 /* XXX Should have a better way to enable interrupts! */ 1610 if (NFE_READ(sc, sc->nfe_irq_mask) == 0) 1611 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1612 } else 1613 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1614 } 1615 1616 1617 static __inline void 1618 nfe_disable_intr(struct nfe_softc *sc) 1619 { 1620 1621 if (sc->nfe_msix != 0) { 1622 /* XXX Should have a better way to disable interrupts! 
*/ 1623 if (NFE_READ(sc, sc->nfe_irq_mask) != 0) 1624 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1625 } else 1626 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1627 } 1628 1629 1630 static int 1631 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1632 { 1633 struct nfe_softc *sc; 1634 struct ifreq *ifr; 1635 struct mii_data *mii; 1636 int error, init, mask; 1637 1638 sc = ifp->if_softc; 1639 ifr = (struct ifreq *) data; 1640 error = 0; 1641 init = 0; 1642 switch (cmd) { 1643 case SIOCSIFMTU: 1644 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) 1645 error = EINVAL; 1646 else if (ifp->if_mtu != ifr->ifr_mtu) { 1647 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) || 1648 (sc->nfe_jumbo_disable != 0)) && 1649 ifr->ifr_mtu > ETHERMTU) 1650 error = EINVAL; 1651 else { 1652 NFE_LOCK(sc); 1653 ifp->if_mtu = ifr->ifr_mtu; 1654 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1655 nfe_init_locked(sc); 1656 NFE_UNLOCK(sc); 1657 } 1658 } 1659 break; 1660 case SIOCSIFFLAGS: 1661 NFE_LOCK(sc); 1662 if (ifp->if_flags & IFF_UP) { 1663 /* 1664 * If only the PROMISC or ALLMULTI flag changes, then 1665 * don't do a full re-init of the chip, just update 1666 * the Rx filter. 1667 */ 1668 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && 1669 ((ifp->if_flags ^ sc->nfe_if_flags) & 1670 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1671 nfe_setmulti(sc); 1672 else 1673 nfe_init_locked(sc); 1674 } else { 1675 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1676 nfe_stop(ifp); 1677 } 1678 sc->nfe_if_flags = ifp->if_flags; 1679 NFE_UNLOCK(sc); 1680 error = 0; 1681 break; 1682 case SIOCADDMULTI: 1683 case SIOCDELMULTI: 1684 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1685 NFE_LOCK(sc); 1686 nfe_setmulti(sc); 1687 NFE_UNLOCK(sc); 1688 error = 0; 1689 } 1690 break; 1691 case SIOCSIFMEDIA: 1692 case SIOCGIFMEDIA: 1693 mii = device_get_softc(sc->nfe_miibus); 1694 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1695 break; 1696 case SIOCSIFCAP: 1697 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1698 #ifdef DEVICE_POLLING 1699 if ((mask & IFCAP_POLLING) != 0) { 1700 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1701 error = ether_poll_register(nfe_poll, ifp); 1702 if (error) 1703 break; 1704 NFE_LOCK(sc); 1705 nfe_disable_intr(sc); 1706 ifp->if_capenable |= IFCAP_POLLING; 1707 NFE_UNLOCK(sc); 1708 } else { 1709 error = ether_poll_deregister(ifp); 1710 /* Enable interrupt even in error case */ 1711 NFE_LOCK(sc); 1712 nfe_enable_intr(sc); 1713 ifp->if_capenable &= ~IFCAP_POLLING; 1714 NFE_UNLOCK(sc); 1715 } 1716 } 1717 #endif /* DEVICE_POLLING */ 1718 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1719 (mask & IFCAP_HWCSUM) != 0) { 1720 ifp->if_capenable ^= IFCAP_HWCSUM; 1721 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 1722 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 1723 ifp->if_hwassist |= NFE_CSUM_FEATURES; 1724 else 1725 ifp->if_hwassist &= ~NFE_CSUM_FEATURES; 1726 init++; 1727 } 1728 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 && 1729 (mask & IFCAP_VLAN_HWTAGGING) != 0) { 1730 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1731 init++; 1732 } 1733 /* 1734 * XXX 1735 * It seems that VLAN stripping requires Rx checksum offload. 1736 * Unfortunately FreeBSD has no way to disable only Rx side 1737 * VLAN stripping. So when we know Rx checksum offload is 1738 * disabled turn entire hardware VLAN assist off. 
1739 */ 1740 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) == 1741 (NFE_HW_CSUM | NFE_HW_VLAN)) { 1742 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 1743 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING; 1744 } 1745 1746 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1747 (mask & IFCAP_TSO4) != 0) { 1748 ifp->if_capenable ^= IFCAP_TSO4; 1749 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && 1750 (IFCAP_TSO4 & ifp->if_capabilities) != 0) 1751 ifp->if_hwassist |= CSUM_TSO; 1752 else 1753 ifp->if_hwassist &= ~CSUM_TSO; 1754 } 1755 1756 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1757 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1758 nfe_init(sc); 1759 } 1760 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) 1761 VLAN_CAPABILITIES(ifp); 1762 break; 1763 default: 1764 error = ether_ioctl(ifp, cmd, data); 1765 break; 1766 } 1767 1768 return (error); 1769 } 1770 1771 1772 static int 1773 nfe_intr(void *arg) 1774 { 1775 struct nfe_softc *sc; 1776 uint32_t status; 1777 1778 sc = (struct nfe_softc *)arg; 1779 1780 status = NFE_READ(sc, sc->nfe_irq_status); 1781 if (status == 0 || status == 0xffffffff) 1782 return (FILTER_STRAY); 1783 nfe_disable_intr(sc); 1784 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1785 1786 return (FILTER_HANDLED); 1787 } 1788 1789 1790 static void 1791 nfe_int_task(void *arg, int pending) 1792 { 1793 struct nfe_softc *sc = arg; 1794 struct ifnet *ifp = sc->nfe_ifp; 1795 uint32_t r; 1796 int domore; 1797 1798 NFE_LOCK(sc); 1799 1800 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1801 nfe_enable_intr(sc); 1802 NFE_UNLOCK(sc); 1803 return; /* not for us */ 1804 } 1805 NFE_WRITE(sc, sc->nfe_irq_status, r); 1806 1807 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); 1808 1809 #ifdef DEVICE_POLLING 1810 if (ifp->if_capenable & IFCAP_POLLING) { 1811 NFE_UNLOCK(sc); 1812 return; 1813 } 1814 #endif 1815 1816 if (r & NFE_IRQ_LINK) { 1817 NFE_READ(sc, NFE_PHY_STATUS); 1818 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1819 DPRINTF(sc, "link state changed\n"); 1820 } 1821 1822 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1823 NFE_UNLOCK(sc); 1824 nfe_enable_intr(sc); 1825 return; 1826 } 1827 1828 domore = 0; 1829 /* check Rx ring */ 1830 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1831 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL); 1832 else 1833 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL); 1834 /* check Tx ring */ 1835 nfe_txeof(sc); 1836 1837 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1838 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1839 1840 NFE_UNLOCK(sc); 1841 1842 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { 1843 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1844 return; 1845 } 1846 1847 /* Reenable interrupts. */ 1848 nfe_enable_intr(sc); 1849 } 1850 1851 1852 static __inline void 1853 nfe_discard_rxbuf(struct nfe_softc *sc, int idx) 1854 { 1855 struct nfe_desc32 *desc32; 1856 struct nfe_desc64 *desc64; 1857 struct nfe_rx_data *data; 1858 struct mbuf *m; 1859 1860 data = &sc->rxq.data[idx]; 1861 m = data->m; 1862 1863 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1864 desc64 = &sc->rxq.desc64[idx]; 1865 /* VLAN packet may have overwritten it. 
*/ 1866 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1867 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1868 desc64->length = htole16(m->m_len); 1869 desc64->flags = htole16(NFE_RX_READY); 1870 } else { 1871 desc32 = &sc->rxq.desc32[idx]; 1872 desc32->length = htole16(m->m_len); 1873 desc32->flags = htole16(NFE_RX_READY); 1874 } 1875 } 1876 1877 1878 static __inline void 1879 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) 1880 { 1881 struct nfe_desc32 *desc32; 1882 struct nfe_desc64 *desc64; 1883 struct nfe_rx_data *data; 1884 struct mbuf *m; 1885 1886 data = &sc->jrxq.jdata[idx]; 1887 m = data->m; 1888 1889 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1890 desc64 = &sc->jrxq.jdesc64[idx]; 1891 /* VLAN packet may have overwritten it. */ 1892 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1893 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1894 desc64->length = htole16(m->m_len); 1895 desc64->flags = htole16(NFE_RX_READY); 1896 } else { 1897 desc32 = &sc->jrxq.jdesc32[idx]; 1898 desc32->length = htole16(m->m_len); 1899 desc32->flags = htole16(NFE_RX_READY); 1900 } 1901 } 1902 1903 1904 static int 1905 nfe_newbuf(struct nfe_softc *sc, int idx) 1906 { 1907 struct nfe_rx_data *data; 1908 struct nfe_desc32 *desc32; 1909 struct nfe_desc64 *desc64; 1910 struct mbuf *m; 1911 bus_dma_segment_t segs[1]; 1912 bus_dmamap_t map; 1913 int nsegs; 1914 1915 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1916 if (m == NULL) 1917 return (ENOBUFS); 1918 1919 m->m_len = m->m_pkthdr.len = MCLBYTES; 1920 m_adj(m, ETHER_ALIGN); 1921 1922 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, 1923 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 1924 m_freem(m); 1925 return (ENOBUFS); 1926 } 1927 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1928 1929 data = &sc->rxq.data[idx]; 1930 if (data->m != NULL) { 1931 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 1932 BUS_DMASYNC_POSTREAD); 1933 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); 1934 } 1935 map = data->rx_data_map; 1936 data->rx_data_map = sc->rxq.rx_spare_map; 1937 sc->rxq.rx_spare_map = map; 1938 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 1939 BUS_DMASYNC_PREREAD); 1940 data->paddr = segs[0].ds_addr; 1941 data->m = m; 1942 /* update mapping address in h/w descriptor */ 1943 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1944 desc64 = &sc->rxq.desc64[idx]; 1945 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 1946 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 1947 desc64->length = htole16(segs[0].ds_len); 1948 desc64->flags = htole16(NFE_RX_READY); 1949 } else { 1950 desc32 = &sc->rxq.desc32[idx]; 1951 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 1952 desc32->length = htole16(segs[0].ds_len); 1953 desc32->flags = htole16(NFE_RX_READY); 1954 } 1955 1956 return (0); 1957 } 1958 1959 1960 static int 1961 nfe_jnewbuf(struct nfe_softc *sc, int idx) 1962 { 1963 struct nfe_rx_data *data; 1964 struct nfe_desc32 *desc32; 1965 struct nfe_desc64 *desc64; 1966 struct mbuf *m; 1967 bus_dma_segment_t segs[1]; 1968 bus_dmamap_t map; 1969 int nsegs; 1970 1971 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 1972 if (m == NULL) 1973 return (ENOBUFS); 1974 if ((m->m_flags & M_EXT) == 0) { 1975 m_freem(m); 1976 return (ENOBUFS); 1977 } 1978 m->m_pkthdr.len = m->m_len = MJUM9BYTES; 1979 m_adj(m, ETHER_ALIGN); 1980 1981 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, 1982 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 
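		/*
		 * DMA load of the new jumbo mbuf failed; drop it and keep
		 * the buffer currently mapped for this slot.
		 */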
1983 m_freem(m); 1984 return (ENOBUFS); 1985 } 1986 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1987 1988 data = &sc->jrxq.jdata[idx]; 1989 if (data->m != NULL) { 1990 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 1991 BUS_DMASYNC_POSTREAD); 1992 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); 1993 } 1994 map = data->rx_data_map; 1995 data->rx_data_map = sc->jrxq.jrx_spare_map; 1996 sc->jrxq.jrx_spare_map = map; 1997 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 1998 BUS_DMASYNC_PREREAD); 1999 data->paddr = segs[0].ds_addr; 2000 data->m = m; 2001 /* update mapping address in h/w descriptor */ 2002 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2003 desc64 = &sc->jrxq.jdesc64[idx]; 2004 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 2005 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2006 desc64->length = htole16(segs[0].ds_len); 2007 desc64->flags = htole16(NFE_RX_READY); 2008 } else { 2009 desc32 = &sc->jrxq.jdesc32[idx]; 2010 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 2011 desc32->length = htole16(segs[0].ds_len); 2012 desc32->flags = htole16(NFE_RX_READY); 2013 } 2014 2015 return (0); 2016 } 2017 2018 2019 static int 2020 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp) 2021 { 2022 struct ifnet *ifp = sc->nfe_ifp; 2023 struct nfe_desc32 *desc32; 2024 struct nfe_desc64 *desc64; 2025 struct nfe_rx_data *data; 2026 struct mbuf *m; 2027 uint16_t flags; 2028 int len, prog, rx_npkts; 2029 uint32_t vtag = 0; 2030 2031 rx_npkts = 0; 2032 NFE_LOCK_ASSERT(sc); 2033 2034 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2035 BUS_DMASYNC_POSTREAD); 2036 2037 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { 2038 if (count <= 0) 2039 break; 2040 count--; 2041 2042 data = &sc->rxq.data[sc->rxq.cur]; 2043 2044 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2045 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 2046 vtag = le32toh(desc64->physaddr[1]); 2047 flags = le16toh(desc64->flags); 2048 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2049 } else { 2050 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 2051 flags = le16toh(desc32->flags); 2052 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2053 } 2054 2055 if (flags & NFE_RX_READY) 2056 break; 2057 prog++; 2058 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2059 if (!(flags & NFE_RX_VALID_V1)) { 2060 ifp->if_ierrors++; 2061 nfe_discard_rxbuf(sc, sc->rxq.cur); 2062 continue; 2063 } 2064 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2065 flags &= ~NFE_RX_ERROR; 2066 len--; /* fix buffer length */ 2067 } 2068 } else { 2069 if (!(flags & NFE_RX_VALID_V2)) { 2070 ifp->if_ierrors++; 2071 nfe_discard_rxbuf(sc, sc->rxq.cur); 2072 continue; 2073 } 2074 2075 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2076 flags &= ~NFE_RX_ERROR; 2077 len--; /* fix buffer length */ 2078 } 2079 } 2080 2081 if (flags & NFE_RX_ERROR) { 2082 ifp->if_ierrors++; 2083 nfe_discard_rxbuf(sc, sc->rxq.cur); 2084 continue; 2085 } 2086 2087 m = data->m; 2088 if (nfe_newbuf(sc, sc->rxq.cur) != 0) { 2089 ifp->if_iqdrops++; 2090 nfe_discard_rxbuf(sc, sc->rxq.cur); 2091 continue; 2092 } 2093 2094 if ((vtag & NFE_RX_VTAG) != 0 && 2095 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2096 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2097 m->m_flags |= M_VLANTAG; 2098 } 2099 2100 m->m_pkthdr.len = m->m_len = len; 2101 m->m_pkthdr.rcvif = ifp; 2102 2103 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2104 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2105 
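				/*
				 * The chip validated the IP header checksum.
				 * If the TCP/UDP checksum was verified as
				 * well, report a valid pseudo-header checksum
				 * so the stack skips software verification.
				 */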
m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2106 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2107 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2108 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2109 m->m_pkthdr.csum_flags |= 2110 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2111 m->m_pkthdr.csum_data = 0xffff; 2112 } 2113 } 2114 } 2115 2116 ifp->if_ipackets++; 2117 2118 NFE_UNLOCK(sc); 2119 (*ifp->if_input)(ifp, m); 2120 NFE_LOCK(sc); 2121 rx_npkts++; 2122 } 2123 2124 if (prog > 0) 2125 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2126 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2127 2128 if (rx_npktsp != NULL) 2129 *rx_npktsp = rx_npkts; 2130 return (count > 0 ? 0 : EAGAIN); 2131 } 2132 2133 2134 static int 2135 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp) 2136 { 2137 struct ifnet *ifp = sc->nfe_ifp; 2138 struct nfe_desc32 *desc32; 2139 struct nfe_desc64 *desc64; 2140 struct nfe_rx_data *data; 2141 struct mbuf *m; 2142 uint16_t flags; 2143 int len, prog, rx_npkts; 2144 uint32_t vtag = 0; 2145 2146 rx_npkts = 0; 2147 NFE_LOCK_ASSERT(sc); 2148 2149 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2150 BUS_DMASYNC_POSTREAD); 2151 2152 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), 2153 vtag = 0) { 2154 if (count <= 0) 2155 break; 2156 count--; 2157 2158 data = &sc->jrxq.jdata[sc->jrxq.jcur]; 2159 2160 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2161 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; 2162 vtag = le32toh(desc64->physaddr[1]); 2163 flags = le16toh(desc64->flags); 2164 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2165 } else { 2166 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; 2167 flags = le16toh(desc32->flags); 2168 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2169 } 2170 2171 if (flags & NFE_RX_READY) 2172 break; 2173 prog++; 2174 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2175 if (!(flags & NFE_RX_VALID_V1)) { 2176 ifp->if_ierrors++; 2177 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2178 continue; 2179 } 2180 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2181 flags &= ~NFE_RX_ERROR; 2182 len--; /* fix buffer length */ 2183 } 2184 } else { 2185 if (!(flags & NFE_RX_VALID_V2)) { 2186 ifp->if_ierrors++; 2187 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2188 continue; 2189 } 2190 2191 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2192 flags &= ~NFE_RX_ERROR; 2193 len--; /* fix buffer length */ 2194 } 2195 } 2196 2197 if (flags & NFE_RX_ERROR) { 2198 ifp->if_ierrors++; 2199 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2200 continue; 2201 } 2202 2203 m = data->m; 2204 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { 2205 ifp->if_iqdrops++; 2206 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2207 continue; 2208 } 2209 2210 if ((vtag & NFE_RX_VTAG) != 0 && 2211 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2212 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2213 m->m_flags |= M_VLANTAG; 2214 } 2215 2216 m->m_pkthdr.len = m->m_len = len; 2217 m->m_pkthdr.rcvif = ifp; 2218 2219 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2220 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2221 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2222 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2223 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2224 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2225 m->m_pkthdr.csum_flags |= 2226 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2227 m->m_pkthdr.csum_data = 0xffff; 2228 } 2229 } 2230 } 2231 2232 ifp->if_ipackets++; 2233 2234 NFE_UNLOCK(sc); 2235 (*ifp->if_input)(ifp, m); 2236 NFE_LOCK(sc); 2237 rx_npkts++; 2238 } 2239 2240 if (prog > 0) 2241 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, 
sc->jrxq.jrx_desc_map, 2242 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2243 2244 if (rx_npktsp != NULL) 2245 *rx_npktsp = rx_npkts; 2246 return (count > 0 ? 0 : EAGAIN); 2247 } 2248 2249 2250 static void 2251 nfe_txeof(struct nfe_softc *sc) 2252 { 2253 struct ifnet *ifp = sc->nfe_ifp; 2254 struct nfe_desc32 *desc32; 2255 struct nfe_desc64 *desc64; 2256 struct nfe_tx_data *data = NULL; 2257 uint16_t flags; 2258 int cons, prog; 2259 2260 NFE_LOCK_ASSERT(sc); 2261 2262 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2263 BUS_DMASYNC_POSTREAD); 2264 2265 prog = 0; 2266 for (cons = sc->txq.next; cons != sc->txq.cur; 2267 NFE_INC(cons, NFE_TX_RING_COUNT)) { 2268 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2269 desc64 = &sc->txq.desc64[cons]; 2270 flags = le16toh(desc64->flags); 2271 } else { 2272 desc32 = &sc->txq.desc32[cons]; 2273 flags = le16toh(desc32->flags); 2274 } 2275 2276 if (flags & NFE_TX_VALID) 2277 break; 2278 2279 prog++; 2280 sc->txq.queued--; 2281 data = &sc->txq.data[cons]; 2282 2283 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2284 if ((flags & NFE_TX_LASTFRAG_V1) == 0) 2285 continue; 2286 if ((flags & NFE_TX_ERROR_V1) != 0) { 2287 device_printf(sc->nfe_dev, 2288 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); 2289 2290 ifp->if_oerrors++; 2291 } else 2292 ifp->if_opackets++; 2293 } else { 2294 if ((flags & NFE_TX_LASTFRAG_V2) == 0) 2295 continue; 2296 if ((flags & NFE_TX_ERROR_V2) != 0) { 2297 device_printf(sc->nfe_dev, 2298 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); 2299 ifp->if_oerrors++; 2300 } else 2301 ifp->if_opackets++; 2302 } 2303 2304 /* last fragment of the mbuf chain transmitted */ 2305 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); 2306 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, 2307 BUS_DMASYNC_POSTWRITE); 2308 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); 2309 m_freem(data->m); 2310 data->m = NULL; 2311 } 2312 2313 if (prog > 0) { 2314 sc->nfe_force_tx = 0; 2315 sc->txq.next = cons; 2316 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2317 if (sc->txq.queued == 0) 2318 sc->nfe_watchdog_timer = 0; 2319 } 2320 } 2321 2322 static int 2323 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) 2324 { 2325 struct nfe_desc32 *desc32 = NULL; 2326 struct nfe_desc64 *desc64 = NULL; 2327 bus_dmamap_t map; 2328 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 2329 int error, i, nsegs, prod, si; 2330 uint32_t tso_segsz; 2331 uint16_t cflags, flags; 2332 struct mbuf *m; 2333 2334 prod = si = sc->txq.cur; 2335 map = sc->txq.data[prod].tx_data_map; 2336 2337 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, 2338 &nsegs, BUS_DMA_NOWAIT); 2339 if (error == EFBIG) { 2340 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER); 2341 if (m == NULL) { 2342 m_freem(*m_head); 2343 *m_head = NULL; 2344 return (ENOBUFS); 2345 } 2346 *m_head = m; 2347 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, 2348 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2349 if (error != 0) { 2350 m_freem(*m_head); 2351 *m_head = NULL; 2352 return (ENOBUFS); 2353 } 2354 } else if (error != 0) 2355 return (error); 2356 if (nsegs == 0) { 2357 m_freem(*m_head); 2358 *m_head = NULL; 2359 return (EIO); 2360 } 2361 2362 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { 2363 bus_dmamap_unload(sc->txq.tx_data_tag, map); 2364 return (ENOBUFS); 2365 } 2366 2367 m = *m_head; 2368 cflags = flags = 0; 2369 tso_segsz = 0; 2370 if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { 2371 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2372 cflags |= 
NFE_TX_IP_CSUM; 2373 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2374 cflags |= NFE_TX_TCP_UDP_CSUM; 2375 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2376 cflags |= NFE_TX_TCP_UDP_CSUM; 2377 } 2378 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2379 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz << 2380 NFE_TX_TSO_SHIFT; 2381 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); 2382 cflags |= NFE_TX_TSO; 2383 } 2384 2385 for (i = 0; i < nsegs; i++) { 2386 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2387 desc64 = &sc->txq.desc64[prod]; 2388 desc64->physaddr[0] = 2389 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 2390 desc64->physaddr[1] = 2391 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2392 desc64->vtag = 0; 2393 desc64->length = htole16(segs[i].ds_len - 1); 2394 desc64->flags = htole16(flags); 2395 } else { 2396 desc32 = &sc->txq.desc32[prod]; 2397 desc32->physaddr = 2398 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2399 desc32->length = htole16(segs[i].ds_len - 1); 2400 desc32->flags = htole16(flags); 2401 } 2402 2403 /* 2404 * Setting of the valid bit in the first descriptor is 2405 * deferred until the whole chain is fully setup. 2406 */ 2407 flags |= NFE_TX_VALID; 2408 2409 sc->txq.queued++; 2410 NFE_INC(prod, NFE_TX_RING_COUNT); 2411 } 2412 2413 /* 2414 * the whole mbuf chain has been DMA mapped, fix last/first descriptor. 2415 * csum flags, vtag and TSO belong to the first fragment only. 2416 */ 2417 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2418 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 2419 desc64 = &sc->txq.desc64[si]; 2420 if ((m->m_flags & M_VLANTAG) != 0) 2421 desc64->vtag = htole32(NFE_TX_VTAG | 2422 m->m_pkthdr.ether_vtag); 2423 if (tso_segsz != 0) { 2424 /* 2425 * XXX 2426 * The following indicates the descriptor element 2427 * is a 32bit quantity. 2428 */ 2429 desc64->length |= htole16((uint16_t)tso_segsz); 2430 desc64->flags |= htole16(tso_segsz >> 16); 2431 } 2432 /* 2433 * finally, set the valid/checksum/TSO bit in the first 2434 * descriptor. 2435 */ 2436 desc64->flags |= htole16(NFE_TX_VALID | cflags); 2437 } else { 2438 if (sc->nfe_flags & NFE_JUMBO_SUP) 2439 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); 2440 else 2441 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); 2442 desc32 = &sc->txq.desc32[si]; 2443 if (tso_segsz != 0) { 2444 /* 2445 * XXX 2446 * The following indicates the descriptor element 2447 * is a 32bit quantity. 2448 */ 2449 desc32->length |= htole16((uint16_t)tso_segsz); 2450 desc32->flags |= htole16(tso_segsz >> 16); 2451 } 2452 /* 2453 * finally, set the valid/checksum/TSO bit in the first 2454 * descriptor. 
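		 * Ownership is handed to the chip only after every fragment
		 * in the chain has been filled in.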
2455 */ 2456 desc32->flags |= htole16(NFE_TX_VALID | cflags); 2457 } 2458 2459 sc->txq.cur = prod; 2460 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; 2461 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; 2462 sc->txq.data[prod].tx_data_map = map; 2463 sc->txq.data[prod].m = m; 2464 2465 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); 2466 2467 return (0); 2468 } 2469 2470 2471 static void 2472 nfe_setmulti(struct nfe_softc *sc) 2473 { 2474 struct ifnet *ifp = sc->nfe_ifp; 2475 struct ifmultiaddr *ifma; 2476 int i; 2477 uint32_t filter; 2478 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2479 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 2480 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2481 }; 2482 2483 NFE_LOCK_ASSERT(sc); 2484 2485 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2486 bzero(addr, ETHER_ADDR_LEN); 2487 bzero(mask, ETHER_ADDR_LEN); 2488 goto done; 2489 } 2490 2491 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2492 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2493 2494 if_maddr_rlock(ifp); 2495 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2496 u_char *addrp; 2497 2498 if (ifma->ifma_addr->sa_family != AF_LINK) 2499 continue; 2500 2501 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2502 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2503 u_int8_t mcaddr = addrp[i]; 2504 addr[i] &= mcaddr; 2505 mask[i] &= ~mcaddr; 2506 } 2507 } 2508 if_maddr_runlock(ifp); 2509 2510 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2511 mask[i] |= addr[i]; 2512 } 2513 2514 done: 2515 addr[0] |= 0x01; /* make sure multicast bit is set */ 2516 2517 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2518 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2519 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2520 addr[5] << 8 | addr[4]); 2521 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2522 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2523 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2524 mask[5] << 8 | mask[4]); 2525 2526 filter = NFE_READ(sc, NFE_RXFILTER); 2527 filter &= NFE_PFF_RX_PAUSE; 2528 filter |= NFE_RXFILTER_MAGIC; 2529 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M; 2530 NFE_WRITE(sc, NFE_RXFILTER, filter); 2531 } 2532 2533 2534 static void 2535 nfe_tx_task(void *arg, int pending) 2536 { 2537 struct ifnet *ifp; 2538 2539 ifp = (struct ifnet *)arg; 2540 nfe_start(ifp); 2541 } 2542 2543 2544 static void 2545 nfe_start(struct ifnet *ifp) 2546 { 2547 struct nfe_softc *sc = ifp->if_softc; 2548 struct mbuf *m0; 2549 int enq; 2550 2551 NFE_LOCK(sc); 2552 2553 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2554 IFF_DRV_RUNNING || sc->nfe_link == 0) { 2555 NFE_UNLOCK(sc); 2556 return; 2557 } 2558 2559 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 2560 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 2561 if (m0 == NULL) 2562 break; 2563 2564 if (nfe_encap(sc, &m0) != 0) { 2565 if (m0 == NULL) 2566 break; 2567 IFQ_DRV_PREPEND(&ifp->if_snd, m0); 2568 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2569 break; 2570 } 2571 enq++; 2572 ETHER_BPF_MTAP(ifp, m0); 2573 } 2574 2575 if (enq > 0) { 2576 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2577 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2578 2579 /* kick Tx */ 2580 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2581 2582 /* 2583 * Set a timeout in case the chip goes out to lunch. 
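		 * nfe_watchdog() will recover the interface if no Tx
		 * completion is seen before the timer (about five seconds)
		 * expires.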
2584 */ 2585 sc->nfe_watchdog_timer = 5; 2586 } 2587 2588 NFE_UNLOCK(sc); 2589 } 2590 2591 2592 static void 2593 nfe_watchdog(struct ifnet *ifp) 2594 { 2595 struct nfe_softc *sc = ifp->if_softc; 2596 2597 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer) 2598 return; 2599 2600 /* Check if we've lost Tx completion interrupt. */ 2601 nfe_txeof(sc); 2602 if (sc->txq.queued == 0) { 2603 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2604 "-- recovering\n"); 2605 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2606 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 2607 return; 2608 } 2609 /* Check if we've lost start Tx command. */ 2610 sc->nfe_force_tx++; 2611 if (sc->nfe_force_tx <= 3) { 2612 /* 2613 * If this is the case for watchdog timeout, the following 2614 * code should go to nfe_txeof(). 2615 */ 2616 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2617 return; 2618 } 2619 sc->nfe_force_tx = 0; 2620 2621 if_printf(ifp, "watchdog timeout\n"); 2622 2623 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2624 ifp->if_oerrors++; 2625 nfe_init_locked(sc); 2626 } 2627 2628 2629 static void 2630 nfe_init(void *xsc) 2631 { 2632 struct nfe_softc *sc = xsc; 2633 2634 NFE_LOCK(sc); 2635 nfe_init_locked(sc); 2636 NFE_UNLOCK(sc); 2637 } 2638 2639 2640 static void 2641 nfe_init_locked(void *xsc) 2642 { 2643 struct nfe_softc *sc = xsc; 2644 struct ifnet *ifp = sc->nfe_ifp; 2645 struct mii_data *mii; 2646 uint32_t val; 2647 int error; 2648 2649 NFE_LOCK_ASSERT(sc); 2650 2651 mii = device_get_softc(sc->nfe_miibus); 2652 2653 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2654 return; 2655 2656 nfe_stop(ifp); 2657 2658 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 2659 2660 nfe_init_tx_ring(sc, &sc->txq); 2661 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) 2662 error = nfe_init_jrx_ring(sc, &sc->jrxq); 2663 else 2664 error = nfe_init_rx_ring(sc, &sc->rxq); 2665 if (error != 0) { 2666 device_printf(sc->nfe_dev, 2667 "initialization failed: no memory for rx buffers\n"); 2668 nfe_stop(ifp); 2669 return; 2670 } 2671 2672 val = 0; 2673 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) 2674 val |= NFE_MAC_ADDR_INORDER; 2675 NFE_WRITE(sc, NFE_TX_UNK, val); 2676 NFE_WRITE(sc, NFE_STATUS, 0); 2677 2678 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) 2679 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); 2680 2681 sc->rxtxctl = NFE_RXTX_BIT2; 2682 if (sc->nfe_flags & NFE_40BIT_ADDR) 2683 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 2684 else if (sc->nfe_flags & NFE_JUMBO_SUP) 2685 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 2686 2687 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2688 sc->rxtxctl |= NFE_RXTX_RXCSUM; 2689 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2690 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; 2691 2692 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 2693 DELAY(10); 2694 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2695 2696 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2697 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 2698 else 2699 NFE_WRITE(sc, NFE_VTAG_CTL, 0); 2700 2701 NFE_WRITE(sc, NFE_SETUP_R6, 0); 2702 2703 /* set MAC address */ 2704 nfe_set_macaddr(sc, IF_LLADDR(ifp)); 2705 2706 /* tell MAC where rings are in memory */ 2707 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) { 2708 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2709 NFE_ADDR_HI(sc->jrxq.jphysaddr)); 2710 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 2711 NFE_ADDR_LO(sc->jrxq.jphysaddr)); 2712 } else { 2713 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2714 NFE_ADDR_HI(sc->rxq.physaddr)); 2715 NFE_WRITE(sc, 
NFE_RX_RING_ADDR_LO, 2716 NFE_ADDR_LO(sc->rxq.physaddr)); 2717 } 2718 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr)); 2719 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 2720 2721 NFE_WRITE(sc, NFE_RING_SIZE, 2722 (NFE_RX_RING_COUNT - 1) << 16 | 2723 (NFE_TX_RING_COUNT - 1)); 2724 2725 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize); 2726 2727 /* force MAC to wakeup */ 2728 val = NFE_READ(sc, NFE_PWR_STATE); 2729 if ((val & NFE_PWR_WAKEUP) == 0) 2730 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP); 2731 DELAY(10); 2732 val = NFE_READ(sc, NFE_PWR_STATE); 2733 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID); 2734 2735 #if 1 2736 /* configure interrupts coalescing/mitigation */ 2737 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 2738 #else 2739 /* no interrupt mitigation: one interrupt per packet */ 2740 NFE_WRITE(sc, NFE_IMTIMER, 970); 2741 #endif 2742 2743 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100); 2744 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 2745 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 2746 2747 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 2748 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 2749 2750 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 2751 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC); 2752 2753 sc->rxtxctl &= ~NFE_RXTX_BIT2; 2754 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2755 DELAY(10); 2756 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 2757 2758 /* set Rx filter */ 2759 nfe_setmulti(sc); 2760 2761 /* enable Rx */ 2762 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 2763 2764 /* enable Tx */ 2765 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 2766 2767 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 2768 2769 /* Clear hardware stats. */ 2770 nfe_stats_clear(sc); 2771 2772 #ifdef DEVICE_POLLING 2773 if (ifp->if_capenable & IFCAP_POLLING) 2774 nfe_disable_intr(sc); 2775 else 2776 #endif 2777 nfe_set_intr(sc); 2778 nfe_enable_intr(sc); /* enable interrupts */ 2779 2780 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2781 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2782 2783 sc->nfe_link = 0; 2784 mii_mediachg(mii); 2785 2786 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2787 } 2788 2789 2790 static void 2791 nfe_stop(struct ifnet *ifp) 2792 { 2793 struct nfe_softc *sc = ifp->if_softc; 2794 struct nfe_rx_ring *rx_ring; 2795 struct nfe_jrx_ring *jrx_ring; 2796 struct nfe_tx_ring *tx_ring; 2797 struct nfe_rx_data *rdata; 2798 struct nfe_tx_data *tdata; 2799 int i; 2800 2801 NFE_LOCK_ASSERT(sc); 2802 2803 sc->nfe_watchdog_timer = 0; 2804 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2805 2806 callout_stop(&sc->nfe_stat_ch); 2807 2808 /* abort Tx */ 2809 NFE_WRITE(sc, NFE_TX_CTL, 0); 2810 2811 /* disable Rx */ 2812 NFE_WRITE(sc, NFE_RX_CTL, 0); 2813 2814 /* disable interrupts */ 2815 nfe_disable_intr(sc); 2816 2817 sc->nfe_link = 0; 2818 2819 /* free Rx and Tx mbufs still in the queues. 
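	 * Their DMA maps are synced and unloaded before the mbufs are
	 * released, so the rings can be reloaded cleanly by the next
	 * nfe_init_locked().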
*/ 2820 rx_ring = &sc->rxq; 2821 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2822 rdata = &rx_ring->data[i]; 2823 if (rdata->m != NULL) { 2824 bus_dmamap_sync(rx_ring->rx_data_tag, 2825 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2826 bus_dmamap_unload(rx_ring->rx_data_tag, 2827 rdata->rx_data_map); 2828 m_freem(rdata->m); 2829 rdata->m = NULL; 2830 } 2831 } 2832 2833 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) { 2834 jrx_ring = &sc->jrxq; 2835 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 2836 rdata = &jrx_ring->jdata[i]; 2837 if (rdata->m != NULL) { 2838 bus_dmamap_sync(jrx_ring->jrx_data_tag, 2839 rdata->rx_data_map, BUS_DMASYNC_POSTREAD); 2840 bus_dmamap_unload(jrx_ring->jrx_data_tag, 2841 rdata->rx_data_map); 2842 m_freem(rdata->m); 2843 rdata->m = NULL; 2844 } 2845 } 2846 } 2847 2848 tx_ring = &sc->txq; 2849 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 2850 tdata = &tx_ring->data[i]; 2851 if (tdata->m != NULL) { 2852 bus_dmamap_sync(tx_ring->tx_data_tag, 2853 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE); 2854 bus_dmamap_unload(tx_ring->tx_data_tag, 2855 tdata->tx_data_map); 2856 m_freem(tdata->m); 2857 tdata->m = NULL; 2858 } 2859 } 2860 /* Update hardware stats. */ 2861 nfe_stats_update(sc); 2862 } 2863 2864 2865 static int 2866 nfe_ifmedia_upd(struct ifnet *ifp) 2867 { 2868 struct nfe_softc *sc = ifp->if_softc; 2869 struct mii_data *mii; 2870 2871 NFE_LOCK(sc); 2872 mii = device_get_softc(sc->nfe_miibus); 2873 mii_mediachg(mii); 2874 NFE_UNLOCK(sc); 2875 2876 return (0); 2877 } 2878 2879 2880 static void 2881 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2882 { 2883 struct nfe_softc *sc; 2884 struct mii_data *mii; 2885 2886 sc = ifp->if_softc; 2887 2888 NFE_LOCK(sc); 2889 mii = device_get_softc(sc->nfe_miibus); 2890 mii_pollstat(mii); 2891 NFE_UNLOCK(sc); 2892 2893 ifmr->ifm_active = mii->mii_media_active; 2894 ifmr->ifm_status = mii->mii_media_status; 2895 } 2896 2897 2898 void 2899 nfe_tick(void *xsc) 2900 { 2901 struct nfe_softc *sc; 2902 struct mii_data *mii; 2903 struct ifnet *ifp; 2904 2905 sc = (struct nfe_softc *)xsc; 2906 2907 NFE_LOCK_ASSERT(sc); 2908 2909 ifp = sc->nfe_ifp; 2910 2911 mii = device_get_softc(sc->nfe_miibus); 2912 mii_tick(mii); 2913 nfe_stats_update(sc); 2914 nfe_watchdog(ifp); 2915 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2916 } 2917 2918 2919 static int 2920 nfe_shutdown(device_t dev) 2921 { 2922 struct nfe_softc *sc; 2923 struct ifnet *ifp; 2924 2925 sc = device_get_softc(dev); 2926 2927 NFE_LOCK(sc); 2928 ifp = sc->nfe_ifp; 2929 nfe_stop(ifp); 2930 /* nfe_reset(sc); */ 2931 NFE_UNLOCK(sc); 2932 2933 return (0); 2934 } 2935 2936 2937 static void 2938 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) 2939 { 2940 uint32_t val; 2941 2942 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 2943 val = NFE_READ(sc, NFE_MACADDR_LO); 2944 addr[0] = (val >> 8) & 0xff; 2945 addr[1] = (val & 0xff); 2946 2947 val = NFE_READ(sc, NFE_MACADDR_HI); 2948 addr[2] = (val >> 24) & 0xff; 2949 addr[3] = (val >> 16) & 0xff; 2950 addr[4] = (val >> 8) & 0xff; 2951 addr[5] = (val & 0xff); 2952 } else { 2953 val = NFE_READ(sc, NFE_MACADDR_LO); 2954 addr[5] = (val >> 8) & 0xff; 2955 addr[4] = (val & 0xff); 2956 2957 val = NFE_READ(sc, NFE_MACADDR_HI); 2958 addr[3] = (val >> 24) & 0xff; 2959 addr[2] = (val >> 16) & 0xff; 2960 addr[1] = (val >> 8) & 0xff; 2961 addr[0] = (val & 0xff); 2962 } 2963 } 2964 2965 2966 static void 2967 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr) 2968 { 2969 2970 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); 2971 
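	/*
	 * The station address is stored byte-reversed: the low register
	 * holds the last two bytes, the high register the first four
	 * (the counterpart of nfe_get_macaddr() above).
	 */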
NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 | 2972 addr[1] << 8 | addr[0]); 2973 } 2974 2975 2976 /* 2977 * Map a single buffer address. 2978 */ 2979 2980 static void 2981 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2982 { 2983 struct nfe_dmamap_arg *ctx; 2984 2985 if (error != 0) 2986 return; 2987 2988 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 2989 2990 ctx = (struct nfe_dmamap_arg *)arg; 2991 ctx->nfe_busaddr = segs[0].ds_addr; 2992 } 2993 2994 2995 static int 2996 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2997 { 2998 int error, value; 2999 3000 if (!arg1) 3001 return (EINVAL); 3002 value = *(int *)arg1; 3003 error = sysctl_handle_int(oidp, &value, 0, req); 3004 if (error || !req->newptr) 3005 return (error); 3006 if (value < low || value > high) 3007 return (EINVAL); 3008 *(int *)arg1 = value; 3009 3010 return (0); 3011 } 3012 3013 3014 static int 3015 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) 3016 { 3017 3018 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, 3019 NFE_PROC_MAX)); 3020 } 3021 3022 3023 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 3024 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 3025 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 3026 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) 3027 3028 static void 3029 nfe_sysctl_node(struct nfe_softc *sc) 3030 { 3031 struct sysctl_ctx_list *ctx; 3032 struct sysctl_oid_list *child, *parent; 3033 struct sysctl_oid *tree; 3034 struct nfe_hw_stats *stats; 3035 int error; 3036 3037 stats = &sc->nfe_stats; 3038 ctx = device_get_sysctl_ctx(sc->nfe_dev); 3039 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev)); 3040 SYSCTL_ADD_PROC(ctx, child, 3041 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 3042 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", 3043 "max number of Rx events to process"); 3044 3045 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3046 error = resource_int_value(device_get_name(sc->nfe_dev), 3047 device_get_unit(sc->nfe_dev), "process_limit", 3048 &sc->nfe_process_limit); 3049 if (error == 0) { 3050 if (sc->nfe_process_limit < NFE_PROC_MIN || 3051 sc->nfe_process_limit > NFE_PROC_MAX) { 3052 device_printf(sc->nfe_dev, 3053 "process_limit value out of range; " 3054 "using default: %d\n", NFE_PROC_DEFAULT); 3055 sc->nfe_process_limit = NFE_PROC_DEFAULT; 3056 } 3057 } 3058 3059 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3060 return; 3061 3062 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 3063 NULL, "NFE statistics"); 3064 parent = SYSCTL_CHILDREN(tree); 3065 3066 /* Rx statistics. 
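	 * These counters are exported below the device's sysctl tree,
	 * e.g. dev.nfe.<unit>.stats.rx.*.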
*/ 3067 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 3068 NULL, "Rx MAC statistics"); 3069 child = SYSCTL_CHILDREN(tree); 3070 3071 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors", 3072 &stats->rx_frame_errors, "Framing Errors"); 3073 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes", 3074 &stats->rx_extra_bytes, "Extra Bytes"); 3075 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3076 &stats->rx_late_cols, "Late Collisions"); 3077 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts", 3078 &stats->rx_runts, "Runts"); 3079 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos", 3080 &stats->rx_jumbos, "Jumbos"); 3081 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns", 3082 &stats->rx_fifo_overuns, "FIFO Overruns"); 3083 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors", 3084 &stats->rx_crc_errors, "CRC Errors"); 3085 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae", 3086 &stats->rx_fae, "Frame Alignment Errors"); 3087 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors", 3088 &stats->rx_len_errors, "Length Errors"); 3089 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3090 &stats->rx_unicast, "Unicast Frames"); 3091 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3092 &stats->rx_multicast, "Multicast Frames"); 3093 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3094 &stats->rx_broadcast, "Broadcast Frames"); 3095 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3096 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3097 &stats->rx_octets, "Octets"); 3098 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3099 &stats->rx_pause, "Pause frames"); 3100 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops", 3101 &stats->rx_drops, "Drop frames"); 3102 } 3103 3104 /* Tx statistics. */ 3105 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 3106 NULL, "Tx MAC statistics"); 3107 child = SYSCTL_CHILDREN(tree); 3108 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", 3109 &stats->tx_octets, "Octets"); 3110 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits", 3111 &stats->tx_zero_rexmits, "Zero Retransmits"); 3112 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits", 3113 &stats->tx_one_rexmits, "One Retransmits"); 3114 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits", 3115 &stats->tx_multi_rexmits, "Multiple Retransmits"); 3116 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", 3117 &stats->tx_late_cols, "Late Collisions"); 3118 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns", 3119 &stats->tx_fifo_underuns, "FIFO Underruns"); 3120 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts", 3121 &stats->tx_carrier_losts, "Carrier Losts"); 3122 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals", 3123 &stats->tx_excess_deferals, "Excess Deferrals"); 3124 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors", 3125 &stats->tx_retry_errors, "Retry Errors"); 3126 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3127 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals", 3128 &stats->tx_deferals, "Deferrals"); 3129 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames", 3130 &stats->tx_frames, "Frames"); 3131 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", 3132 &stats->tx_pause, "Pause Frames"); 3133 } 3134 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3135 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", 3136 &stats->tx_deferals, "Unicast Frames"); 3137 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", 3138 &stats->tx_frames, "Multicast Frames"); 3139 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", 3140 &stats->tx_pause, "Broadcast Frames"); 3141 } 3142 } 3143 3144 #undef NFE_SYSCTL_STAT_ADD32 3145 #undef NFE_SYSCTL_STAT_ADD64 3146 3147 static void 3148 nfe_stats_clear(struct nfe_softc *sc) 3149 { 3150 int i, mib_cnt; 3151 3152 if 
((sc->nfe_flags & NFE_MIB_V1) != 0) 3153 mib_cnt = NFE_NUM_MIB_STATV1; 3154 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0) 3155 mib_cnt = NFE_NUM_MIB_STATV2; 3156 else 3157 return; 3158 3159 for (i = 0; i < mib_cnt; i += sizeof(uint32_t)) 3160 NFE_READ(sc, NFE_TX_OCTET + i); 3161 3162 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3163 NFE_READ(sc, NFE_TX_UNICAST); 3164 NFE_READ(sc, NFE_TX_MULTICAST); 3165 NFE_READ(sc, NFE_TX_BROADCAST); 3166 } 3167 } 3168 3169 static void 3170 nfe_stats_update(struct nfe_softc *sc) 3171 { 3172 struct nfe_hw_stats *stats; 3173 3174 NFE_LOCK_ASSERT(sc); 3175 3176 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) 3177 return; 3178 3179 stats = &sc->nfe_stats; 3180 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET); 3181 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT); 3182 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT); 3183 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT); 3184 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL); 3185 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN); 3186 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST); 3187 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL); 3188 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR); 3189 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR); 3190 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES); 3191 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL); 3192 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT); 3193 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO); 3194 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN); 3195 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR); 3196 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE); 3197 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR); 3198 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST); 3199 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST); 3200 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST); 3201 3202 if ((sc->nfe_flags & NFE_MIB_V2) != 0) { 3203 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL); 3204 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME); 3205 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET); 3206 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE); 3207 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE); 3208 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP); 3209 } 3210 3211 if ((sc->nfe_flags & NFE_MIB_V3) != 0) { 3212 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST); 3213 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST); 3214 stats->rx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST); 3215 } 3216 } 3217
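/*
 * Usage notes (illustrative): the Rx processing limit registered in
 * nfe_sysctl_node() can be inspected or changed at runtime with
 *	sysctl dev.nfe.<unit>.process_limit
 * or preset from /boot/device.hints with
 *	hint.nfe.<unit>.process_limit="<value>"
 * and, on MIB-capable chips, the counters accumulated above appear under
 *	dev.nfe.<unit>.stats.{rx,tx}.*
 */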