1 /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ 2 3 /*- 4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp> 5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 22 23 #include <sys/cdefs.h> 24 __FBSDID("$FreeBSD$"); 25 26 #ifdef HAVE_KERNEL_OPTION_HEADERS 27 #include "opt_device_polling.h" 28 #endif 29 30 #include <sys/param.h> 31 #include <sys/endian.h> 32 #include <sys/systm.h> 33 #include <sys/sockio.h> 34 #include <sys/mbuf.h> 35 #include <sys/malloc.h> 36 #include <sys/module.h> 37 #include <sys/kernel.h> 38 #include <sys/queue.h> 39 #include <sys/socket.h> 40 #include <sys/sysctl.h> 41 #include <sys/taskqueue.h> 42 43 #include <net/if.h> 44 #include <net/if_arp.h> 45 #include <net/ethernet.h> 46 #include <net/if_dl.h> 47 #include <net/if_media.h> 48 #include <net/if_types.h> 49 #include <net/if_vlan_var.h> 50 51 #include <net/bpf.h> 52 53 #include <machine/bus.h> 54 #include <machine/resource.h> 55 #include <sys/bus.h> 56 #include <sys/rman.h> 57 58 #include <dev/mii/mii.h> 59 #include <dev/mii/miivar.h> 60 61 #include <dev/pci/pcireg.h> 62 #include <dev/pci/pcivar.h> 63 64 #include <dev/nfe/if_nfereg.h> 65 #include <dev/nfe/if_nfevar.h> 66 67 MODULE_DEPEND(nfe, pci, 1, 1, 1); 68 MODULE_DEPEND(nfe, ether, 1, 1, 1); 69 MODULE_DEPEND(nfe, miibus, 1, 1, 1); 70 71 /* "device miibus" required. See GENERIC if you get errors here. 
*/ 72 #include "miibus_if.h" 73 74 static int nfe_probe(device_t); 75 static int nfe_attach(device_t); 76 static int nfe_detach(device_t); 77 static int nfe_suspend(device_t); 78 static int nfe_resume(device_t); 79 static int nfe_shutdown(device_t); 80 static void nfe_power(struct nfe_softc *); 81 static int nfe_miibus_readreg(device_t, int, int); 82 static int nfe_miibus_writereg(device_t, int, int, int); 83 static void nfe_miibus_statchg(device_t); 84 static void nfe_link_task(void *, int); 85 static void nfe_set_intr(struct nfe_softc *); 86 static __inline void nfe_enable_intr(struct nfe_softc *); 87 static __inline void nfe_disable_intr(struct nfe_softc *); 88 static int nfe_ioctl(struct ifnet *, u_long, caddr_t); 89 static void nfe_alloc_msix(struct nfe_softc *, int); 90 static int nfe_intr(void *); 91 static void nfe_int_task(void *, int); 92 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int); 93 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int); 94 static int nfe_newbuf(struct nfe_softc *, int); 95 static int nfe_jnewbuf(struct nfe_softc *, int); 96 static int nfe_rxeof(struct nfe_softc *, int); 97 static int nfe_jrxeof(struct nfe_softc *, int); 98 static void nfe_txeof(struct nfe_softc *); 99 static int nfe_encap(struct nfe_softc *, struct mbuf **); 100 static void nfe_setmulti(struct nfe_softc *); 101 static void nfe_tx_task(void *, int); 102 static void nfe_start(struct ifnet *); 103 static void nfe_watchdog(struct ifnet *); 104 static void nfe_init(void *); 105 static void nfe_init_locked(void *); 106 static void nfe_stop(struct ifnet *); 107 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 108 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 109 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 110 static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 111 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 112 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); 113 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 114 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 115 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 116 static int nfe_ifmedia_upd(struct ifnet *); 117 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 118 static void nfe_tick(void *); 119 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 120 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *); 121 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int); 122 123 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 124 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS); 125 126 #ifdef NFE_DEBUG 127 static int nfedebug = 0; 128 #define DPRINTF(sc, ...) do { \ 129 if (nfedebug) \ 130 device_printf((sc)->nfe_dev, __VA_ARGS__); \ 131 } while (0) 132 #define DPRINTFN(sc, n, ...) do { \ 133 if (nfedebug >= (n)) \ 134 device_printf((sc)->nfe_dev, __VA_ARGS__); \ 135 } while (0) 136 #else 137 #define DPRINTF(sc, ...) 138 #define DPRINTFN(sc, n, ...) 139 #endif 140 141 #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) 142 #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) 143 #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) 144 145 /* Tunables. 
*/ 146 static int msi_disable = 0; 147 static int msix_disable = 0; 148 static int jumbo_disable = 0; 149 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable); 150 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable); 151 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable); 152 153 static device_method_t nfe_methods[] = { 154 /* Device interface */ 155 DEVMETHOD(device_probe, nfe_probe), 156 DEVMETHOD(device_attach, nfe_attach), 157 DEVMETHOD(device_detach, nfe_detach), 158 DEVMETHOD(device_suspend, nfe_suspend), 159 DEVMETHOD(device_resume, nfe_resume), 160 DEVMETHOD(device_shutdown, nfe_shutdown), 161 162 /* bus interface */ 163 DEVMETHOD(bus_print_child, bus_generic_print_child), 164 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 165 166 /* MII interface */ 167 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 168 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 169 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 170 171 { NULL, NULL } 172 }; 173 174 static driver_t nfe_driver = { 175 "nfe", 176 nfe_methods, 177 sizeof(struct nfe_softc) 178 }; 179 180 static devclass_t nfe_devclass; 181 182 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); 183 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); 184 185 static struct nfe_type nfe_devs[] = { 186 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 187 "NVIDIA nForce MCP Networking Adapter"}, 188 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 189 "NVIDIA nForce2 MCP2 Networking Adapter"}, 190 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, 191 "NVIDIA nForce2 400 MCP4 Networking Adapter"}, 192 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, 193 "NVIDIA nForce2 400 MCP5 Networking Adapter"}, 194 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 195 "NVIDIA nForce3 MCP3 Networking Adapter"}, 196 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, 197 "NVIDIA nForce3 250 MCP6 Networking Adapter"}, 198 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 199 "NVIDIA nForce3 MCP7 Networking Adapter"}, 200 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, 201 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, 202 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, 203 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, 204 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 205 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */ 206 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 207 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */ 208 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, 209 "NVIDIA nForce 430 MCP12 Networking Adapter"}, 210 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, 211 "NVIDIA nForce 430 MCP13 Networking Adapter"}, 212 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 213 "NVIDIA nForce MCP55 Networking Adapter"}, 214 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 215 "NVIDIA nForce MCP55 Networking Adapter"}, 216 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 217 "NVIDIA nForce MCP61 Networking Adapter"}, 218 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 219 "NVIDIA nForce MCP61 Networking Adapter"}, 220 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 221 "NVIDIA nForce MCP61 Networking Adapter"}, 222 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 223 "NVIDIA nForce MCP61 Networking Adapter"}, 224 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 225 "NVIDIA nForce MCP65 Networking Adapter"}, 226 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 227 "NVIDIA nForce MCP65 Networking Adapter"}, 228 {PCI_VENDOR_NVIDIA, 
PCI_PRODUCT_NVIDIA_MCP65_LAN3, 229 "NVIDIA nForce MCP65 Networking Adapter"}, 230 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 231 "NVIDIA nForce MCP65 Networking Adapter"}, 232 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 233 "NVIDIA nForce MCP67 Networking Adapter"}, 234 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 235 "NVIDIA nForce MCP67 Networking Adapter"}, 236 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 237 "NVIDIA nForce MCP67 Networking Adapter"}, 238 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 239 "NVIDIA nForce MCP67 Networking Adapter"}, 240 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 241 "NVIDIA nForce MCP73 Networking Adapter"}, 242 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 243 "NVIDIA nForce MCP73 Networking Adapter"}, 244 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 245 "NVIDIA nForce MCP73 Networking Adapter"}, 246 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 247 "NVIDIA nForce MCP73 Networking Adapter"}, 248 {0, 0, NULL} 249 }; 250 251 252 /* Probe for supported hardware ID's */ 253 static int 254 nfe_probe(device_t dev) 255 { 256 struct nfe_type *t; 257 258 t = nfe_devs; 259 /* Check for matching PCI DEVICE ID's */ 260 while (t->name != NULL) { 261 if ((pci_get_vendor(dev) == t->vid_id) && 262 (pci_get_device(dev) == t->dev_id)) { 263 device_set_desc(dev, t->name); 264 return (BUS_PROBE_DEFAULT); 265 } 266 t++; 267 } 268 269 return (ENXIO); 270 } 271 272 static void 273 nfe_alloc_msix(struct nfe_softc *sc, int count) 274 { 275 int rid; 276 277 rid = PCIR_BAR(2); 278 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, 279 &rid, RF_ACTIVE); 280 if (sc->nfe_msix_res == NULL) { 281 device_printf(sc->nfe_dev, 282 "couldn't allocate MSIX table resource\n"); 283 return; 284 } 285 rid = PCIR_BAR(3); 286 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev, 287 SYS_RES_MEMORY, &rid, RF_ACTIVE); 288 if (sc->nfe_msix_pba_res == NULL) { 289 device_printf(sc->nfe_dev, 290 "couldn't allocate MSIX PBA resource\n"); 291 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), 292 sc->nfe_msix_res); 293 sc->nfe_msix_res = NULL; 294 return; 295 } 296 297 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) { 298 if (count == NFE_MSI_MESSAGES) { 299 if (bootverbose) 300 device_printf(sc->nfe_dev, 301 "Using %d MSIX messages\n", count); 302 sc->nfe_msix = 1; 303 } else { 304 if (bootverbose) 305 device_printf(sc->nfe_dev, 306 "couldn't allocate MSIX\n"); 307 pci_release_msi(sc->nfe_dev); 308 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, 309 PCIR_BAR(3), sc->nfe_msix_pba_res); 310 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, 311 PCIR_BAR(2), sc->nfe_msix_res); 312 sc->nfe_msix_pba_res = NULL; 313 sc->nfe_msix_res = NULL; 314 } 315 } 316 } 317 318 static int 319 nfe_attach(device_t dev) 320 { 321 struct nfe_softc *sc; 322 struct ifnet *ifp; 323 bus_addr_t dma_addr_max; 324 int error = 0, i, msic, reg, rid; 325 326 sc = device_get_softc(dev); 327 sc->nfe_dev = dev; 328 329 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 330 MTX_DEF); 331 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); 332 TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc); 333 334 pci_enable_busmaster(dev); 335 336 rid = PCIR_BAR(0); 337 sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 338 RF_ACTIVE); 339 if (sc->nfe_res[0] == NULL) { 340 device_printf(dev, "couldn't map memory resources\n"); 341 mtx_destroy(&sc->nfe_mtx); 342 return (ENXIO); 343 } 344 345 if (pci_find_extcap(dev, PCIY_EXPRESS, 
&reg) == 0) { 346 uint16_t v, width; 347 348 v = pci_read_config(dev, reg + 0x08, 2); 349 /* Change max. read request size to 4096. */ 350 v &= ~(7 << 12); 351 v |= (5 << 12); 352 pci_write_config(dev, reg + 0x08, v, 2); 353 354 v = pci_read_config(dev, reg + 0x0c, 2); 355 /* link capability */ 356 v = (v >> 4) & 0x0f; 357 width = pci_read_config(dev, reg + 0x12, 2); 358 /* negotiated link width */ 359 width = (width >> 4) & 0x3f; 360 if (v != width) 361 device_printf(sc->nfe_dev, 362 "warning, negotiated width of link(x%d) != " 363 "max. width of link(x%d)\n", width, v); 364 } 365 366 /* Allocate interrupt */ 367 if (msix_disable == 0 || msi_disable == 0) { 368 if (msix_disable == 0 && 369 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES) 370 nfe_alloc_msix(sc, msic); 371 if (msi_disable == 0 && sc->nfe_msix == 0 && 372 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES && 373 pci_alloc_msi(dev, &msic) == 0) { 374 if (msic == NFE_MSI_MESSAGES) { 375 if (bootverbose) 376 device_printf(dev, 377 "Using %d MSI messages\n", msic); 378 sc->nfe_msi = 1; 379 } else 380 pci_release_msi(dev); 381 } 382 } 383 384 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) { 385 rid = 0; 386 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 387 RF_SHAREABLE | RF_ACTIVE); 388 if (sc->nfe_irq[0] == NULL) { 389 device_printf(dev, "couldn't allocate IRQ resources\n"); 390 error = ENXIO; 391 goto fail; 392 } 393 } else { 394 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 395 sc->nfe_irq[i] = bus_alloc_resource_any(dev, 396 SYS_RES_IRQ, &rid, RF_ACTIVE); 397 if (sc->nfe_irq[i] == NULL) { 398 device_printf(dev, 399 "couldn't allocate IRQ resources for " 400 "message %d\n", rid); 401 error = ENXIO; 402 goto fail; 403 } 404 } 405 /* Map interrupts to vector 0. */ 406 if (sc->nfe_msix != 0) { 407 NFE_WRITE(sc, NFE_MSIX_MAP0, 0); 408 NFE_WRITE(sc, NFE_MSIX_MAP1, 0); 409 } else if (sc->nfe_msi != 0) { 410 NFE_WRITE(sc, NFE_MSI_MAP0, 0); 411 NFE_WRITE(sc, NFE_MSI_MAP1, 0); 412 } 413 } 414 415 /* Set IRQ status/mask register.
*/ 416 sc->nfe_irq_status = NFE_IRQ_STATUS; 417 sc->nfe_irq_mask = NFE_IRQ_MASK; 418 sc->nfe_intrs = NFE_IRQ_WANTED; 419 sc->nfe_nointrs = 0; 420 if (sc->nfe_msix != 0) { 421 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS; 422 sc->nfe_nointrs = NFE_IRQ_WANTED; 423 } else if (sc->nfe_msi != 0) { 424 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK; 425 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED; 426 } 427 428 sc->nfe_devid = pci_get_device(dev); 429 sc->nfe_revid = pci_get_revid(dev); 430 sc->nfe_flags = 0; 431 432 switch (sc->nfe_devid) { 433 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 434 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 435 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 436 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 437 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; 438 break; 439 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 440 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 441 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT; 442 break; 443 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 444 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 445 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 446 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 447 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM; 448 break; 449 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 450 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 451 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | 452 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL; 453 break; 454 455 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 456 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 457 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 458 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 459 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 460 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 461 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 462 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 463 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 464 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 465 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 466 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 467 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | 468 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL; 469 break; 470 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 471 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 472 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 473 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 474 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | 475 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL; 476 break; 477 } 478 479 nfe_power(sc); 480 /* Check for reversed ethernet address */ 481 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) 482 sc->nfe_flags |= NFE_CORRECT_MACADDR; 483 nfe_get_macaddr(sc, sc->eaddr); 484 /* 485 * Allocate the parent bus DMA tag appropriate for PCI. 486 */ 487 dma_addr_max = BUS_SPACE_MAXADDR_32BIT; 488 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) 489 dma_addr_max = NFE_DMA_MAXADDR; 490 error = bus_dma_tag_create( 491 bus_get_dma_tag(sc->nfe_dev), /* parent */ 492 1, 0, /* alignment, boundary */ 493 dma_addr_max, /* lowaddr */ 494 BUS_SPACE_MAXADDR, /* highaddr */ 495 NULL, NULL, /* filter, filterarg */ 496 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ 497 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 498 0, /* flags */ 499 NULL, NULL, /* lockfunc, lockarg */ 500 &sc->nfe_parent_tag); 501 if (error) 502 goto fail; 503 504 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER); 505 if (ifp == NULL) { 506 device_printf(dev, "can not if_alloc()\n"); 507 error = ENOSPC; 508 goto fail; 509 } 510 TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp); 511 512 /* 513 * Allocate Tx and Rx rings. 
514 */ 515 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0) 516 goto fail; 517 518 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0) 519 goto fail; 520 521 nfe_alloc_jrx_ring(sc, &sc->jrxq); 522 523 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 524 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 525 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 526 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", 527 "max number of Rx events to process"); 528 529 sc->nfe_process_limit = NFE_PROC_DEFAULT; 530 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 531 "process_limit", &sc->nfe_process_limit); 532 if (error == 0) { 533 if (sc->nfe_process_limit < NFE_PROC_MIN || 534 sc->nfe_process_limit > NFE_PROC_MAX) { 535 device_printf(dev, "process_limit value out of range; " 536 "using default: %d\n", NFE_PROC_DEFAULT); 537 sc->nfe_process_limit = NFE_PROC_DEFAULT; 538 } 539 } 540 541 ifp->if_softc = sc; 542 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 543 ifp->if_mtu = ETHERMTU; 544 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 545 ifp->if_ioctl = nfe_ioctl; 546 ifp->if_start = nfe_start; 547 ifp->if_hwassist = 0; 548 ifp->if_capabilities = 0; 549 ifp->if_watchdog = NULL; 550 ifp->if_init = nfe_init; 551 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1); 552 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1; 553 IFQ_SET_READY(&ifp->if_snd); 554 555 if (sc->nfe_flags & NFE_HW_CSUM) { 556 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4; 557 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO; 558 } 559 ifp->if_capenable = ifp->if_capabilities; 560 561 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 562 /* VLAN capability setup. */ 563 ifp->if_capabilities |= IFCAP_VLAN_MTU; 564 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) { 565 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 566 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0) 567 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 568 } 569 ifp->if_capenable = ifp->if_capabilities; 570 571 /* 572 * Tell the upper layer(s) we support long frames. 573 * Must appear after the call to ether_ifattach() because 574 * ether_ifattach() sets ifi_hdrlen to the default value. 
575 */ 576 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 577 578 #ifdef DEVICE_POLLING 579 ifp->if_capabilities |= IFCAP_POLLING; 580 #endif 581 582 /* Do MII setup */ 583 if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, 584 nfe_ifmedia_sts)) { 585 device_printf(dev, "MII without any phy!\n"); 586 error = ENXIO; 587 goto fail; 588 } 589 ether_ifattach(ifp, sc->eaddr); 590 591 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); 592 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, 593 taskqueue_thread_enqueue, &sc->nfe_tq); 594 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", 595 device_get_nameunit(sc->nfe_dev)); 596 error = 0; 597 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 598 error = bus_setup_intr(dev, sc->nfe_irq[0], 599 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 600 &sc->nfe_intrhand[0]); 601 } else { 602 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 603 error = bus_setup_intr(dev, sc->nfe_irq[i], 604 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, 605 &sc->nfe_intrhand[i]); 606 if (error != 0) 607 break; 608 } 609 } 610 if (error) { 611 device_printf(dev, "couldn't set up irq\n"); 612 taskqueue_free(sc->nfe_tq); 613 sc->nfe_tq = NULL; 614 ether_ifdetach(ifp); 615 goto fail; 616 } 617 618 fail: 619 if (error) 620 nfe_detach(dev); 621 622 return (error); 623 } 624 625 626 static int 627 nfe_detach(device_t dev) 628 { 629 struct nfe_softc *sc; 630 struct ifnet *ifp; 631 uint8_t eaddr[ETHER_ADDR_LEN]; 632 int i, rid; 633 634 sc = device_get_softc(dev); 635 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); 636 ifp = sc->nfe_ifp; 637 638 #ifdef DEVICE_POLLING 639 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 640 ether_poll_deregister(ifp); 641 #endif 642 if (device_is_attached(dev)) { 643 NFE_LOCK(sc); 644 nfe_stop(ifp); 645 ifp->if_flags &= ~IFF_UP; 646 NFE_UNLOCK(sc); 647 callout_drain(&sc->nfe_stat_ch); 648 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task); 649 taskqueue_drain(taskqueue_swi, &sc->nfe_link_task); 650 ether_ifdetach(ifp); 651 } 652 653 if (ifp) { 654 /* restore ethernet address */ 655 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { 656 for (i = 0; i < ETHER_ADDR_LEN; i++) { 657 eaddr[i] = sc->eaddr[5 - i]; 658 } 659 } else 660 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN); 661 nfe_set_macaddr(sc, eaddr); 662 if_free(ifp); 663 } 664 if (sc->nfe_miibus) 665 device_delete_child(dev, sc->nfe_miibus); 666 bus_generic_detach(dev); 667 if (sc->nfe_tq != NULL) { 668 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task); 669 taskqueue_free(sc->nfe_tq); 670 sc->nfe_tq = NULL; 671 } 672 673 for (i = 0; i < NFE_MSI_MESSAGES; i++) { 674 if (sc->nfe_intrhand[i] != NULL) { 675 bus_teardown_intr(dev, sc->nfe_irq[i], 676 sc->nfe_intrhand[i]); 677 sc->nfe_intrhand[i] = NULL; 678 } 679 } 680 681 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { 682 if (sc->nfe_irq[0] != NULL) 683 bus_release_resource(dev, SYS_RES_IRQ, 0, 684 sc->nfe_irq[0]); 685 } else { 686 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { 687 if (sc->nfe_irq[i] != NULL) { 688 bus_release_resource(dev, SYS_RES_IRQ, rid, 689 sc->nfe_irq[i]); 690 sc->nfe_irq[i] = NULL; 691 } 692 } 693 pci_release_msi(dev); 694 } 695 if (sc->nfe_msix_pba_res != NULL) { 696 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3), 697 sc->nfe_msix_pba_res); 698 sc->nfe_msix_pba_res = NULL; 699 } 700 if (sc->nfe_msix_res != NULL) { 701 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), 702 sc->nfe_msix_res); 703 sc->nfe_msix_res = NULL; 704 } 705 if (sc->nfe_res[0] != NULL) { 706 
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 707 sc->nfe_res[0]); 708 sc->nfe_res[0] = NULL; 709 } 710 711 nfe_free_tx_ring(sc, &sc->txq); 712 nfe_free_rx_ring(sc, &sc->rxq); 713 nfe_free_jrx_ring(sc, &sc->jrxq); 714 715 if (sc->nfe_parent_tag) { 716 bus_dma_tag_destroy(sc->nfe_parent_tag); 717 sc->nfe_parent_tag = NULL; 718 } 719 720 mtx_destroy(&sc->nfe_mtx); 721 722 return (0); 723 } 724 725 726 static int 727 nfe_suspend(device_t dev) 728 { 729 struct nfe_softc *sc; 730 731 sc = device_get_softc(dev); 732 733 NFE_LOCK(sc); 734 nfe_stop(sc->nfe_ifp); 735 sc->nfe_suspended = 1; 736 NFE_UNLOCK(sc); 737 738 return (0); 739 } 740 741 742 static int 743 nfe_resume(device_t dev) 744 { 745 struct nfe_softc *sc; 746 struct ifnet *ifp; 747 748 sc = device_get_softc(dev); 749 750 NFE_LOCK(sc); 751 ifp = sc->nfe_ifp; 752 if (ifp->if_flags & IFF_UP) 753 nfe_init_locked(sc); 754 sc->nfe_suspended = 0; 755 NFE_UNLOCK(sc); 756 757 return (0); 758 } 759 760 761 /* Take PHY/NIC out of powerdown, from Linux */ 762 static void 763 nfe_power(struct nfe_softc *sc) 764 { 765 uint32_t pwr; 766 767 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) 768 return; 769 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); 770 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); 771 DELAY(100); 772 NFE_WRITE(sc, NFE_MAC_RESET, 0); 773 DELAY(100); 774 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); 775 pwr = NFE_READ(sc, NFE_PWR2_CTL); 776 pwr &= ~NFE_PWR2_WAKEUP_MASK; 777 if (sc->nfe_revid >= 0xa3 && 778 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || 779 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) 780 pwr |= NFE_PWR2_REVA3; 781 NFE_WRITE(sc, NFE_PWR2_CTL, pwr); 782 } 783 784 785 static void 786 nfe_miibus_statchg(device_t dev) 787 { 788 struct nfe_softc *sc; 789 790 sc = device_get_softc(dev); 791 taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task); 792 } 793 794 795 static void 796 nfe_link_task(void *arg, int pending) 797 { 798 struct nfe_softc *sc; 799 struct mii_data *mii; 800 struct ifnet *ifp; 801 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 802 uint32_t gmask, rxctl, txctl, val; 803 804 sc = (struct nfe_softc *)arg; 805 806 NFE_LOCK(sc); 807 808 mii = device_get_softc(sc->nfe_miibus); 809 ifp = sc->nfe_ifp; 810 if (mii == NULL || ifp == NULL) { 811 NFE_UNLOCK(sc); 812 return; 813 } 814 815 if (mii->mii_media_status & IFM_ACTIVE) { 816 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 817 sc->nfe_link = 1; 818 } else 819 sc->nfe_link = 0; 820 821 phy = NFE_READ(sc, NFE_PHY_IFACE); 822 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 823 824 seed = NFE_READ(sc, NFE_RNDSEED); 825 seed &= ~NFE_SEED_MASK; 826 827 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) { 828 phy |= NFE_PHY_HDX; /* half-duplex */ 829 misc |= NFE_MISC1_HDX; 830 } 831 832 switch (IFM_SUBTYPE(mii->mii_media_active)) { 833 case IFM_1000_T: /* full-duplex only */ 834 link |= NFE_MEDIA_1000T; 835 seed |= NFE_SEED_1000T; 836 phy |= NFE_PHY_1000T; 837 break; 838 case IFM_100_TX: 839 link |= NFE_MEDIA_100TX; 840 seed |= NFE_SEED_100TX; 841 phy |= NFE_PHY_100TX; 842 break; 843 case IFM_10_T: 844 link |= NFE_MEDIA_10T; 845 seed |= NFE_SEED_10T; 846 break; 847 } 848 849 if ((phy & 0x10000000) != 0) { 850 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 851 val = NFE_R1_MAGIC_1000; 852 else 853 val = NFE_R1_MAGIC_10_100; 854 } else 855 val = NFE_R1_MAGIC_DEFAULT; 856 NFE_WRITE(sc, NFE_SETUP_R1, val); 857 858 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? 
*/ 859 860 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 861 NFE_WRITE(sc, NFE_MISC1, misc); 862 NFE_WRITE(sc, NFE_LINKSPEED, link); 863 864 gmask = mii->mii_media_active & IFM_GMASK; 865 if ((gmask & IFM_FDX) != 0) { 866 /* It seems all hardware supports Rx pause frames. */ 867 val = NFE_READ(sc, NFE_RXFILTER); 868 if ((gmask & IFM_FLAG0) != 0) 869 val |= NFE_PFF_RX_PAUSE; 870 else 871 val &= ~NFE_PFF_RX_PAUSE; 872 NFE_WRITE(sc, NFE_RXFILTER, val); 873 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 874 val = NFE_READ(sc, NFE_MISC1); 875 if ((gmask & IFM_FLAG1) != 0) { 876 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 877 NFE_TX_PAUSE_FRAME_ENABLE); 878 val |= NFE_MISC1_TX_PAUSE; 879 } else { 880 val &= ~NFE_MISC1_TX_PAUSE; 881 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 882 NFE_TX_PAUSE_FRAME_DISABLE); 883 } 884 NFE_WRITE(sc, NFE_MISC1, val); 885 } 886 } else { 887 /* disable rx/tx pause frames */ 888 val = NFE_READ(sc, NFE_RXFILTER); 889 val &= ~NFE_PFF_RX_PAUSE; 890 NFE_WRITE(sc, NFE_RXFILTER, val); 891 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { 892 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, 893 NFE_TX_PAUSE_FRAME_DISABLE); 894 val = NFE_READ(sc, NFE_MISC1); 895 val &= ~NFE_MISC1_TX_PAUSE; 896 NFE_WRITE(sc, NFE_MISC1, val); 897 } 898 } 899 900 txctl = NFE_READ(sc, NFE_TX_CTL); 901 rxctl = NFE_READ(sc, NFE_RX_CTL); 902 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 903 txctl |= NFE_TX_START; 904 rxctl |= NFE_RX_START; 905 } else { 906 txctl &= ~NFE_TX_START; 907 rxctl &= ~NFE_RX_START; 908 } 909 NFE_WRITE(sc, NFE_TX_CTL, txctl); 910 NFE_WRITE(sc, NFE_RX_CTL, rxctl); 911 912 NFE_UNLOCK(sc); 913 } 914 915 916 static int 917 nfe_miibus_readreg(device_t dev, int phy, int reg) 918 { 919 struct nfe_softc *sc = device_get_softc(dev); 920 uint32_t val; 921 int ntries; 922 923 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 924 925 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 926 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 927 DELAY(100); 928 } 929 930 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 931 932 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 933 DELAY(100); 934 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 935 break; 936 } 937 if (ntries == NFE_TIMEOUT) { 938 DPRINTFN(sc, 2, "timeout waiting for PHY\n"); 939 return 0; 940 } 941 942 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 943 DPRINTFN(sc, 2, "could not read PHY\n"); 944 return 0; 945 } 946 947 val = NFE_READ(sc, NFE_PHY_DATA); 948 if (val != 0xffffffff && val != 0) 949 sc->mii_phyaddr = phy; 950 951 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 952 953 return (val); 954 } 955 956 957 static int 958 nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 959 { 960 struct nfe_softc *sc = device_get_softc(dev); 961 uint32_t ctl; 962 int ntries; 963 964 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 965 966 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 967 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 968 DELAY(100); 969 } 970 971 NFE_WRITE(sc, NFE_PHY_DATA, val); 972 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 973 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 974 975 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { 976 DELAY(100); 977 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 978 break; 979 } 980 #ifdef NFE_DEBUG 981 if (nfedebug >= 2 && ntries == NFE_TIMEOUT) 982 device_printf(sc->nfe_dev, "could not write to PHY\n"); 983 #endif 984 return (0); 985 } 986 987 struct nfe_dmamap_arg { 988 bus_addr_t nfe_busaddr; 989 }; 990 991 static int 992 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
993 { 994 struct nfe_dmamap_arg ctx; 995 struct nfe_rx_data *data; 996 void *desc; 997 int i, error, descsize; 998 999 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1000 desc = ring->desc64; 1001 descsize = sizeof (struct nfe_desc64); 1002 } else { 1003 desc = ring->desc32; 1004 descsize = sizeof (struct nfe_desc32); 1005 } 1006 1007 ring->cur = ring->next = 0; 1008 1009 error = bus_dma_tag_create(sc->nfe_parent_tag, 1010 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1011 BUS_SPACE_MAXADDR, /* lowaddr */ 1012 BUS_SPACE_MAXADDR, /* highaddr */ 1013 NULL, NULL, /* filter, filterarg */ 1014 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1015 NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 1016 0, /* flags */ 1017 NULL, NULL, /* lockfunc, lockarg */ 1018 &ring->rx_desc_tag); 1019 if (error != 0) { 1020 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1021 goto fail; 1022 } 1023 1024 /* allocate memory to desc */ 1025 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | 1026 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); 1027 if (error != 0) { 1028 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1029 goto fail; 1030 } 1031 if (sc->nfe_flags & NFE_40BIT_ADDR) 1032 ring->desc64 = desc; 1033 else 1034 ring->desc32 = desc; 1035 1036 /* map desc to device visible address space */ 1037 ctx.nfe_busaddr = 0; 1038 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, 1039 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1040 if (error != 0) { 1041 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1042 goto fail; 1043 } 1044 ring->physaddr = ctx.nfe_busaddr; 1045 1046 error = bus_dma_tag_create(sc->nfe_parent_tag, 1047 1, 0, /* alignment, boundary */ 1048 BUS_SPACE_MAXADDR, /* lowaddr */ 1049 BUS_SPACE_MAXADDR, /* highaddr */ 1050 NULL, NULL, /* filter, filterarg */ 1051 MCLBYTES, 1, /* maxsize, nsegments */ 1052 MCLBYTES, /* maxsegsize */ 1053 0, /* flags */ 1054 NULL, NULL, /* lockfunc, lockarg */ 1055 &ring->rx_data_tag); 1056 if (error != 0) { 1057 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); 1058 goto fail; 1059 } 1060 1061 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); 1062 if (error != 0) { 1063 device_printf(sc->nfe_dev, 1064 "could not create Rx DMA spare map\n"); 1065 goto fail; 1066 } 1067 1068 /* 1069 * Pre-allocate Rx buffers and populate Rx ring. 1070 */ 1071 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1072 data = &sc->rxq.data[i]; 1073 data->rx_data_map = NULL; 1074 data->m = NULL; 1075 error = bus_dmamap_create(ring->rx_data_tag, 0, 1076 &data->rx_data_map); 1077 if (error != 0) { 1078 device_printf(sc->nfe_dev, 1079 "could not create Rx DMA map\n"); 1080 goto fail; 1081 } 1082 } 1083 1084 fail: 1085 return (error); 1086 } 1087 1088 1089 static void 1090 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1091 { 1092 struct nfe_dmamap_arg ctx; 1093 struct nfe_rx_data *data; 1094 void *desc; 1095 int i, error, descsize; 1096 1097 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1098 return; 1099 if (jumbo_disable != 0) { 1100 device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); 1101 sc->nfe_jumbo_disable = 1; 1102 return; 1103 } 1104 1105 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1106 desc = ring->jdesc64; 1107 descsize = sizeof (struct nfe_desc64); 1108 } else { 1109 desc = ring->jdesc32; 1110 descsize = sizeof (struct nfe_desc32); 1111 } 1112 1113 ring->jcur = ring->jnext = 0; 1114 1115 /* Create DMA tag for jumbo Rx ring. 
*/ 1116 error = bus_dma_tag_create(sc->nfe_parent_tag, 1117 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1118 BUS_SPACE_MAXADDR, /* lowaddr */ 1119 BUS_SPACE_MAXADDR, /* highaddr */ 1120 NULL, NULL, /* filter, filterarg */ 1121 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1122 1, /* nsegments */ 1123 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 1124 0, /* flags */ 1125 NULL, NULL, /* lockfunc, lockarg */ 1126 &ring->jrx_desc_tag); 1127 if (error != 0) { 1128 device_printf(sc->nfe_dev, 1129 "could not create jumbo ring DMA tag\n"); 1130 goto fail; 1131 } 1132 1133 /* Create DMA tag for jumbo Rx buffers. */ 1134 error = bus_dma_tag_create(sc->nfe_parent_tag, 1135 PAGE_SIZE, 0, /* alignment, boundary */ 1136 BUS_SPACE_MAXADDR, /* lowaddr */ 1137 BUS_SPACE_MAXADDR, /* highaddr */ 1138 NULL, NULL, /* filter, filterarg */ 1139 MJUM9BYTES, /* maxsize */ 1140 1, /* nsegments */ 1141 MJUM9BYTES, /* maxsegsize */ 1142 0, /* flags */ 1143 NULL, NULL, /* lockfunc, lockarg */ 1144 &ring->jrx_data_tag); 1145 if (error != 0) { 1146 device_printf(sc->nfe_dev, 1147 "could not create jumbo Rx buffer DMA tag\n"); 1148 goto fail; 1149 } 1150 1151 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 1152 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | 1153 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); 1154 if (error != 0) { 1155 device_printf(sc->nfe_dev, 1156 "could not allocate DMA'able memory for jumbo Rx ring\n"); 1157 goto fail; 1158 } 1159 if (sc->nfe_flags & NFE_40BIT_ADDR) 1160 ring->jdesc64 = desc; 1161 else 1162 ring->jdesc32 = desc; 1163 1164 ctx.nfe_busaddr = 0; 1165 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, 1166 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1167 if (error != 0) { 1168 device_printf(sc->nfe_dev, 1169 "could not load DMA'able memory for jumbo Rx ring\n"); 1170 goto fail; 1171 } 1172 ring->jphysaddr = ctx.nfe_busaddr; 1173 1174 /* Create DMA maps for jumbo Rx buffers. */ 1175 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); 1176 if (error != 0) { 1177 device_printf(sc->nfe_dev, 1178 "could not create jumbo Rx DMA spare map\n"); 1179 goto fail; 1180 } 1181 1182 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1183 data = &sc->jrxq.jdata[i]; 1184 data->rx_data_map = NULL; 1185 data->m = NULL; 1186 error = bus_dmamap_create(ring->jrx_data_tag, 0, 1187 &data->rx_data_map); 1188 if (error != 0) { 1189 device_printf(sc->nfe_dev, 1190 "could not create jumbo Rx DMA map\n"); 1191 goto fail; 1192 } 1193 } 1194 1195 return; 1196 1197 fail: 1198 /* 1199 * Running without jumbo frame support is ok for most cases 1200 * so don't fail on creating dma tag/map for jumbo frame. 
1201 */ 1202 nfe_free_jrx_ring(sc, ring); 1203 device_printf(sc->nfe_dev, "disabling jumbo frame support due to " 1204 "resource shortage\n"); 1205 sc->nfe_jumbo_disable = 1; 1206 } 1207 1208 1209 static int 1210 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1211 { 1212 void *desc; 1213 size_t descsize; 1214 int i; 1215 1216 ring->cur = ring->next = 0; 1217 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1218 desc = ring->desc64; 1219 descsize = sizeof (struct nfe_desc64); 1220 } else { 1221 desc = ring->desc32; 1222 descsize = sizeof (struct nfe_desc32); 1223 } 1224 bzero(desc, descsize * NFE_RX_RING_COUNT); 1225 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1226 if (nfe_newbuf(sc, i) != 0) 1227 return (ENOBUFS); 1228 } 1229 1230 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, 1231 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1232 1233 return (0); 1234 } 1235 1236 1237 static int 1238 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1239 { 1240 void *desc; 1241 size_t descsize; 1242 int i; 1243 1244 ring->jcur = ring->jnext = 0; 1245 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1246 desc = ring->jdesc64; 1247 descsize = sizeof (struct nfe_desc64); 1248 } else { 1249 desc = ring->jdesc32; 1250 descsize = sizeof (struct nfe_desc32); 1251 } 1252 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT); 1253 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1254 if (nfe_jnewbuf(sc, i) != 0) 1255 return (ENOBUFS); 1256 } 1257 1258 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, 1259 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1260 1261 return (0); 1262 } 1263 1264 1265 static void 1266 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1267 { 1268 struct nfe_rx_data *data; 1269 void *desc; 1270 int i, descsize; 1271 1272 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1273 desc = ring->desc64; 1274 descsize = sizeof (struct nfe_desc64); 1275 } else { 1276 desc = ring->desc32; 1277 descsize = sizeof (struct nfe_desc32); 1278 } 1279 1280 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1281 data = &ring->data[i]; 1282 if (data->rx_data_map != NULL) { 1283 bus_dmamap_destroy(ring->rx_data_tag, 1284 data->rx_data_map); 1285 data->rx_data_map = NULL; 1286 } 1287 if (data->m != NULL) { 1288 m_freem(data->m); 1289 data->m = NULL; 1290 } 1291 } 1292 if (ring->rx_data_tag != NULL) { 1293 if (ring->rx_spare_map != NULL) { 1294 bus_dmamap_destroy(ring->rx_data_tag, 1295 ring->rx_spare_map); 1296 ring->rx_spare_map = NULL; 1297 } 1298 bus_dma_tag_destroy(ring->rx_data_tag); 1299 ring->rx_data_tag = NULL; 1300 } 1301 1302 if (desc != NULL) { 1303 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); 1304 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); 1305 ring->desc64 = NULL; 1306 ring->desc32 = NULL; 1307 ring->rx_desc_map = NULL; 1308 } 1309 if (ring->rx_desc_tag != NULL) { 1310 bus_dma_tag_destroy(ring->rx_desc_tag); 1311 ring->rx_desc_tag = NULL; 1312 } 1313 } 1314 1315 1316 static void 1317 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) 1318 { 1319 struct nfe_rx_data *data; 1320 void *desc; 1321 int i, descsize; 1322 1323 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) 1324 return; 1325 1326 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1327 desc = ring->jdesc64; 1328 descsize = sizeof (struct nfe_desc64); 1329 } else { 1330 desc = ring->jdesc32; 1331 descsize = sizeof (struct nfe_desc32); 1332 } 1333 1334 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { 1335 data = &ring->jdata[i]; 1336 if (data->rx_data_map != NULL) { 1337 bus_dmamap_destroy(ring->jrx_data_tag, 
1338 data->rx_data_map); 1339 data->rx_data_map = NULL; 1340 } 1341 if (data->m != NULL) { 1342 m_freem(data->m); 1343 data->m = NULL; 1344 } 1345 } 1346 if (ring->jrx_data_tag != NULL) { 1347 if (ring->jrx_spare_map != NULL) { 1348 bus_dmamap_destroy(ring->jrx_data_tag, 1349 ring->jrx_spare_map); 1350 ring->jrx_spare_map = NULL; 1351 } 1352 bus_dma_tag_destroy(ring->jrx_data_tag); 1353 ring->jrx_data_tag = NULL; 1354 } 1355 1356 if (desc != NULL) { 1357 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); 1358 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); 1359 ring->jdesc64 = NULL; 1360 ring->jdesc32 = NULL; 1361 ring->jrx_desc_map = NULL; 1362 } 1363 1364 if (ring->jrx_desc_tag != NULL) { 1365 bus_dma_tag_destroy(ring->jrx_desc_tag); 1366 ring->jrx_desc_tag = NULL; 1367 } 1368 } 1369 1370 1371 static int 1372 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1373 { 1374 struct nfe_dmamap_arg ctx; 1375 int i, error; 1376 void *desc; 1377 int descsize; 1378 1379 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1380 desc = ring->desc64; 1381 descsize = sizeof (struct nfe_desc64); 1382 } else { 1383 desc = ring->desc32; 1384 descsize = sizeof (struct nfe_desc32); 1385 } 1386 1387 ring->queued = 0; 1388 ring->cur = ring->next = 0; 1389 1390 error = bus_dma_tag_create(sc->nfe_parent_tag, 1391 NFE_RING_ALIGN, 0, /* alignment, boundary */ 1392 BUS_SPACE_MAXADDR, /* lowaddr */ 1393 BUS_SPACE_MAXADDR, /* highaddr */ 1394 NULL, NULL, /* filter, filterarg */ 1395 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ 1396 NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 1397 0, /* flags */ 1398 NULL, NULL, /* lockfunc, lockarg */ 1399 &ring->tx_desc_tag); 1400 if (error != 0) { 1401 device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); 1402 goto fail; 1403 } 1404 1405 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | 1406 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); 1407 if (error != 0) { 1408 device_printf(sc->nfe_dev, "could not create desc DMA map\n"); 1409 goto fail; 1410 } 1411 if (sc->nfe_flags & NFE_40BIT_ADDR) 1412 ring->desc64 = desc; 1413 else 1414 ring->desc32 = desc; 1415 1416 ctx.nfe_busaddr = 0; 1417 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, 1418 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); 1419 if (error != 0) { 1420 device_printf(sc->nfe_dev, "could not load desc DMA map\n"); 1421 goto fail; 1422 } 1423 ring->physaddr = ctx.nfe_busaddr; 1424 1425 error = bus_dma_tag_create(sc->nfe_parent_tag, 1426 1, 0, 1427 BUS_SPACE_MAXADDR, 1428 BUS_SPACE_MAXADDR, 1429 NULL, NULL, 1430 NFE_TSO_MAXSIZE, 1431 NFE_MAX_SCATTER, 1432 NFE_TSO_MAXSGSIZE, 1433 0, 1434 NULL, NULL, 1435 &ring->tx_data_tag); 1436 if (error != 0) { 1437 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); 1438 goto fail; 1439 } 1440 1441 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1442 error = bus_dmamap_create(ring->tx_data_tag, 0, 1443 &ring->data[i].tx_data_map); 1444 if (error != 0) { 1445 device_printf(sc->nfe_dev, 1446 "could not create Tx DMA map\n"); 1447 goto fail; 1448 } 1449 } 1450 1451 fail: 1452 return (error); 1453 } 1454 1455 1456 static void 1457 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1458 { 1459 void *desc; 1460 size_t descsize; 1461 1462 sc->nfe_force_tx = 0; 1463 ring->queued = 0; 1464 ring->cur = ring->next = 0; 1465 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1466 desc = ring->desc64; 1467 descsize = sizeof (struct nfe_desc64); 1468 } else { 1469 desc = ring->desc32; 1470 
descsize = sizeof (struct nfe_desc32); 1471 } 1472 bzero(desc, descsize * NFE_TX_RING_COUNT); 1473 1474 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1475 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1476 } 1477 1478 1479 static void 1480 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1481 { 1482 struct nfe_tx_data *data; 1483 void *desc; 1484 int i, descsize; 1485 1486 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1487 desc = ring->desc64; 1488 descsize = sizeof (struct nfe_desc64); 1489 } else { 1490 desc = ring->desc32; 1491 descsize = sizeof (struct nfe_desc32); 1492 } 1493 1494 for (i = 0; i < NFE_TX_RING_COUNT; i++) { 1495 data = &ring->data[i]; 1496 1497 if (data->m != NULL) { 1498 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, 1499 BUS_DMASYNC_POSTWRITE); 1500 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); 1501 m_freem(data->m); 1502 data->m = NULL; 1503 } 1504 if (data->tx_data_map != NULL) { 1505 bus_dmamap_destroy(ring->tx_data_tag, 1506 data->tx_data_map); 1507 data->tx_data_map = NULL; 1508 } 1509 } 1510 1511 if (ring->tx_data_tag != NULL) { 1512 bus_dma_tag_destroy(ring->tx_data_tag); 1513 ring->tx_data_tag = NULL; 1514 } 1515 1516 if (desc != NULL) { 1517 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, 1518 BUS_DMASYNC_POSTWRITE); 1519 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); 1520 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); 1521 ring->desc64 = NULL; 1522 ring->desc32 = NULL; 1523 ring->tx_desc_map = NULL; 1524 bus_dma_tag_destroy(ring->tx_desc_tag); 1525 ring->tx_desc_tag = NULL; 1526 } 1527 } 1528 1529 #ifdef DEVICE_POLLING 1530 static poll_handler_t nfe_poll; 1531 1532 1533 static void 1534 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1535 { 1536 struct nfe_softc *sc = ifp->if_softc; 1537 uint32_t r; 1538 1539 NFE_LOCK(sc); 1540 1541 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1542 NFE_UNLOCK(sc); 1543 return; 1544 } 1545 1546 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1547 nfe_jrxeof(sc, count); 1548 else 1549 nfe_rxeof(sc, count); 1550 nfe_txeof(sc); 1551 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1552 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1553 1554 if (cmd == POLL_AND_CHECK_STATUS) { 1555 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1556 NFE_UNLOCK(sc); 1557 return; 1558 } 1559 NFE_WRITE(sc, sc->nfe_irq_status, r); 1560 1561 if (r & NFE_IRQ_LINK) { 1562 NFE_READ(sc, NFE_PHY_STATUS); 1563 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1564 DPRINTF(sc, "link state changed\n"); 1565 } 1566 } 1567 NFE_UNLOCK(sc); 1568 } 1569 #endif /* DEVICE_POLLING */ 1570 1571 static void 1572 nfe_set_intr(struct nfe_softc *sc) 1573 { 1574 1575 if (sc->nfe_msi != 0) 1576 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1577 } 1578 1579 1580 /* In MSIX, a write to mask registers behaves as XOR. */ 1581 static __inline void 1582 nfe_enable_intr(struct nfe_softc *sc) 1583 { 1584 1585 if (sc->nfe_msix != 0) { 1586 /* XXX Should have a better way to enable interrupts! */ 1587 if (NFE_READ(sc, sc->nfe_irq_mask) == 0) 1588 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1589 } else 1590 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); 1591 } 1592 1593 1594 static __inline void 1595 nfe_disable_intr(struct nfe_softc *sc) 1596 { 1597 1598 if (sc->nfe_msix != 0) { 1599 /* XXX Should have a better way to disable interrupts!
*/ 1600 if (NFE_READ(sc, sc->nfe_irq_mask) != 0) 1601 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1602 } else 1603 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); 1604 } 1605 1606 1607 static int 1608 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1609 { 1610 struct nfe_softc *sc; 1611 struct ifreq *ifr; 1612 struct mii_data *mii; 1613 int error, init, mask; 1614 1615 sc = ifp->if_softc; 1616 ifr = (struct ifreq *) data; 1617 error = 0; 1618 init = 0; 1619 switch (cmd) { 1620 case SIOCSIFMTU: 1621 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) 1622 error = EINVAL; 1623 else if (ifp->if_mtu != ifr->ifr_mtu) { 1624 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) || 1625 (sc->nfe_jumbo_disable != 0)) && 1626 ifr->ifr_mtu > ETHERMTU) 1627 error = EINVAL; 1628 else { 1629 NFE_LOCK(sc); 1630 ifp->if_mtu = ifr->ifr_mtu; 1631 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1632 nfe_init_locked(sc); 1633 NFE_UNLOCK(sc); 1634 } 1635 } 1636 break; 1637 case SIOCSIFFLAGS: 1638 NFE_LOCK(sc); 1639 if (ifp->if_flags & IFF_UP) { 1640 /* 1641 * If only the PROMISC or ALLMULTI flag changes, then 1642 * don't do a full re-init of the chip, just update 1643 * the Rx filter. 1644 */ 1645 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && 1646 ((ifp->if_flags ^ sc->nfe_if_flags) & 1647 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 1648 nfe_setmulti(sc); 1649 else 1650 nfe_init_locked(sc); 1651 } else { 1652 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1653 nfe_stop(ifp); 1654 } 1655 sc->nfe_if_flags = ifp->if_flags; 1656 NFE_UNLOCK(sc); 1657 error = 0; 1658 break; 1659 case SIOCADDMULTI: 1660 case SIOCDELMULTI: 1661 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1662 NFE_LOCK(sc); 1663 nfe_setmulti(sc); 1664 NFE_UNLOCK(sc); 1665 error = 0; 1666 } 1667 break; 1668 case SIOCSIFMEDIA: 1669 case SIOCGIFMEDIA: 1670 mii = device_get_softc(sc->nfe_miibus); 1671 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1672 break; 1673 case SIOCSIFCAP: 1674 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1675 #ifdef DEVICE_POLLING 1676 if ((mask & IFCAP_POLLING) != 0) { 1677 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1678 error = ether_poll_register(nfe_poll, ifp); 1679 if (error) 1680 break; 1681 NFE_LOCK(sc); 1682 nfe_disable_intr(sc); 1683 ifp->if_capenable |= IFCAP_POLLING; 1684 NFE_UNLOCK(sc); 1685 } else { 1686 error = ether_poll_deregister(ifp); 1687 /* Enable interrupt even in error case */ 1688 NFE_LOCK(sc); 1689 nfe_enable_intr(sc); 1690 ifp->if_capenable &= ~IFCAP_POLLING; 1691 NFE_UNLOCK(sc); 1692 } 1693 } 1694 #endif /* DEVICE_POLLING */ 1695 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1696 (mask & IFCAP_HWCSUM) != 0) { 1697 ifp->if_capenable ^= IFCAP_HWCSUM; 1698 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 1699 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 1700 ifp->if_hwassist |= NFE_CSUM_FEATURES; 1701 else 1702 ifp->if_hwassist &= ~NFE_CSUM_FEATURES; 1703 init++; 1704 } 1705 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 && 1706 (mask & IFCAP_VLAN_HWTAGGING) != 0) { 1707 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1708 init++; 1709 } 1710 /* 1711 * XXX 1712 * It seems that VLAN stripping requires Rx checksum offload. 1713 * Unfortunately FreeBSD has no way to disable only Rx side 1714 * VLAN stripping. So when we know Rx checksum offload is 1715 * disabled turn entire hardware VLAN assist off. 
1716 */ 1717 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) == 1718 (NFE_HW_CSUM | NFE_HW_VLAN)) { 1719 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 1720 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING; 1721 } 1722 1723 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && 1724 (mask & IFCAP_TSO4) != 0) { 1725 ifp->if_capenable ^= IFCAP_TSO4; 1726 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && 1727 (IFCAP_TSO4 & ifp->if_capabilities) != 0) 1728 ifp->if_hwassist |= CSUM_TSO; 1729 else 1730 ifp->if_hwassist &= ~CSUM_TSO; 1731 } 1732 1733 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1734 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1735 nfe_init(sc); 1736 } 1737 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) 1738 VLAN_CAPABILITIES(ifp); 1739 break; 1740 default: 1741 error = ether_ioctl(ifp, cmd, data); 1742 break; 1743 } 1744 1745 return (error); 1746 } 1747 1748 1749 static int 1750 nfe_intr(void *arg) 1751 { 1752 struct nfe_softc *sc; 1753 uint32_t status; 1754 1755 sc = (struct nfe_softc *)arg; 1756 1757 status = NFE_READ(sc, sc->nfe_irq_status); 1758 if (status == 0 || status == 0xffffffff) 1759 return (FILTER_STRAY); 1760 nfe_disable_intr(sc); 1761 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1762 1763 return (FILTER_HANDLED); 1764 } 1765 1766 1767 static void 1768 nfe_int_task(void *arg, int pending) 1769 { 1770 struct nfe_softc *sc = arg; 1771 struct ifnet *ifp = sc->nfe_ifp; 1772 uint32_t r; 1773 int domore; 1774 1775 NFE_LOCK(sc); 1776 1777 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { 1778 nfe_enable_intr(sc); 1779 NFE_UNLOCK(sc); 1780 return; /* not for us */ 1781 } 1782 NFE_WRITE(sc, sc->nfe_irq_status, r); 1783 1784 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); 1785 1786 #ifdef DEVICE_POLLING 1787 if (ifp->if_capenable & IFCAP_POLLING) { 1788 NFE_UNLOCK(sc); 1789 return; 1790 } 1791 #endif 1792 1793 if (r & NFE_IRQ_LINK) { 1794 NFE_READ(sc, NFE_PHY_STATUS); 1795 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1796 DPRINTF(sc, "link state changed\n"); 1797 } 1798 1799 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1800 NFE_UNLOCK(sc); 1801 nfe_enable_intr(sc); 1802 return; 1803 } 1804 1805 domore = 0; 1806 /* check Rx ring */ 1807 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) 1808 domore = nfe_jrxeof(sc, sc->nfe_process_limit); 1809 else 1810 domore = nfe_rxeof(sc, sc->nfe_process_limit); 1811 /* check Tx ring */ 1812 nfe_txeof(sc); 1813 1814 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1815 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 1816 1817 NFE_UNLOCK(sc); 1818 1819 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { 1820 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); 1821 return; 1822 } 1823 1824 /* Reenable interrupts. */ 1825 nfe_enable_intr(sc); 1826 } 1827 1828 1829 static __inline void 1830 nfe_discard_rxbuf(struct nfe_softc *sc, int idx) 1831 { 1832 struct nfe_desc32 *desc32; 1833 struct nfe_desc64 *desc64; 1834 struct nfe_rx_data *data; 1835 struct mbuf *m; 1836 1837 data = &sc->rxq.data[idx]; 1838 m = data->m; 1839 1840 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1841 desc64 = &sc->rxq.desc64[idx]; 1842 /* VLAN packet may have overwritten it. 
*/ 1843 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1844 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1845 desc64->length = htole16(m->m_len); 1846 desc64->flags = htole16(NFE_RX_READY); 1847 } else { 1848 desc32 = &sc->rxq.desc32[idx]; 1849 desc32->length = htole16(m->m_len); 1850 desc32->flags = htole16(NFE_RX_READY); 1851 } 1852 } 1853 1854 1855 static __inline void 1856 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) 1857 { 1858 struct nfe_desc32 *desc32; 1859 struct nfe_desc64 *desc64; 1860 struct nfe_rx_data *data; 1861 struct mbuf *m; 1862 1863 data = &sc->jrxq.jdata[idx]; 1864 m = data->m; 1865 1866 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1867 desc64 = &sc->jrxq.jdesc64[idx]; 1868 /* VLAN packet may have overwritten it. */ 1869 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); 1870 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); 1871 desc64->length = htole16(m->m_len); 1872 desc64->flags = htole16(NFE_RX_READY); 1873 } else { 1874 desc32 = &sc->jrxq.jdesc32[idx]; 1875 desc32->length = htole16(m->m_len); 1876 desc32->flags = htole16(NFE_RX_READY); 1877 } 1878 } 1879 1880 1881 static int 1882 nfe_newbuf(struct nfe_softc *sc, int idx) 1883 { 1884 struct nfe_rx_data *data; 1885 struct nfe_desc32 *desc32; 1886 struct nfe_desc64 *desc64; 1887 struct mbuf *m; 1888 bus_dma_segment_t segs[1]; 1889 bus_dmamap_t map; 1890 int nsegs; 1891 1892 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1893 if (m == NULL) 1894 return (ENOBUFS); 1895 1896 m->m_len = m->m_pkthdr.len = MCLBYTES; 1897 m_adj(m, ETHER_ALIGN); 1898 1899 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, 1900 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 1901 m_freem(m); 1902 return (ENOBUFS); 1903 } 1904 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1905 1906 data = &sc->rxq.data[idx]; 1907 if (data->m != NULL) { 1908 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 1909 BUS_DMASYNC_POSTREAD); 1910 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); 1911 } 1912 map = data->rx_data_map; 1913 data->rx_data_map = sc->rxq.rx_spare_map; 1914 sc->rxq.rx_spare_map = map; 1915 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, 1916 BUS_DMASYNC_PREREAD); 1917 data->paddr = segs[0].ds_addr; 1918 data->m = m; 1919 /* update mapping address in h/w descriptor */ 1920 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1921 desc64 = &sc->rxq.desc64[idx]; 1922 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 1923 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 1924 desc64->length = htole16(segs[0].ds_len); 1925 desc64->flags = htole16(NFE_RX_READY); 1926 } else { 1927 desc32 = &sc->rxq.desc32[idx]; 1928 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 1929 desc32->length = htole16(segs[0].ds_len); 1930 desc32->flags = htole16(NFE_RX_READY); 1931 } 1932 1933 return (0); 1934 } 1935 1936 1937 static int 1938 nfe_jnewbuf(struct nfe_softc *sc, int idx) 1939 { 1940 struct nfe_rx_data *data; 1941 struct nfe_desc32 *desc32; 1942 struct nfe_desc64 *desc64; 1943 struct mbuf *m; 1944 bus_dma_segment_t segs[1]; 1945 bus_dmamap_t map; 1946 int nsegs; 1947 1948 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 1949 if (m == NULL) 1950 return (ENOBUFS); 1951 if ((m->m_flags & M_EXT) == 0) { 1952 m_freem(m); 1953 return (ENOBUFS); 1954 } 1955 m->m_pkthdr.len = m->m_len = MJUM9BYTES; 1956 m_adj(m, ETHER_ALIGN); 1957 1958 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, 1959 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { 
1960 m_freem(m); 1961 return (ENOBUFS); 1962 } 1963 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1964 1965 data = &sc->jrxq.jdata[idx]; 1966 if (data->m != NULL) { 1967 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 1968 BUS_DMASYNC_POSTREAD); 1969 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); 1970 } 1971 map = data->rx_data_map; 1972 data->rx_data_map = sc->jrxq.jrx_spare_map; 1973 sc->jrxq.jrx_spare_map = map; 1974 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, 1975 BUS_DMASYNC_PREREAD); 1976 data->paddr = segs[0].ds_addr; 1977 data->m = m; 1978 /* update mapping address in h/w descriptor */ 1979 if (sc->nfe_flags & NFE_40BIT_ADDR) { 1980 desc64 = &sc->jrxq.jdesc64[idx]; 1981 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); 1982 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 1983 desc64->length = htole16(segs[0].ds_len); 1984 desc64->flags = htole16(NFE_RX_READY); 1985 } else { 1986 desc32 = &sc->jrxq.jdesc32[idx]; 1987 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); 1988 desc32->length = htole16(segs[0].ds_len); 1989 desc32->flags = htole16(NFE_RX_READY); 1990 } 1991 1992 return (0); 1993 } 1994 1995 1996 static int 1997 nfe_rxeof(struct nfe_softc *sc, int count) 1998 { 1999 struct ifnet *ifp = sc->nfe_ifp; 2000 struct nfe_desc32 *desc32; 2001 struct nfe_desc64 *desc64; 2002 struct nfe_rx_data *data; 2003 struct mbuf *m; 2004 uint16_t flags; 2005 int len, prog; 2006 uint32_t vtag = 0; 2007 2008 NFE_LOCK_ASSERT(sc); 2009 2010 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2011 BUS_DMASYNC_POSTREAD); 2012 2013 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { 2014 if (count <= 0) 2015 break; 2016 count--; 2017 2018 data = &sc->rxq.data[sc->rxq.cur]; 2019 2020 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2021 desc64 = &sc->rxq.desc64[sc->rxq.cur]; 2022 vtag = le32toh(desc64->physaddr[1]); 2023 flags = le16toh(desc64->flags); 2024 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2025 } else { 2026 desc32 = &sc->rxq.desc32[sc->rxq.cur]; 2027 flags = le16toh(desc32->flags); 2028 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2029 } 2030 2031 if (flags & NFE_RX_READY) 2032 break; 2033 prog++; 2034 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2035 if (!(flags & NFE_RX_VALID_V1)) { 2036 ifp->if_ierrors++; 2037 nfe_discard_rxbuf(sc, sc->rxq.cur); 2038 continue; 2039 } 2040 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2041 flags &= ~NFE_RX_ERROR; 2042 len--; /* fix buffer length */ 2043 } 2044 } else { 2045 if (!(flags & NFE_RX_VALID_V2)) { 2046 ifp->if_ierrors++; 2047 nfe_discard_rxbuf(sc, sc->rxq.cur); 2048 continue; 2049 } 2050 2051 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2052 flags &= ~NFE_RX_ERROR; 2053 len--; /* fix buffer length */ 2054 } 2055 } 2056 2057 if (flags & NFE_RX_ERROR) { 2058 ifp->if_ierrors++; 2059 nfe_discard_rxbuf(sc, sc->rxq.cur); 2060 continue; 2061 } 2062 2063 m = data->m; 2064 if (nfe_newbuf(sc, sc->rxq.cur) != 0) { 2065 ifp->if_iqdrops++; 2066 nfe_discard_rxbuf(sc, sc->rxq.cur); 2067 continue; 2068 } 2069 2070 if ((vtag & NFE_RX_VTAG) != 0 && 2071 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2072 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2073 m->m_flags |= M_VLANTAG; 2074 } 2075 2076 m->m_pkthdr.len = m->m_len = len; 2077 m->m_pkthdr.rcvif = ifp; 2078 2079 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2080 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2081 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2082 
m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2083 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2084 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2085 m->m_pkthdr.csum_flags |= 2086 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2087 m->m_pkthdr.csum_data = 0xffff; 2088 } 2089 } 2090 } 2091 2092 ifp->if_ipackets++; 2093 2094 NFE_UNLOCK(sc); 2095 (*ifp->if_input)(ifp, m); 2096 NFE_LOCK(sc); 2097 } 2098 2099 if (prog > 0) 2100 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, 2101 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2102 2103 return (count > 0 ? 0 : EAGAIN); 2104 } 2105 2106 2107 static int 2108 nfe_jrxeof(struct nfe_softc *sc, int count) 2109 { 2110 struct ifnet *ifp = sc->nfe_ifp; 2111 struct nfe_desc32 *desc32; 2112 struct nfe_desc64 *desc64; 2113 struct nfe_rx_data *data; 2114 struct mbuf *m; 2115 uint16_t flags; 2116 int len, prog; 2117 uint32_t vtag = 0; 2118 2119 NFE_LOCK_ASSERT(sc); 2120 2121 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2122 BUS_DMASYNC_POSTREAD); 2123 2124 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), 2125 vtag = 0) { 2126 if (count <= 0) 2127 break; 2128 count--; 2129 2130 data = &sc->jrxq.jdata[sc->jrxq.jcur]; 2131 2132 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2133 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; 2134 vtag = le32toh(desc64->physaddr[1]); 2135 flags = le16toh(desc64->flags); 2136 len = le16toh(desc64->length) & NFE_RX_LEN_MASK; 2137 } else { 2138 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; 2139 flags = le16toh(desc32->flags); 2140 len = le16toh(desc32->length) & NFE_RX_LEN_MASK; 2141 } 2142 2143 if (flags & NFE_RX_READY) 2144 break; 2145 prog++; 2146 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2147 if (!(flags & NFE_RX_VALID_V1)) { 2148 ifp->if_ierrors++; 2149 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2150 continue; 2151 } 2152 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 2153 flags &= ~NFE_RX_ERROR; 2154 len--; /* fix buffer length */ 2155 } 2156 } else { 2157 if (!(flags & NFE_RX_VALID_V2)) { 2158 ifp->if_ierrors++; 2159 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2160 continue; 2161 } 2162 2163 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 2164 flags &= ~NFE_RX_ERROR; 2165 len--; /* fix buffer length */ 2166 } 2167 } 2168 2169 if (flags & NFE_RX_ERROR) { 2170 ifp->if_ierrors++; 2171 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2172 continue; 2173 } 2174 2175 m = data->m; 2176 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { 2177 ifp->if_iqdrops++; 2178 nfe_discard_jrxbuf(sc, sc->jrxq.jcur); 2179 continue; 2180 } 2181 2182 if ((vtag & NFE_RX_VTAG) != 0 && 2183 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2184 m->m_pkthdr.ether_vtag = vtag & 0xffff; 2185 m->m_flags |= M_VLANTAG; 2186 } 2187 2188 m->m_pkthdr.len = m->m_len = len; 2189 m->m_pkthdr.rcvif = ifp; 2190 2191 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2192 if ((flags & NFE_RX_IP_CSUMOK) != 0) { 2193 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2194 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2195 if ((flags & NFE_RX_TCP_CSUMOK) != 0 || 2196 (flags & NFE_RX_UDP_CSUMOK) != 0) { 2197 m->m_pkthdr.csum_flags |= 2198 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2199 m->m_pkthdr.csum_data = 0xffff; 2200 } 2201 } 2202 } 2203 2204 ifp->if_ipackets++; 2205 2206 NFE_UNLOCK(sc); 2207 (*ifp->if_input)(ifp, m); 2208 NFE_LOCK(sc); 2209 } 2210 2211 if (prog > 0) 2212 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, 2213 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2214 2215 return (count > 0 ? 
0 : EAGAIN); 2216 } 2217 2218 2219 static void 2220 nfe_txeof(struct nfe_softc *sc) 2221 { 2222 struct ifnet *ifp = sc->nfe_ifp; 2223 struct nfe_desc32 *desc32; 2224 struct nfe_desc64 *desc64; 2225 struct nfe_tx_data *data = NULL; 2226 uint16_t flags; 2227 int cons, prog; 2228 2229 NFE_LOCK_ASSERT(sc); 2230 2231 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2232 BUS_DMASYNC_POSTREAD); 2233 2234 prog = 0; 2235 for (cons = sc->txq.next; cons != sc->txq.cur; 2236 NFE_INC(cons, NFE_TX_RING_COUNT)) { 2237 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2238 desc64 = &sc->txq.desc64[cons]; 2239 flags = le16toh(desc64->flags); 2240 } else { 2241 desc32 = &sc->txq.desc32[cons]; 2242 flags = le16toh(desc32->flags); 2243 } 2244 2245 if (flags & NFE_TX_VALID) 2246 break; 2247 2248 prog++; 2249 sc->txq.queued--; 2250 data = &sc->txq.data[cons]; 2251 2252 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 2253 if ((flags & NFE_TX_LASTFRAG_V1) == 0) 2254 continue; 2255 if ((flags & NFE_TX_ERROR_V1) != 0) { 2256 device_printf(sc->nfe_dev, 2257 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); 2258 2259 ifp->if_oerrors++; 2260 } else 2261 ifp->if_opackets++; 2262 } else { 2263 if ((flags & NFE_TX_LASTFRAG_V2) == 0) 2264 continue; 2265 if ((flags & NFE_TX_ERROR_V2) != 0) { 2266 device_printf(sc->nfe_dev, 2267 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); 2268 ifp->if_oerrors++; 2269 } else 2270 ifp->if_opackets++; 2271 } 2272 2273 /* last fragment of the mbuf chain transmitted */ 2274 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); 2275 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, 2276 BUS_DMASYNC_POSTWRITE); 2277 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); 2278 m_freem(data->m); 2279 data->m = NULL; 2280 } 2281 2282 if (prog > 0) { 2283 sc->nfe_force_tx = 0; 2284 sc->txq.next = cons; 2285 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2286 if (sc->txq.queued == 0) 2287 sc->nfe_watchdog_timer = 0; 2288 } 2289 } 2290 2291 static int 2292 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) 2293 { 2294 struct nfe_desc32 *desc32 = NULL; 2295 struct nfe_desc64 *desc64 = NULL; 2296 bus_dmamap_t map; 2297 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 2298 int error, i, nsegs, prod, si; 2299 uint32_t tso_segsz; 2300 uint16_t cflags, flags; 2301 struct mbuf *m; 2302 2303 prod = si = sc->txq.cur; 2304 map = sc->txq.data[prod].tx_data_map; 2305 2306 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, 2307 &nsegs, BUS_DMA_NOWAIT); 2308 if (error == EFBIG) { 2309 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER); 2310 if (m == NULL) { 2311 m_freem(*m_head); 2312 *m_head = NULL; 2313 return (ENOBUFS); 2314 } 2315 *m_head = m; 2316 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, 2317 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2318 if (error != 0) { 2319 m_freem(*m_head); 2320 *m_head = NULL; 2321 return (ENOBUFS); 2322 } 2323 } else if (error != 0) 2324 return (error); 2325 if (nsegs == 0) { 2326 m_freem(*m_head); 2327 *m_head = NULL; 2328 return (EIO); 2329 } 2330 2331 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { 2332 bus_dmamap_unload(sc->txq.tx_data_tag, map); 2333 return (ENOBUFS); 2334 } 2335 2336 m = *m_head; 2337 cflags = flags = 0; 2338 tso_segsz = 0; 2339 if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { 2340 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2341 cflags |= NFE_TX_IP_CSUM; 2342 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2343 cflags |= NFE_TX_TCP_UDP_CSUM; 2344 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2345 
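			/* UDP uses the same checksum request bit as TCP. */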
cflags |= NFE_TX_TCP_UDP_CSUM; 2346 } 2347 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2348 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz << 2349 NFE_TX_TSO_SHIFT; 2350 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); 2351 cflags |= NFE_TX_TSO; 2352 } 2353 2354 for (i = 0; i < nsegs; i++) { 2355 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2356 desc64 = &sc->txq.desc64[prod]; 2357 desc64->physaddr[0] = 2358 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 2359 desc64->physaddr[1] = 2360 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2361 desc64->vtag = 0; 2362 desc64->length = htole16(segs[i].ds_len - 1); 2363 desc64->flags = htole16(flags); 2364 } else { 2365 desc32 = &sc->txq.desc32[prod]; 2366 desc32->physaddr = 2367 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 2368 desc32->length = htole16(segs[i].ds_len - 1); 2369 desc32->flags = htole16(flags); 2370 } 2371 2372 /* 2373 * Setting of the valid bit in the first descriptor is 2374 * deferred until the whole chain is fully setup. 2375 */ 2376 flags |= NFE_TX_VALID; 2377 2378 sc->txq.queued++; 2379 NFE_INC(prod, NFE_TX_RING_COUNT); 2380 } 2381 2382 /* 2383 * the whole mbuf chain has been DMA mapped, fix last/first descriptor. 2384 * csum flags, vtag and TSO belong to the first fragment only. 2385 */ 2386 if (sc->nfe_flags & NFE_40BIT_ADDR) { 2387 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 2388 desc64 = &sc->txq.desc64[si]; 2389 if ((m->m_flags & M_VLANTAG) != 0) 2390 desc64->vtag = htole32(NFE_TX_VTAG | 2391 m->m_pkthdr.ether_vtag); 2392 if (tso_segsz != 0) { 2393 /* 2394 * XXX 2395 * The following indicates the descriptor element 2396 * is a 32bit quantity. 2397 */ 2398 desc64->length |= htole16((uint16_t)tso_segsz); 2399 desc64->flags |= htole16(tso_segsz >> 16); 2400 } 2401 /* 2402 * finally, set the valid/checksum/TSO bit in the first 2403 * descriptor. 2404 */ 2405 desc64->flags |= htole16(NFE_TX_VALID | cflags); 2406 } else { 2407 if (sc->nfe_flags & NFE_JUMBO_SUP) 2408 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); 2409 else 2410 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); 2411 desc32 = &sc->txq.desc32[si]; 2412 if (tso_segsz != 0) { 2413 /* 2414 * XXX 2415 * The following indicates the descriptor element 2416 * is a 32bit quantity. 2417 */ 2418 desc32->length |= htole16((uint16_t)tso_segsz); 2419 desc32->flags |= htole16(tso_segsz >> 16); 2420 } 2421 /* 2422 * finally, set the valid/checksum/TSO bit in the first 2423 * descriptor. 
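	 * Every later fragment already carries NFE_TX_VALID from the
	 * loop above, so flagging the first one last is what hands the
	 * complete chain to the chip.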
2424 */ 2425 desc32->flags |= htole16(NFE_TX_VALID | cflags); 2426 } 2427 2428 sc->txq.cur = prod; 2429 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; 2430 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; 2431 sc->txq.data[prod].tx_data_map = map; 2432 sc->txq.data[prod].m = m; 2433 2434 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); 2435 2436 return (0); 2437 } 2438 2439 2440 static void 2441 nfe_setmulti(struct nfe_softc *sc) 2442 { 2443 struct ifnet *ifp = sc->nfe_ifp; 2444 struct ifmultiaddr *ifma; 2445 int i; 2446 uint32_t filter; 2447 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2448 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 2449 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2450 }; 2451 2452 NFE_LOCK_ASSERT(sc); 2453 2454 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2455 bzero(addr, ETHER_ADDR_LEN); 2456 bzero(mask, ETHER_ADDR_LEN); 2457 goto done; 2458 } 2459 2460 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2461 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2462 2463 IF_ADDR_LOCK(ifp); 2464 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2465 u_char *addrp; 2466 2467 if (ifma->ifma_addr->sa_family != AF_LINK) 2468 continue; 2469 2470 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 2471 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2472 u_int8_t mcaddr = addrp[i]; 2473 addr[i] &= mcaddr; 2474 mask[i] &= ~mcaddr; 2475 } 2476 } 2477 IF_ADDR_UNLOCK(ifp); 2478 2479 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2480 mask[i] |= addr[i]; 2481 } 2482 2483 done: 2484 addr[0] |= 0x01; /* make sure multicast bit is set */ 2485 2486 NFE_WRITE(sc, NFE_MULTIADDR_HI, 2487 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 2488 NFE_WRITE(sc, NFE_MULTIADDR_LO, 2489 addr[5] << 8 | addr[4]); 2490 NFE_WRITE(sc, NFE_MULTIMASK_HI, 2491 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 2492 NFE_WRITE(sc, NFE_MULTIMASK_LO, 2493 mask[5] << 8 | mask[4]); 2494 2495 filter = NFE_READ(sc, NFE_RXFILTER); 2496 filter &= NFE_PFF_RX_PAUSE; 2497 filter |= NFE_RXFILTER_MAGIC; 2498 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M; 2499 NFE_WRITE(sc, NFE_RXFILTER, filter); 2500 } 2501 2502 2503 static void 2504 nfe_tx_task(void *arg, int pending) 2505 { 2506 struct ifnet *ifp; 2507 2508 ifp = (struct ifnet *)arg; 2509 nfe_start(ifp); 2510 } 2511 2512 2513 static void 2514 nfe_start(struct ifnet *ifp) 2515 { 2516 struct nfe_softc *sc = ifp->if_softc; 2517 struct mbuf *m0; 2518 int enq; 2519 2520 NFE_LOCK(sc); 2521 2522 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2523 IFF_DRV_RUNNING || sc->nfe_link == 0) { 2524 NFE_UNLOCK(sc); 2525 return; 2526 } 2527 2528 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 2529 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 2530 if (m0 == NULL) 2531 break; 2532 2533 if (nfe_encap(sc, &m0) != 0) { 2534 if (m0 == NULL) 2535 break; 2536 IFQ_DRV_PREPEND(&ifp->if_snd, m0); 2537 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2538 break; 2539 } 2540 enq++; 2541 ETHER_BPF_MTAP(ifp, m0); 2542 } 2543 2544 if (enq > 0) { 2545 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, 2546 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2547 2548 /* kick Tx */ 2549 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2550 2551 /* 2552 * Set a timeout in case the chip goes out to lunch. 
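	 * The counter armed below is decremented once a second by
	 * nfe_tick() via nfe_watchdog() and cleared in nfe_txeof() when
	 * the Tx queue drains.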
2553 */ 2554 sc->nfe_watchdog_timer = 5; 2555 } 2556 2557 NFE_UNLOCK(sc); 2558 } 2559 2560 2561 static void 2562 nfe_watchdog(struct ifnet *ifp) 2563 { 2564 struct nfe_softc *sc = ifp->if_softc; 2565 2566 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer) 2567 return; 2568 2569 /* Check if we've lost Tx completion interrupt. */ 2570 nfe_txeof(sc); 2571 if (sc->txq.queued == 0) { 2572 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2573 "-- recovering\n"); 2574 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2575 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); 2576 return; 2577 } 2578 /* Check if we've lost start Tx command. */ 2579 sc->nfe_force_tx++; 2580 if (sc->nfe_force_tx <= 3) { 2581 /* 2582 * If this is the case for watchdog timeout, the following 2583 * code should go to nfe_txeof(). 2584 */ 2585 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 2586 return; 2587 } 2588 sc->nfe_force_tx = 0; 2589 2590 if_printf(ifp, "watchdog timeout\n"); 2591 2592 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2593 ifp->if_oerrors++; 2594 nfe_init_locked(sc); 2595 } 2596 2597 2598 static void 2599 nfe_init(void *xsc) 2600 { 2601 struct nfe_softc *sc = xsc; 2602 2603 NFE_LOCK(sc); 2604 nfe_init_locked(sc); 2605 NFE_UNLOCK(sc); 2606 } 2607 2608 2609 static void 2610 nfe_init_locked(void *xsc) 2611 { 2612 struct nfe_softc *sc = xsc; 2613 struct ifnet *ifp = sc->nfe_ifp; 2614 struct mii_data *mii; 2615 uint32_t val; 2616 int error; 2617 2618 NFE_LOCK_ASSERT(sc); 2619 2620 mii = device_get_softc(sc->nfe_miibus); 2621 2622 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2623 return; 2624 2625 nfe_stop(ifp); 2626 2627 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; 2628 2629 nfe_init_tx_ring(sc, &sc->txq); 2630 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) 2631 error = nfe_init_jrx_ring(sc, &sc->jrxq); 2632 else 2633 error = nfe_init_rx_ring(sc, &sc->rxq); 2634 if (error != 0) { 2635 device_printf(sc->nfe_dev, 2636 "initialization failed: no memory for rx buffers\n"); 2637 nfe_stop(ifp); 2638 return; 2639 } 2640 2641 val = 0; 2642 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) 2643 val |= NFE_MAC_ADDR_INORDER; 2644 NFE_WRITE(sc, NFE_TX_UNK, val); 2645 NFE_WRITE(sc, NFE_STATUS, 0); 2646 2647 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) 2648 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); 2649 2650 sc->rxtxctl = NFE_RXTX_BIT2; 2651 if (sc->nfe_flags & NFE_40BIT_ADDR) 2652 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 2653 else if (sc->nfe_flags & NFE_JUMBO_SUP) 2654 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 2655 2656 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2657 sc->rxtxctl |= NFE_RXTX_RXCSUM; 2658 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2659 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; 2660 2661 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 2662 DELAY(10); 2663 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2664 2665 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2666 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 2667 else 2668 NFE_WRITE(sc, NFE_VTAG_CTL, 0); 2669 2670 NFE_WRITE(sc, NFE_SETUP_R6, 0); 2671 2672 /* set MAC address */ 2673 nfe_set_macaddr(sc, IF_LLADDR(ifp)); 2674 2675 /* tell MAC where rings are in memory */ 2676 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) { 2677 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2678 NFE_ADDR_HI(sc->jrxq.jphysaddr)); 2679 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 2680 NFE_ADDR_LO(sc->jrxq.jphysaddr)); 2681 } else { 2682 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 2683 NFE_ADDR_HI(sc->rxq.physaddr)); 2684 NFE_WRITE(sc, 
NFE_RX_RING_ADDR_LO, 2685 NFE_ADDR_LO(sc->rxq.physaddr)); 2686 } 2687 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr)); 2688 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 2689 2690 NFE_WRITE(sc, NFE_RING_SIZE, 2691 (NFE_RX_RING_COUNT - 1) << 16 | 2692 (NFE_TX_RING_COUNT - 1)); 2693 2694 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize); 2695 2696 /* force MAC to wakeup */ 2697 val = NFE_READ(sc, NFE_PWR_STATE); 2698 if ((val & NFE_PWR_WAKEUP) == 0) 2699 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP); 2700 DELAY(10); 2701 val = NFE_READ(sc, NFE_PWR_STATE); 2702 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID); 2703 2704 #if 1 2705 /* configure interrupts coalescing/mitigation */ 2706 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 2707 #else 2708 /* no interrupt mitigation: one interrupt per packet */ 2709 NFE_WRITE(sc, NFE_IMTIMER, 970); 2710 #endif 2711 2712 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100); 2713 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 2714 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 2715 2716 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 2717 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 2718 2719 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 2720 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC); 2721 2722 sc->rxtxctl &= ~NFE_RXTX_BIT2; 2723 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 2724 DELAY(10); 2725 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 2726 2727 /* set Rx filter */ 2728 nfe_setmulti(sc); 2729 2730 /* enable Rx */ 2731 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 2732 2733 /* enable Tx */ 2734 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 2735 2736 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 2737 2738 #ifdef DEVICE_POLLING 2739 if (ifp->if_capenable & IFCAP_POLLING) 2740 nfe_disable_intr(sc); 2741 else 2742 #endif 2743 nfe_set_intr(sc); 2744 nfe_enable_intr(sc); /* enable interrupts */ 2745 2746 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2747 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2748 2749 sc->nfe_link = 0; 2750 mii_mediachg(mii); 2751 2752 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); 2753 } 2754 2755 2756 static void 2757 nfe_stop(struct ifnet *ifp) 2758 { 2759 struct nfe_softc *sc = ifp->if_softc; 2760 struct nfe_rx_ring *rx_ring; 2761 struct nfe_jrx_ring *jrx_ring; 2762 struct nfe_tx_ring *tx_ring; 2763 struct nfe_rx_data *rdata; 2764 struct nfe_tx_data *tdata; 2765 int i; 2766 2767 NFE_LOCK_ASSERT(sc); 2768 2769 sc->nfe_watchdog_timer = 0; 2770 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2771 2772 callout_stop(&sc->nfe_stat_ch); 2773 2774 /* abort Tx */ 2775 NFE_WRITE(sc, NFE_TX_CTL, 0); 2776 2777 /* disable Rx */ 2778 NFE_WRITE(sc, NFE_RX_CTL, 0); 2779 2780 /* disable interrupts */ 2781 nfe_disable_intr(sc); 2782 2783 sc->nfe_link = 0; 2784 2785 /* free Rx and Tx mbufs still in the queues. 
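	 * Each loaded DMA map is synced and unloaded before its mbuf is
	 * freed, so no device-visible mapping outlives its buffer.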
*/
2786 	rx_ring = &sc->rxq;
2787 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2788 		rdata = &rx_ring->data[i];
2789 		if (rdata->m != NULL) {
2790 			bus_dmamap_sync(rx_ring->rx_data_tag,
2791 			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2792 			bus_dmamap_unload(rx_ring->rx_data_tag,
2793 			    rdata->rx_data_map);
2794 			m_freem(rdata->m);
2795 			rdata->m = NULL;
2796 		}
2797 	}
2798 
2799 	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2800 		jrx_ring = &sc->jrxq;
2801 		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2802 			rdata = &jrx_ring->jdata[i];
2803 			if (rdata->m != NULL) {
2804 				bus_dmamap_sync(jrx_ring->jrx_data_tag,
2805 				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2806 				bus_dmamap_unload(jrx_ring->jrx_data_tag,
2807 				    rdata->rx_data_map);
2808 				m_freem(rdata->m);
2809 				rdata->m = NULL;
2810 			}
2811 		}
2812 	}
2813 
2814 	tx_ring = &sc->txq;
2815 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2816 		tdata = &tx_ring->data[i];
2817 		if (tdata->m != NULL) {
2818 			bus_dmamap_sync(tx_ring->tx_data_tag,
2819 			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2820 			bus_dmamap_unload(tx_ring->tx_data_tag,
2821 			    tdata->tx_data_map);
2822 			m_freem(tdata->m);
2823 			tdata->m = NULL;
2824 		}
2825 	}
2826 }
2827 
2828 
2829 static int
2830 nfe_ifmedia_upd(struct ifnet *ifp)
2831 {
2832 	struct nfe_softc *sc = ifp->if_softc;
2833 	struct mii_data *mii;
2834 
2835 	NFE_LOCK(sc);
2836 	mii = device_get_softc(sc->nfe_miibus);
2837 	mii_mediachg(mii);
2838 	NFE_UNLOCK(sc);
2839 
2840 	return (0);
2841 }
2842 
2843 
2844 static void
2845 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2846 {
2847 	struct nfe_softc *sc;
2848 	struct mii_data *mii;
2849 
2850 	sc = ifp->if_softc;
2851 
2852 	NFE_LOCK(sc);
2853 	mii = device_get_softc(sc->nfe_miibus);
2854 	mii_pollstat(mii);
2855 	NFE_UNLOCK(sc);
2856 
2857 	ifmr->ifm_active = mii->mii_media_active;
2858 	ifmr->ifm_status = mii->mii_media_status;
2859 }
2860 
2861 
2862 static void
2863 nfe_tick(void *xsc)
2864 {
2865 	struct nfe_softc *sc;
2866 	struct mii_data *mii;
2867 	struct ifnet *ifp;
2868 
2869 	sc = (struct nfe_softc *)xsc;
2870 
2871 	NFE_LOCK_ASSERT(sc);
2872 
2873 	ifp = sc->nfe_ifp;
2874 
2875 	mii = device_get_softc(sc->nfe_miibus);
2876 	mii_tick(mii);
2877 	nfe_watchdog(ifp);
2878 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2879 }
2880 
2881 
2882 static int
2883 nfe_shutdown(device_t dev)
2884 {
2885 	struct nfe_softc *sc;
2886 	struct ifnet *ifp;
2887 
2888 	sc = device_get_softc(dev);
2889 
2890 	NFE_LOCK(sc);
2891 	ifp = sc->nfe_ifp;
2892 	nfe_stop(ifp);
2893 	/* nfe_reset(sc); */
2894 	NFE_UNLOCK(sc);
2895 
2896 	return (0);
2897 }
2898 
2899 
2900 static void
2901 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2902 {
2903 	uint32_t val;
2904 
2905 	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2906 		val = NFE_READ(sc, NFE_MACADDR_LO);
2907 		addr[0] = (val >> 8) & 0xff;
2908 		addr[1] = (val & 0xff);
2909 
2910 		val = NFE_READ(sc, NFE_MACADDR_HI);
2911 		addr[2] = (val >> 24) & 0xff;
2912 		addr[3] = (val >> 16) & 0xff;
2913 		addr[4] = (val >> 8) & 0xff;
2914 		addr[5] = (val & 0xff);
2915 	} else {
2916 		val = NFE_READ(sc, NFE_MACADDR_LO);
2917 		addr[5] = (val >> 8) & 0xff;
2918 		addr[4] = (val & 0xff);
2919 
2920 		val = NFE_READ(sc, NFE_MACADDR_HI);
2921 		addr[3] = (val >> 24) & 0xff;
2922 		addr[2] = (val >> 16) & 0xff;
2923 		addr[1] = (val >> 8) & 0xff;
2924 		addr[0] = (val & 0xff);
2925 	}
2926 }
2927 
2928 
2929 static void
2930 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2931 {
2932 
2933 	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
2934 	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2935 	    addr[1] << 8 | addr[0]);
2936
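	/*
	 * NFE_MACADDR_LO holds the last two bytes of the address and
	 * NFE_MACADDR_HI the first four, i.e. the layout read back in the
	 * NFE_CORRECT_MACADDR case of nfe_get_macaddr() above.
	 */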
} 2937 2938 2939 /* 2940 * Map a single buffer address. 2941 */ 2942 2943 static void 2944 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2945 { 2946 struct nfe_dmamap_arg *ctx; 2947 2948 if (error != 0) 2949 return; 2950 2951 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 2952 2953 ctx = (struct nfe_dmamap_arg *)arg; 2954 ctx->nfe_busaddr = segs[0].ds_addr; 2955 } 2956 2957 2958 static int 2959 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2960 { 2961 int error, value; 2962 2963 if (!arg1) 2964 return (EINVAL); 2965 value = *(int *)arg1; 2966 error = sysctl_handle_int(oidp, &value, 0, req); 2967 if (error || !req->newptr) 2968 return (error); 2969 if (value < low || value > high) 2970 return (EINVAL); 2971 *(int *)arg1 = value; 2972 2973 return (0); 2974 } 2975 2976 2977 static int 2978 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) 2979 { 2980 2981 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, 2982 NFE_PROC_MAX)); 2983 } 2984
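
/*
 * Usage sketch (not part of this file): a handler such as
 * sysctl_hw_nfe_proc_limit() is normally registered from the attach
 * routine with SYSCTL_ADD_PROC().  The node name "process_limit", the
 * softc field nfe_process_limit and the description string below are
 * illustrative assumptions, not definitions taken from this driver.
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
 *	    "max number of Rx events to process");
 *
 * sysctl_int_range() then clamps any newly written value to the
 * [NFE_PROC_MIN, NFE_PROC_MAX] range before storing it.
 */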