/*-
 * Copyright (c) 2013 Tsubai Masanari
 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
 */

/* Driver for VMware vmxnet3 virtual ethernet devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "if_vmxreg.h"
#include "if_vmxvar.h"

#include "opt_inet.h"
#include "opt_inet6.h"

/* Always enable for now - useful for queue hangs. */
#define VMXNET3_DEBUG_SYSCTL

#ifdef VMXNET3_FAILPOINTS
#include <sys/fail.h>
static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
    "vmxnet3 fail points");
#define VMXNET3_FP _debug_fail_point_vmxnet3
#endif

static int vmxnet3_probe(device_t);
static int vmxnet3_attach(device_t);
static int vmxnet3_detach(device_t);
static int vmxnet3_shutdown(device_t);

static int vmxnet3_alloc_resources(struct vmxnet3_softc *);
static void vmxnet3_free_resources(struct vmxnet3_softc *);
static int vmxnet3_check_version(struct vmxnet3_softc *);
static void vmxnet3_initial_config(struct vmxnet3_softc *);

static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
    struct vmxnet3_interrupt *);
static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
static int vmxnet3_setup_interrupts(struct vmxnet3_softc *);
static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *);

static void vmxnet3_free_interrupt(struct vmxnet3_softc *,
    struct vmxnet3_interrupt *);
static void vmxnet3_free_interrupts(struct vmxnet3_softc *);

static int vmxnet3_init_rxq(struct vmxnet3_softc *, int);
static int vmxnet3_init_txq(struct vmxnet3_softc *, int);
static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);

static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
static void vmxnet3_free_shared_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
static void vmxnet3_free_txq_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
static void vmxnet3_free_rxq_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
static void vmxnet3_free_queue_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
static void vmxnet3_init_shared_data(struct vmxnet3_softc *);
static void vmxnet3_reinit_interface(struct vmxnet3_softc *);
static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
static int vmxnet3_alloc_data(struct vmxnet3_softc *);
static void vmxnet3_free_data(struct vmxnet3_softc *);
static int vmxnet3_setup_interface(struct vmxnet3_softc *);

static void vmxnet3_evintr(struct vmxnet3_softc *);
static void vmxnet3_txq_eof(struct vmxnet3_txqueue *);
static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
    struct vmxnet3_rxring *, int);
static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
static void vmxnet3_legacy_intr(void *);
static void vmxnet3_txq_intr(void *);
static void vmxnet3_rxq_intr(void *);
static void vmxnet3_event_intr(void *);

static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static void vmxnet3_stop(struct vmxnet3_softc *);

static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static int vmxnet3_reinit_queues(struct vmxnet3_softc *);
static int vmxnet3_enable_device(struct vmxnet3_softc *);
static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
static int vmxnet3_reinit(struct vmxnet3_softc *);
static void vmxnet3_init_locked(struct vmxnet3_softc *);
static void vmxnet3_init(void *);

static int vmxnet3_txq_offload_ctx(struct mbuf *, int *, int *, int *);
static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
    bus_dmamap_t, bus_dma_segment_t [], int *);
static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
static void vmxnet3_start_locked(struct ifnet *);
static void vmxnet3_start(struct ifnet *);

static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
    uint16_t);
static void vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
static void vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
static void vmxnet3_set_rxfilter(struct vmxnet3_softc *);
static int vmxnet3_change_mtu(struct vmxnet3_softc *, int);
static int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);

static int vmxnet3_watchdog(struct vmxnet3_txqueue *);
static void vmxnet3_tick(void *);
static void vmxnet3_link_status(struct vmxnet3_softc *);
static void vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
static int vmxnet3_media_change(struct ifnet *);
static void vmxnet3_set_lladdr(struct vmxnet3_softc *);
static void vmxnet3_get_lladdr(struct vmxnet3_softc *);

static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void vmxnet3_setup_sysctl(struct vmxnet3_softc *);

static void vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
    uint32_t);
static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
static void vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
    uint32_t);
static void vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);

static void vmxnet3_enable_intr(struct vmxnet3_softc *, int);
static void vmxnet3_disable_intr(struct vmxnet3_softc *, int);
static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);

static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
    bus_size_t, struct vmxnet3_dma_alloc *);
static void vmxnet3_dma_free(struct vmxnet3_softc *,
    struct vmxnet3_dma_alloc *);
static int vmxnet3_tunable_int(struct vmxnet3_softc *,
    const char *, int);

typedef enum {
	VMXNET3_BARRIER_RD,
	VMXNET3_BARRIER_WR,
	VMXNET3_BARRIER_RDWR,
} vmxnet3_barrier_t;

static void vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
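
/*
 * The ring-size tunables declared below can be set from loader.conf(5);
 * for example (values are illustrative only and are still clamped to the
 * MIN/MAX/MASK limits applied in vmxnet3_initial_config()):
 *
 *	hw.vmx.txndesc="512"
 *	hw.vmx.rxndesc="256"
 */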

/* Tunables. */
static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);

static device_method_t vmxnet3_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, vmxnet3_probe),
	DEVMETHOD(device_attach, vmxnet3_attach),
	DEVMETHOD(device_detach, vmxnet3_detach),
	DEVMETHOD(device_shutdown, vmxnet3_shutdown),

	DEVMETHOD_END
};

static driver_t vmxnet3_driver = {
	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
};

static devclass_t vmxnet3_devclass;
DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);

MODULE_DEPEND(vmx, pci, 1, 1, 1);
MODULE_DEPEND(vmx, ether, 1, 1, 1);

#define VMXNET3_VMWARE_VENDOR_ID	0x15AD
#define VMXNET3_VMWARE_DEVICE_ID	0x07B0

static int
vmxnet3_probe(device_t dev)
{

	if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
	    pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
		device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
vmxnet3_attach(device_t dev)
{
	struct vmxnet3_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vmx_dev = dev;

	pci_enable_busmaster(dev);

	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);

	vmxnet3_initial_config(sc);

	error = vmxnet3_alloc_resources(sc);
	if (error)
		goto fail;

	error = vmxnet3_check_version(sc);
	if (error)
		goto fail;

	error = vmxnet3_alloc_rxtx_queues(sc);
	if (error)
		goto fail;

	error = vmxnet3_alloc_interrupts(sc);
	if (error)
		goto fail;

	error = vmxnet3_alloc_data(sc);
	if (error)
		goto fail;

	error = vmxnet3_setup_interface(sc);
	if (error)
		goto fail;

	error = vmxnet3_setup_interrupts(sc);
	if (error) {
		ether_ifdetach(sc->vmx_ifp);
		device_printf(dev, "could not set up interrupt\n");
		goto fail;
	}

	vmxnet3_setup_sysctl(sc);
	vmxnet3_link_status(sc);

fail:
	if (error)
		vmxnet3_detach(dev);

	return (error);
}

static int
vmxnet3_detach(device_t dev)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vmx_ifp;

	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		VMXNET3_CORE_LOCK(sc);
		vmxnet3_stop(sc);
		VMXNET3_CORE_UNLOCK(sc);
		callout_drain(&sc->vmx_tick);
	}

	if (sc->vmx_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
		sc->vmx_vlan_attach = NULL;
	}
	if (sc->vmx_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vmx_vlan_detach);
		sc->vmx_vlan_detach = NULL;
	}

	vmxnet3_free_interrupts(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vmx_ifp = NULL;
	}

	ifmedia_removeall(&sc->vmx_media);

	vmxnet3_free_data(sc);
	vmxnet3_free_resources(sc);
	vmxnet3_free_rxtx_queues(sc);

	VMXNET3_CORE_LOCK_DESTROY(sc);

	return (0);
}

static int
vmxnet3_shutdown(device_t dev)
{

	return (0);
}
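
/*
 * Map the device BARs: BAR0 carries the queue doorbell registers written
 * through vmxnet3_write_bar0(), BAR1 carries the version/command registers
 * accessed through vmxnet3_read_bar1()/vmxnet3_write_bar1(), and a third
 * BAR holds the MSI-X table when MSI-X is available.
 */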

static int
vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
{
	device_t dev;
	int rid;

	dev = sc->vmx_dev;

	rid = PCIR_BAR(0);
	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vmx_res0 == NULL) {
		device_printf(dev,
		    "could not map BAR0 memory\n");
		return (ENXIO);
	}

	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);

	rid = PCIR_BAR(1);
	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->vmx_res1 == NULL) {
		device_printf(dev,
		    "could not map BAR1 memory\n");
		return (ENXIO);
	}

	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);

	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
		rid = PCIR_BAR(2);
		sc->vmx_msix_res = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	}

	if (sc->vmx_msix_res == NULL)
		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;

	return (0);
}

static void
vmxnet3_free_resources(struct vmxnet3_softc *sc)
{
	device_t dev;
	int rid;

	dev = sc->vmx_dev;

	if (sc->vmx_res0 != NULL) {
		rid = PCIR_BAR(0);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
		sc->vmx_res0 = NULL;
	}

	if (sc->vmx_res1 != NULL) {
		rid = PCIR_BAR(1);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
		sc->vmx_res1 = NULL;
	}

	if (sc->vmx_msix_res != NULL) {
		rid = PCIR_BAR(2);
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    sc->vmx_msix_res);
		sc->vmx_msix_res = NULL;
	}
}

static int
vmxnet3_check_version(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t version;

	dev = sc->vmx_dev;

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported hardware version %#x\n",
		    version);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported UPT version %#x\n", version);
		return (ENOTSUP);
	}
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);

	return (0);
}

static void
vmxnet3_initial_config(struct vmxnet3_softc *sc)
{
	int ndesc;

	/*
	 * BMV Much of the work is already done, but this driver does
	 * not support multiqueue yet.
	 */
	sc->vmx_ntxqueues = VMXNET3_TX_QUEUES;
	sc->vmx_nrxqueues = VMXNET3_RX_QUEUES;

	ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
	if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
		ndesc = VMXNET3_DEF_TX_NDESC;
	if (ndesc & VMXNET3_MASK_TX_NDESC)
		ndesc &= ~VMXNET3_MASK_TX_NDESC;
	sc->vmx_ntxdescs = ndesc;

	ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
	if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
		ndesc = VMXNET3_DEF_RX_NDESC;
	if (ndesc & VMXNET3_MASK_RX_NDESC)
		ndesc &= ~VMXNET3_MASK_RX_NDESC;
	sc->vmx_nrxdescs = ndesc;
	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
}
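
/*
 * MSI-X allocation: one vector per Rx queue, one per Tx queue, plus one
 * dedicated vector for device events.  Any shortfall causes the caller
 * to fall back to MSI and then to a legacy INTx interrupt.
 */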

static int
vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nmsix, cnt, required;

	dev = sc->vmx_dev;

	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
		return (1);

	/* Allocate an additional vector for the events interrupt. */
	required = sc->vmx_nrxqueues + sc->vmx_ntxqueues + 1;

	nmsix = pci_msix_count(dev);
	if (nmsix < required)
		return (1);

	cnt = required;
	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = required;
		return (0);
	} else
		pci_release_msi(dev);

	return (1);
}

static int
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	int nmsi, cnt, required;

	dev = sc->vmx_dev;
	required = 1;

	nmsi = pci_msi_count(dev);
	if (nmsi < required)
		return (1);

	cnt = required;
	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = 1;
		return (0);
	} else
		pci_release_msi(dev);

	return (1);
}

static int
vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
{

	sc->vmx_nintrs = 1;
	return (0);
}

static int
vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
    struct vmxnet3_interrupt *intr)
{
	struct resource *irq;

	irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
	if (irq == NULL)
		return (ENXIO);

	intr->vmxi_irq = irq;
	intr->vmxi_rid = rid;

	return (0);
}

static int
vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
{
	int i, rid, flags, error;

	rid = 0;
	flags = RF_ACTIVE;

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
		flags |= RF_SHAREABLE;
	else
		rid = 1;

	for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
		error = vmxnet3_alloc_interrupt(sc, rid, flags,
		    &sc->vmx_intrs[i]);
		if (error)
			return (error);
	}

	return (0);
}

/*
 * NOTE: We only support the simple case of each Rx and Tx queue on its
 * own MSIX vector. This is good enough until we support multiqueue.
 */
static int
vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_interrupt *intr;
	enum intr_type type;
	int i, error;

	dev = sc->vmx_dev;
	intr = &sc->vmx_intrs[0];
	type = INTR_TYPE_NET | INTR_MPSAFE;

	for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
		txq = &sc->vmx_txq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
		    vmxnet3_txq_intr, txq, &intr->vmxi_handler);
		if (error)
			return (error);
		txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
		rxq = &sc->vmx_rxq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
		    vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
		if (error)
			return (error);
		rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
	}

	error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
	    vmxnet3_event_intr, sc, &intr->vmxi_handler);
	if (error)
		return (error);
	sc->vmx_event_intr_idx = intr->vmxi_rid - 1;

	return (0);
}

static int
vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
{
	struct vmxnet3_interrupt *intr;
	int i, error;

	intr = &sc->vmx_intrs[0];
	error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
	    &intr->vmxi_handler);

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		sc->vmx_txq[i].vxtxq_intr_idx = 0;
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		sc->vmx_rxq[i].vxrxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

	return (error);
}

/*
 * XXX BMV Should probably reorganize the attach and just do
 * this in vmxnet3_init_shared_data().
 */
static void
vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs = txq->vxtxq_ts;
		txs->intr_idx = txq->vxtxq_intr_idx;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs = rxq->vxrxq_rs;
		rxs->intr_idx = rxq->vxrxq_intr_idx;
	}
}

static int
vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_intr_resources(sc);
	if (error)
		return (error);

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_MSIX:
		error = vmxnet3_setup_msix_interrupts(sc);
		break;
	case VMXNET3_IT_MSI:
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_setup_legacy_interrupt(sc);
		break;
	default:
		panic("%s: invalid interrupt type %d", __func__,
		    sc->vmx_intr_type);
	}

	if (error == 0)
		vmxnet3_set_interrupt_idx(sc);

	return (error);
}

static int
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint32_t config;
	int error;

	dev = sc->vmx_dev;
	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSIX;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSIX:
		error = vmxnet3_alloc_msix_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		/* FALLTHROUGH */
	case VMXNET3_IT_MSI:
		error = vmxnet3_alloc_msi_interrupts(sc);
		if (error == 0)
			break;
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
		/* FALLTHROUGH */
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
		if (error == 0)
			break;
		/* FALLTHROUGH */
	default:
		sc->vmx_intr_type = -1;
		device_printf(dev, "cannot allocate any interrupt resources\n");
		return (ENXIO);
	}

	return (error);
}

static void
vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
    struct vmxnet3_interrupt *intr)
{
	device_t dev;

	dev = sc->vmx_dev;

	if (intr->vmxi_handler != NULL) {
		bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
		intr->vmxi_handler = NULL;
	}

	if (intr->vmxi_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
		    intr->vmxi_irq);
		intr->vmxi_irq = NULL;
		intr->vmxi_rid = -1;
	}
}

static void
vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
{
	int i;

	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);

	if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
	    sc->vmx_intr_type == VMXNET3_IT_MSIX)
		pci_release_msi(sc->vmx_dev);
}
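
/*
 * Each Rx queue is backed by VMXNET3_RXRINGS_PERQ command rings, and its
 * completion ring is sized to cover all of them.  Only the rxbuf arrays
 * are allocated here - the DMA descriptor memory comes later from
 * vmxnet3_alloc_rxq_data().
 */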

static int
vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	int i;

	rxq = &sc->vmx_rxq[q];

	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);

	rxq->vxrxq_sc = sc;
	rxq->vxrxq_id = q;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_rid = i;
		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
		rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
		    sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (rxr->vxrxr_rxbuf == NULL)
			return (ENOMEM);

		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
	}

	return (0);
}

static int
vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
{
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;

	txq = &sc->vmx_txq[q];
	txr = &txq->vxtxq_cmd_ring;

	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);

	txq->vxtxq_sc = sc;
	txq->vxtxq_id = q;

	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
	txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
	    sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (txr->vxtxr_txbuf == NULL)
		return (ENOMEM);

	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;

	return (0);
}

static int
vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i, error;

	sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
	    sc->vmx_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
	sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
	    sc->vmx_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		error = vmxnet3_init_rxq(sc, i);
		if (error)
			return (error);
	}

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		error = vmxnet3_init_txq(sc, i);
		if (error)
			return (error);
	}

	return (0);
}

static void
vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_rxring *rxr;
	int i;

	rxq->vxrxq_sc = NULL;
	rxq->vxrxq_id = -1;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		if (rxr->vxrxr_rxbuf != NULL) {
			free(rxr->vxrxr_rxbuf, M_DEVBUF);
			rxr->vxrxr_rxbuf = NULL;
		}
	}

	if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
		mtx_destroy(&rxq->vxrxq_mtx);
}

static void
vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;

	txr = &txq->vxtxq_cmd_ring;

	txq->vxtxq_sc = NULL;
	txq->vxtxq_id = -1;

	if (txr->vxtxr_txbuf != NULL) {
		free(txr->vxtxr_txbuf, M_DEVBUF);
		txr->vxtxr_txbuf = NULL;
	}

	if (mtx_initialized(&txq->vxtxq_mtx) != 0)
		mtx_destroy(&txq->vxtxq_mtx);
}

static void
vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
{
	int i;

	if (sc->vmx_rxq != NULL) {
		for (i = 0; i < sc->vmx_nrxqueues; i++)
			vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
		free(sc->vmx_rxq, M_DEVBUF);
		sc->vmx_rxq = NULL;
	}

	if (sc->vmx_txq != NULL) {
		for (i = 0; i < sc->vmx_ntxqueues; i++)
			vmxnet3_destroy_txq(&sc->vmx_txq[i]);
		free(sc->vmx_txq, M_DEVBUF);
		sc->vmx_txq = NULL;
	}
}
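
/*
 * Allocate the DMA memory shared with the device: the driver_shared
 * structure itself plus one vmxnet3_txq_shared/vmxnet3_rxq_shared block
 * per queue, carved out of a single allocation below.
 */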

static int
vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	uint8_t *kva;
	size_t size;
	int i, error;

	dev = sc->vmx_dev;

	size = sizeof(struct vmxnet3_driver_shared);
	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
	if (error) {
		device_printf(dev, "cannot alloc shared memory\n");
		return (error);
	}
	sc->vmx_ds = (struct vmxnet3_driver_shared *)
	    sc->vmx_ds_dma.dma_vaddr;

	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
	if (error) {
		device_printf(dev, "cannot alloc queue shared memory\n");
		return (error);
	}
	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
	kva = sc->vmx_qs;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
		kva += sizeof(struct vmxnet3_txq_shared);
	}
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
		kva += sizeof(struct vmxnet3_rxq_shared);
	}

	return (0);
}

static void
vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_qs != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
		sc->vmx_qs = NULL;
	}

	if (sc->vmx_ds != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
		sc->vmx_ds = NULL;
	}
}

static int
vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	size_t descsz, compsz;
	int i, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);

		error = bus_dma_tag_create(bus_get_dma_tag(dev),
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    VMXNET3_TSO_MAXSIZE,	/* maxsize */
		    VMXNET3_TX_MAXSEGS,		/* nsegments */
		    VMXNET3_TX_MAXSEGSIZE,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &txr->vxtxr_txtag);
		if (error) {
			device_printf(dev,
			    "unable to create Tx buffer tag for queue %d\n", q);
			return (error);
		}

		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx descriptors for "
			    "queue %d error %d\n", q, error);
			return (error);
		}
		txr->vxtxr_txd =
		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;

		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Tx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		txc->vxcr_u.txcd =
		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
			if (error) {
				device_printf(dev, "unable to create Tx buf "
				    "dmamap for queue %d idx %d\n", q, i);
				return (error);
			}
		}
	}

	return (0);
}
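
/*
 * Undo vmxnet3_alloc_txq_data(): destroy the per-buffer DMA maps, free the
 * descriptor and completion rings, and tear down the Tx DMA tag.
 */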

static void
vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txbuf *txb;
	int i, q;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			txb = &txr->vxtxr_txbuf[i];
			if (txb->vtxb_dmamap != NULL) {
				bus_dmamap_destroy(txr->vxtxr_txtag,
				    txb->vtxb_dmamap);
				txb->vtxb_dmamap = NULL;
			}
		}

		if (txc->vxcr_u.txcd != NULL) {
			vmxnet3_dma_free(sc, &txc->vxcr_dma);
			txc->vxcr_u.txcd = NULL;
		}

		if (txr->vxtxr_txd != NULL) {
			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
			txr->vxtxr_txd = NULL;
		}

		if (txr->vxtxr_txtag != NULL) {
			bus_dma_tag_destroy(txr->vxtxr_txtag);
			txr->vxtxr_txtag = NULL;
		}
	}
}

static int
vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	int descsz, compsz;
	int i, j, q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_rxq[q];
		rxc = &rxq->vxrxq_comp_ring;
		compsz = 0;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			descsz = rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxdesc);
			compsz += rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxcompdesc);

			error = bus_dma_tag_create(bus_get_dma_tag(dev),
			    1, 0,		/* alignment, boundary */
			    BUS_SPACE_MAXADDR,	/* lowaddr */
			    BUS_SPACE_MAXADDR,	/* highaddr */
			    NULL, NULL,		/* filter, filterarg */
			    MJUMPAGESIZE,	/* maxsize */
			    1,			/* nsegments */
			    MJUMPAGESIZE,	/* maxsegsize */
			    0,			/* flags */
			    NULL, NULL,		/* lockfunc, lockarg */
			    &rxr->vxrxr_rxtag);
			if (error) {
				device_printf(dev,
				    "unable to create Rx buffer tag for "
				    "queue %d\n", q);
				return (error);
			}

			error = vmxnet3_dma_malloc(sc, descsz, 512,
			    &rxr->vxrxr_dma);
			if (error) {
				device_printf(dev, "cannot allocate Rx "
				    "descriptors for queue %d/%d error %d\n",
				    i, q, error);
				return (error);
			}
			rxr->vxrxr_rxd =
			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
		}

		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
		if (error) {
			device_printf(dev, "cannot alloc Rx comp descriptors "
			    "for queue %d error %d\n", q, error);
			return (error);
		}
		rxc->vxcr_u.rxcd =
		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
			    &rxr->vxrxr_spare_dmap);
			if (error) {
				device_printf(dev, "unable to create spare "
				    "dmamap for queue %d/%d error %d\n",
				    q, i, error);
				return (error);
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
				if (error) {
					device_printf(dev, "unable to create "
					    "dmamap for queue %d/%d slot %d "
					    "error %d\n",
					    q, i, j, error);
					return (error);
				}
			}
		}
	}

	return (0);
}

static void
vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxbuf *rxb;
	int i, j, q;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_rxq[q];
		rxc = &rxq->vxrxq_comp_ring;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_spare_dmap != NULL) {
				bus_dmamap_destroy(rxr->vxrxr_rxtag,
				    rxr->vxrxr_spare_dmap);
				rxr->vxrxr_spare_dmap = NULL;
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				rxb = &rxr->vxrxr_rxbuf[j];
				if (rxb->vrxb_dmamap != NULL) {
					bus_dmamap_destroy(rxr->vxrxr_rxtag,
					    rxb->vrxb_dmamap);
					rxb->vrxb_dmamap = NULL;
				}
			}
		}

		if (rxc->vxcr_u.rxcd != NULL) {
			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
			rxc->vxcr_u.rxcd = NULL;
		}

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_rxd != NULL) {
				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
				rxr->vxrxr_rxd = NULL;
			}

			if (rxr->vxrxr_rxtag != NULL) {
				bus_dma_tag_destroy(rxr->vxrxr_rxtag);
				rxr->vxrxr_rxtag = NULL;
			}
		}
	}
}

static int
vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_txq_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_rxq_data(sc);
	if (error)
		return (error);

	return (0);
}

static void
vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
{

	if (sc->vmx_rxq != NULL)
		vmxnet3_free_rxq_data(sc);

	if (sc->vmx_txq != NULL)
		vmxnet3_free_txq_data(sc);
}

static int
vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
	    32, &sc->vmx_mcast_dma);
	if (error)
		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
	else
		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;

	return (error);
}

static void
vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
{

	if (sc->vmx_mcast != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
		sc->vmx_mcast = NULL;
	}
}

static void
vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
{
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;
	int i;

	ds = sc->vmx_ds;

	/*
	 * Initialize fields of the shared data that remain the same across
	 * reinits. Note the shared data is zero'd when allocated.
	 */

	ds->magic = VMXNET3_REV1_MAGIC;

	/* DriverInfo */
	ds->version = VMXNET3_DRIVER_VERSION;
	ds->guest = VMXNET3_GOS_FREEBSD |
#ifdef __LP64__
	    VMXNET3_GOS_64BIT;
#else
	    VMXNET3_GOS_32BIT;
#endif
	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;

	/* Misc. conf */
	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
	ds->nrxsg_max = sc->vmx_max_rxsegs;

	/* Interrupt control. */
	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
	ds->nintr = sc->vmx_nintrs;
	ds->evintr = sc->vmx_event_intr_idx;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;

	for (i = 0; i < sc->vmx_nintrs; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;

	/* Receive filter. */
	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;

	/* Tx queues */
	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs = txq->vxtxq_ts;

		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
		txs->driver_data = vtophys(txq);
		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
	}

	/* Rx queues */
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs = rxq->vxrxq_rs;

		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
		rxs->driver_data = vtophys(rxq);
		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
	}
}

static void
vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;

	/* Use the current MAC address. */
	bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
	vmxnet3_set_lladdr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_TSO;	/* No CSUM_TSO_IPV6. */
}
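
/*
 * Refresh the fields of the shared area that depend on the current
 * interface state (offload features, MTU, queue counts) and point the
 * device at its physical address via the DSL/DSH registers.
 */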

static void
vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	struct vmxnet3_driver_shared *ds;

	ifp = sc->vmx_ifp;
	ds = sc->vmx_ds;

	ds->upt_features = 0;
	if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
		ds->upt_features |= UPT1_F_CSUM;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ds->upt_features |= UPT1_F_VLAN;
	if (ifp->if_capenable & IFCAP_LRO)
		ds->upt_features |= UPT1_F_LRO;

	ds->mtu = ifp->if_mtu;
	ds->ntxqueue = sc->vmx_ntxqueues;
	ds->nrxqueue = sc->vmx_nrxqueues;

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
	    (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
}

static int
vmxnet3_alloc_data(struct vmxnet3_softc *sc)
{
	int error;

	error = vmxnet3_alloc_shared_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_queue_data(sc);
	if (error)
		return (error);

	error = vmxnet3_alloc_mcast_table(sc);
	if (error)
		return (error);

	vmxnet3_init_shared_data(sc);

	return (0);
}

static void
vmxnet3_free_data(struct vmxnet3_softc *sc)
{

	vmxnet3_free_mcast_table(sc);
	vmxnet3_free_queue_data(sc);
	vmxnet3_free_shared_data(sc);
}

static int
vmxnet3_setup_interface(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vmx_dev;

	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#if __FreeBSD_version < 1000025
	ifp->if_baudrate = 1000000000;
#else
	if_initbaudrate(ifp, IF_Gbps(10));
#endif
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vmxnet3_init;
	ifp->if_ioctl = vmxnet3_ioctl;
	ifp->if_start = vmxnet3_start;
	ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
	IFQ_SET_READY(&ifp->if_snd);

	vmxnet3_get_lladdr(sc);
	ether_ifattach(ifp, sc->vmx_lladdr);

	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
	ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* These capabilities are not enabled by default. */
	ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;

	sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
	    vmxnet3_media_status);
	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);

	return (0);
}

static void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	uint32_t event;
	int reset;

	dev = sc->vmx_dev;
	ifp = sc->vmx_ifp;
	reset = 0;

	VMXNET3_CORE_LOCK(sc);

	/* Clear events. */
	event = sc->vmx_ds->event;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);

	if (event & VMXNET3_EVENT_LINK)
		vmxnet3_link_status(sc);

	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		reset = 1;
		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
		ts = sc->vmx_txq[0].vxtxq_ts;
		if (ts->stopped != 0)
			device_printf(dev, "Tx queue error %#x\n", ts->error);
		rs = sc->vmx_rxq[0].vxrxq_rs;
		if (rs->stopped != 0)
			device_printf(dev, "Rx queue error %#x\n", rs->error);
		device_printf(dev, "Rx/Tx queue error event ... resetting\n");
	}

	if (event & VMXNET3_EVENT_DIC)
		device_printf(dev, "device implementation change event\n");
	if (event & VMXNET3_EVENT_DEBUG)
		device_printf(dev, "debug event\n");

	if (reset != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vmxnet3_init_locked(sc);
	}

	VMXNET3_CORE_UNLOCK(sc);
}

static void
vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txcompdesc *txcd;
	struct vmxnet3_txbuf *txb;
	u_int sop;

	sc = txq->vxtxq_sc;
	ifp = sc->vmx_ifp;
	txr = &txq->vxtxq_cmd_ring;
	txc = &txq->vxtxq_comp_ring;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	for (;;) {
		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
		if (txcd->gen != txc->vxcr_gen)
			break;
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++txc->vxcr_next == txc->vxcr_ndesc) {
			txc->vxcr_next = 0;
			txc->vxcr_gen ^= 1;
		}

		sop = txr->vxtxr_next;
		txb = &txr->vxtxr_txbuf[sop];

		if (txb->vtxb_m != NULL) {
			bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);

			m_freem(txb->vtxb_m);
			txb->vtxb_m = NULL;

			ifp->if_opackets++;
		}

		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
	}

	if (txr->vxtxr_head == txr->vxtxr_next)
		txq->vxtxq_watchdog = 0;
}
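
/*
 * Attach a new mbuf cluster to the next fill slot of the given Rx ring.
 * Slots in ring 0 whose index is a multiple of vmx_rx_max_chain receive a
 * head buffer (MCLBYTES); all other slots, and everything in ring 1,
 * receive MJUMPAGESIZE body buffers.
 */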

static int
vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
{
	struct ifnet *ifp;
	struct mbuf *m;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxbuf *rxb;
	bus_dma_tag_t tag;
	bus_dmamap_t dmap;
	bus_dma_segment_t segs[1];
	int idx, clsize, btype, flags, nsegs, error;

	ifp = sc->vmx_ifp;
	tag = rxr->vxrxr_rxtag;
	dmap = rxr->vxrxr_spare_dmap;
	idx = rxr->vxrxr_fill;
	rxd = &rxr->vxrxr_rxd[idx];
	rxb = &rxr->vxrxr_rxbuf[idx];

#ifdef VMXNET3_FAILPOINTS
	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
	if (rxr->vxrxr_rid != 0)
		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
#endif

	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
		flags = M_PKTHDR;
		clsize = MCLBYTES;
		btype = VMXNET3_BTYPE_HEAD;
	} else {
#if __FreeBSD_version < 902001
		/*
		 * These mbufs will never be used for the start of a frame.
		 * Roughly prior to branching releng/9.2, the load_mbuf_sg()
		 * required the mbuf to always be a packet header. Avoid
		 * unnecessary mbuf initialization in newer versions where
		 * that is not the case.
		 */
		flags = M_PKTHDR;
#else
		flags = 0;
#endif
		clsize = MJUMPAGESIZE;
		btype = VMXNET3_BTYPE_BODY;
	}

	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
	if (m == NULL) {
		sc->vmx_stats.vmst_mgetcl_failed++;
		return (ENOBUFS);
	}

	if (btype == VMXNET3_BTYPE_HEAD) {
		m->m_len = m->m_pkthdr.len = clsize;
		m_adj(m, ETHER_ALIGN);
	} else
		m->m_len = clsize;

	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		sc->vmx_stats.vmst_mbuf_load_failed++;
		return (error);
	}
	KASSERT(nsegs == 1,
	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
#if __FreeBSD_version < 902001
	if (btype == VMXNET3_BTYPE_BODY)
		m->m_flags &= ~M_PKTHDR;
#endif

	if (rxb->vrxb_m != NULL) {
		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
	}

	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
	rxb->vrxb_dmamap = dmap;
	rxb->vrxb_m = m;

	rxd->addr = segs[0].ds_addr;
	rxd->len = segs[0].ds_len;
	rxd->btype = btype;
	rxd->gen = rxr->vxrxr_gen;

	vmxnet3_rxr_increment_fill(rxr);
	return (0);
}

static void
vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxring *rxr, int idx)
{
	struct vmxnet3_rxdesc *rxd;

	rxd = &rxr->vxrxr_rxd[idx];
	rxd->gen = rxr->vxrxr_gen;
	vmxnet3_rxr_increment_fill(rxr);
}

static void
vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxcompdesc *rxcd;
	int idx, eof;

	sc = rxq->vxrxq_sc;
	rxc = &rxq->vxrxq_comp_ring;

	do {
		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen)
			break;		/* Not expected. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
			rxc->vxcr_next = 0;
			rxc->vxcr_gen ^= 1;
		}

		idx = rxcd->rxd_idx;
		eof = rxcd->eop;
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
	} while (!eof);
}

static void
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{

	if (rxcd->ipv4) {
		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (rxcd->ipcsum_ok)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
	}

	if (!rxcd->fragment) {
		if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
		}
	}
}

static void
vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = rxq->vxrxq_sc;
	ifp = sc->vmx_ifp;

	if (rxcd->error) {
		ifp->if_ierrors++;
		m_freem(m);
		return;
	}

	if (!rxcd->no_csum)
		vmxnet3_rx_csum(rxcd, m);
	if (rxcd->vlan) {
		m->m_flags |= M_VLANTAG;
		m->m_pkthdr.ether_vtag = rxcd->vtag;
	}

	ifp->if_ipackets++;
	VMXNET3_RXQ_UNLOCK(rxq);
	(*ifp->if_input)(ifp, m);
	VMXNET3_RXQ_LOCK(rxq);
}

static void
vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct mbuf *m, *m_head, *m_tail;
	int idx, length;

	sc = rxq->vxrxq_sc;
	ifp = sc->vmx_ifp;
	rxc = &rxq->vxrxq_comp_ring;
	m_head = m_tail = NULL;

	VMXNET3_RXQ_LOCK_ASSERT(rxq);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	for (;;) {
		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen)
			break;
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
			rxc->vxcr_next = 0;
			rxc->vxcr_gen ^= 1;
		}

		idx = rxcd->rxd_idx;
		length = rxcd->len;
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		rxd = &rxr->vxrxr_rxd[idx];

		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
		KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
		    __func__, rxcd->qid, idx));

		/*
		 * The host may skip descriptors. We detect this when this
		 * descriptor does not match the previous fill index. Catch
		 * up with the host now.
		 */
		if (__predict_false(rxr->vxrxr_fill != idx)) {
			while (rxr->vxrxr_fill != idx) {
				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
				    rxr->vxrxr_gen;
				vmxnet3_rxr_increment_fill(rxr);
			}
		}

		if (rxcd->sop) {
			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
			    ("%s: start of frame w/o head buffer", __func__));
			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
			    ("%s: start of frame not in ring 0", __func__));
			KASSERT((idx % sc->vmx_rx_max_chain) == 0,
			    ("%s: start of frame at unexpected index %d (%d)",
			     __func__, idx, sc->vmx_rx_max_chain));
			KASSERT(m_head == NULL,
			    ("%s: duplicate start of frame?", __func__));

			if (length == 0) {
				/* Just ignore this descriptor. */
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				goto nextp;
			}

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				ifp->if_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				goto nextp;
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = length;
			m->m_pkthdr.csum_flags = 0;
			m_head = m_tail = m;

		} else {
			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
			    ("%s: non start of frame w/o body buffer", __func__));
			KASSERT(m_head != NULL,
			    ("%s: frame not started?", __func__));

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				ifp->if_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
				if (!rxcd->eop)
					vmxnet3_rxq_discard_chain(rxq);
				m_freem(m_head);
				m_head = m_tail = NULL;
				goto nextp;
			}

			m->m_len = length;
			m_head->m_pkthdr.len += length;
			m_tail->m_next = m;
			m_tail = m;
		}

		if (rxcd->eop) {
			vmxnet3_rxq_input(rxq, rxcd, m_head);
			m_head = m_tail = NULL;

			/* Must recheck after dropping the Rx lock. */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				break;
		}

nextp:
		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
			int qid = rxcd->qid;
			bus_size_t r;

			idx = (idx + 1) % rxr->vxrxr_ndesc;
			if (qid >= sc->vmx_nrxqueues) {
				qid -= sc->vmx_nrxqueues;
				r = VMXNET3_BAR0_RXH2(qid);
			} else
				r = VMXNET3_BAR0_RXH1(qid);
			vmxnet3_write_bar0(sc, r, idx);
		}
	}
}

static void
vmxnet3_legacy_intr(void *xsc)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_txqueue *txq;
	struct ifnet *ifp;

	sc = xsc;
	rxq = &sc->vmx_rxq[0];
	txq = &sc->vmx_txq[0];
	ifp = sc->vmx_ifp;

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
			return;
	}
	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(sc);

	if (sc->vmx_ds->event != 0)
		vmxnet3_evintr(sc);

	VMXNET3_RXQ_LOCK(rxq);
	vmxnet3_rxq_eof(rxq);
	VMXNET3_RXQ_UNLOCK(rxq);

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_txq_eof(txq);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vmxnet3_start_locked(ifp);
	VMXNET3_TXQ_UNLOCK(txq);

	vmxnet3_enable_all_intrs(sc);
}

static void
vmxnet3_txq_intr(void *xtxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	struct ifnet *ifp;

	txq = xtxq;
	sc = txq->vxtxq_sc;
	ifp = sc->vmx_ifp;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_txq_eof(txq);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vmxnet3_start_locked(ifp);
	VMXNET3_TXQ_UNLOCK(txq);

	vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
}

static void
vmxnet3_rxq_intr(void *xrxq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;

	rxq = xrxq;
	sc = rxq->vxrxq_sc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);

	VMXNET3_RXQ_LOCK(rxq);
	vmxnet3_rxq_eof(rxq);
	VMXNET3_RXQ_UNLOCK(rxq);

	vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
}

static void
vmxnet3_event_intr(void *xsc)
{
	struct vmxnet3_softc *sc;

	sc = xsc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);

	if (sc->vmx_ds->event != 0)
		vmxnet3_evintr(sc);

	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
}

static void
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;
	struct vmxnet3_txbuf *txb;
	int i;

	txr = &txq->vxtxq_cmd_ring;

	for (i = 0; i < txr->vxtxr_ndesc; i++) {
		txb = &txr->vxtxr_txbuf[i];

		if (txb->vtxb_m == NULL)
			continue;

		bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
		m_freem(txb->vtxb_m);
		txb->vtxb_m = NULL;
	}
}
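
/*
 * Free any mbufs still attached to the Rx rings when the interface is
 * stopped; the DMA maps themselves stay around for the next reinit.
 */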

static void
vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_rxbuf *rxb;
	int i, j;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		for (j = 0; j < rxr->vxrxr_ndesc; j++) {
			rxb = &rxr->vxrxr_rxbuf[j];

			if (rxb->vrxb_m == NULL)
				continue;
			bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
			m_freem(rxb->vrxb_m);
			rxb->vrxb_m = NULL;
		}
	}
}

static void
vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
{
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_txqueue *txq;
	int i;

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		VMXNET3_RXQ_LOCK(rxq);
		VMXNET3_RXQ_UNLOCK(rxq);
	}

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		VMXNET3_TXQ_LOCK(txq);
		VMXNET3_TXQ_UNLOCK(txq);
	}
}

static void
vmxnet3_stop(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	int q;

	ifp = sc->vmx_ifp;
	VMXNET3_CORE_LOCK_ASSERT(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sc->vmx_link_active = 0;
	callout_stop(&sc->vmx_tick);

	/* Disable interrupts. */
	vmxnet3_disable_all_intrs(sc);
	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);

	vmxnet3_stop_rendezvous(sc);

	for (q = 0; q < sc->vmx_ntxqueues; q++)
		vmxnet3_txstop(sc, &sc->vmx_txq[q]);
	for (q = 0; q < sc->vmx_nrxqueues; q++)
		vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);

	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
}

static void
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;

	txr = &txq->vxtxq_cmd_ring;
	txr->vxtxr_head = 0;
	txr->vxtxr_next = 0;
	txr->vxtxr_gen = VMXNET3_INIT_GEN;
	bzero(txr->vxtxr_txd,
	    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));

	txc = &txq->vxtxq_comp_ring;
	txc->vxcr_next = 0;
	txc->vxcr_gen = VMXNET3_INIT_GEN;
	bzero(txc->vxcr_u.txcd,
	    txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
}

static int
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
{
	struct ifnet *ifp;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	int i, populate, idx, frame_size, error;

	ifp = sc->vmx_ifp;
	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
	    ifp->if_mtu;

	/*
	 * If the MTU causes us to exceed what a regular sized cluster can
	 * handle, we allocate a second MJUMPAGESIZE cluster after it in
	 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
	 *
	 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
	 * our life easier. We do not support changing the ring size after
	 * the attach.
	 */
	if (frame_size <= MCLBYTES)
		sc->vmx_rx_max_chain = 1;
	else
		sc->vmx_rx_max_chain = 2;

	/*
	 * Only populate ring 1 if the configuration will take advantage
	 * of it. That is either when LRO is enabled or the frame size
	 * exceeds what ring 0 can contain.
	 */
2167 */ 2168 if ((ifp->if_capenable & IFCAP_LRO) == 0 && 2169 frame_size <= MCLBYTES + MJUMPAGESIZE) 2170 populate = 1; 2171 else 2172 populate = VMXNET3_RXRINGS_PERQ; 2173 2174 for (i = 0; i < populate; i++) { 2175 rxr = &rxq->vxrxq_cmd_ring[i]; 2176 rxr->vxrxr_fill = 0; 2177 rxr->vxrxr_gen = VMXNET3_INIT_GEN; 2178 bzero(rxr->vxrxr_rxd, 2179 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); 2180 2181 for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) { 2182 error = vmxnet3_newbuf(sc, rxr); 2183 if (error) 2184 return (error); 2185 } 2186 } 2187 2188 for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) { 2189 rxr = &rxq->vxrxq_cmd_ring[i]; 2190 rxr->vxrxr_fill = 0; 2191 rxr->vxrxr_gen = 0; 2192 bzero(rxr->vxrxr_rxd, 2193 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); 2194 } 2195 2196 rxc = &rxq->vxrxq_comp_ring; 2197 rxc->vxcr_next = 0; 2198 rxc->vxcr_gen = VMXNET3_INIT_GEN; 2199 bzero(rxc->vxcr_u.rxcd, 2200 rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc)); 2201 2202 return (0); 2203 } 2204 2205 static int 2206 vmxnet3_reinit_queues(struct vmxnet3_softc *sc) 2207 { 2208 device_t dev; 2209 int q, error; 2210 2211 dev = sc->vmx_dev; 2212 2213 for (q = 0; q < sc->vmx_ntxqueues; q++) 2214 vmxnet3_txinit(sc, &sc->vmx_txq[q]); 2215 2216 for (q = 0; q < sc->vmx_nrxqueues; q++) { 2217 error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]); 2218 if (error) { 2219 device_printf(dev, "cannot populate Rx queue %d\n", q); 2220 return (error); 2221 } 2222 } 2223 2224 return (0); 2225 } 2226 2227 static int 2228 vmxnet3_enable_device(struct vmxnet3_softc *sc) 2229 { 2230 int q; 2231 2232 if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) { 2233 device_printf(sc->vmx_dev, "device enable command failed!\n"); 2234 return (1); 2235 } 2236 2237 /* Reset the Rx queue heads. */ 2238 for (q = 0; q < sc->vmx_nrxqueues; q++) { 2239 vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0); 2240 vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0); 2241 } 2242 2243 return (0); 2244 } 2245 2246 static void 2247 vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc) 2248 { 2249 struct ifnet *ifp; 2250 2251 ifp = sc->vmx_ifp; 2252 2253 vmxnet3_set_rxfilter(sc); 2254 2255 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 2256 bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter, 2257 sizeof(sc->vmx_ds->vlan_filter)); 2258 else 2259 bzero(sc->vmx_ds->vlan_filter, 2260 sizeof(sc->vmx_ds->vlan_filter)); 2261 vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); 2262 } 2263 2264 static int 2265 vmxnet3_reinit(struct vmxnet3_softc *sc) 2266 { 2267 2268 vmxnet3_reinit_interface(sc); 2269 vmxnet3_reinit_shared_data(sc); 2270 2271 if (vmxnet3_reinit_queues(sc) != 0) 2272 return (ENXIO); 2273 2274 if (vmxnet3_enable_device(sc) != 0) 2275 return (ENXIO); 2276 2277 vmxnet3_reinit_rxfilters(sc); 2278 2279 return (0); 2280 } 2281 2282 static void 2283 vmxnet3_init_locked(struct vmxnet3_softc *sc) 2284 { 2285 struct ifnet *ifp; 2286 2287 ifp = sc->vmx_ifp; 2288 2289 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2290 return; 2291 2292 vmxnet3_stop(sc); 2293 2294 if (vmxnet3_reinit(sc) != 0) { 2295 vmxnet3_stop(sc); 2296 return; 2297 } 2298 2299 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2300 vmxnet3_link_status(sc); 2301 2302 vmxnet3_enable_all_intrs(sc); 2303 callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); 2304 } 2305 2306 static void 2307 vmxnet3_init(void *xsc) 2308 { 2309 struct vmxnet3_softc *sc; 2310 2311 sc = xsc; 2312 2313 VMXNET3_CORE_LOCK(sc); 2314 vmxnet3_init_locked(sc); 2315 VMXNET3_CORE_UNLOCK(sc); 2316 } 2317 2318 /* 2319 * BMV: Much of this can go away once we finally have 
offsets in 2320 * the mbuf packet header. Bug andre@. 2321 */ 2322 static int 2323 vmxnet3_txq_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) 2324 { 2325 struct ether_vlan_header *evh; 2326 int offset; 2327 2328 evh = mtod(m, struct ether_vlan_header *); 2329 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2330 /* BMV: We should handle nested VLAN tags too. */ 2331 *etype = ntohs(evh->evl_proto); 2332 offset = sizeof(struct ether_vlan_header); 2333 } else { 2334 *etype = ntohs(evh->evl_encap_proto); 2335 offset = sizeof(struct ether_header); 2336 } 2337 2338 switch (*etype) { 2339 #if defined(INET) 2340 case ETHERTYPE_IP: { 2341 struct ip *ip, iphdr; 2342 if (__predict_false(m->m_len < offset + sizeof(struct ip))) { 2343 m_copydata(m, offset, sizeof(struct ip), 2344 (caddr_t) &iphdr); 2345 ip = &iphdr; 2346 } else 2347 ip = (struct ip *)(m->m_data + offset); 2348 *proto = ip->ip_p; 2349 *start = offset + (ip->ip_hl << 2); 2350 break; 2351 } 2352 #endif 2353 #if defined(INET6) 2354 case ETHERTYPE_IPV6: 2355 *proto = -1; 2356 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); 2357 /* Assert the network stack sent us a valid packet. */ 2358 KASSERT(*start > offset, 2359 ("%s: mbuf %p start %d offset %d proto %d", __func__, m, 2360 *start, offset, *proto)); 2361 break; 2362 #endif 2363 default: 2364 return (EINVAL); 2365 } 2366 2367 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 2368 struct tcphdr *tcp, tcphdr; 2369 2370 if (__predict_false(*proto != IPPROTO_TCP)) { 2371 /* Likely failed to correctly parse the mbuf. */ 2372 return (EINVAL); 2373 } 2374 2375 if (m->m_len < *start + sizeof(struct tcphdr)) { 2376 m_copydata(m, offset, sizeof(struct tcphdr), 2377 (caddr_t) &tcphdr); 2378 tcp = &tcphdr; 2379 } else 2380 tcp = (struct tcphdr *)(m->m_data + *start); 2381 2382 /* 2383 * For TSO, the size of the protocol header is also 2384 * included in the descriptor header size. 
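 * That is, for a TCP segment *start ends up covering the Ethernet,
 * IP and TCP headers; this is the value later written to the
 * descriptor's hlen field when TSO is requested.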
2385 */ 2386 *start += (tcp->th_off << 2); 2387 } 2388 2389 return (0); 2390 } 2391 2392 static int 2393 vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0, 2394 bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs) 2395 { 2396 struct vmxnet3_txring *txr; 2397 struct mbuf *m; 2398 bus_dma_tag_t tag; 2399 int maxsegs, error; 2400 2401 txr = &txq->vxtxq_cmd_ring; 2402 m = *m0; 2403 tag = txr->vxtxr_txtag; 2404 maxsegs = VMXNET3_TX_MAXSEGS; 2405 2406 error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0); 2407 if (error == 0 || error != EFBIG) 2408 return (error); 2409 2410 m = m_collapse(m, M_NOWAIT, maxsegs); 2411 if (m != NULL) { 2412 *m0 = m; 2413 error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0); 2414 } else 2415 error = ENOBUFS; 2416 2417 if (error) { 2418 m_freem(*m0); 2419 *m0 = NULL; 2420 } else 2421 txq->vxtxq_sc->vmx_stats.vmst_collapsed++; 2422 2423 return (error); 2424 } 2425 2426 static void 2427 vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap) 2428 { 2429 struct vmxnet3_txring *txr; 2430 2431 txr = &txq->vxtxq_cmd_ring; 2432 bus_dmamap_unload(txr->vxtxr_txtag, dmap); 2433 } 2434 2435 static int 2436 vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0) 2437 { 2438 struct vmxnet3_softc *sc; 2439 struct ifnet *ifp; 2440 struct vmxnet3_txring *txr; 2441 struct vmxnet3_txdesc *txd, *sop; 2442 struct mbuf *m; 2443 bus_dmamap_t dmap; 2444 bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS]; 2445 int i, gen, nsegs, etype, proto, start, error; 2446 2447 sc = txq->vxtxq_sc; 2448 ifp = sc->vmx_ifp; 2449 start = 0; 2450 txd = NULL; 2451 txr = &txq->vxtxq_cmd_ring; 2452 dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap; 2453 2454 error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs); 2455 if (error) 2456 return (error); 2457 2458 m = *m0; 2459 M_ASSERTPKTHDR(m); 2460 KASSERT(nsegs <= VMXNET3_TX_MAXSEGS, 2461 ("%s: mbuf %p with too many segments %d", __func__, m, nsegs)); 2462 2463 if (VMXNET3_TXRING_AVAIL(txr) < nsegs) { 2464 txq->vxtxq_stats.vtxrs_full++; 2465 vmxnet3_txq_unload_mbuf(txq, dmap); 2466 return (ENOSPC); 2467 } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) { 2468 error = vmxnet3_txq_offload_ctx(m, &etype, &proto, &start); 2469 if (error) { 2470 txq->vxtxq_stats.vtxrs_offload_failed++; 2471 vmxnet3_txq_unload_mbuf(txq, dmap); 2472 m_freem(m); 2473 *m0 = NULL; 2474 return (error); 2475 } 2476 } 2477 2478 txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m = *m0; 2479 sop = &txr->vxtxr_txd[txr->vxtxr_head]; 2480 gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */ 2481 2482 for (i = 0; i < nsegs; i++) { 2483 txd = &txr->vxtxr_txd[txr->vxtxr_head]; 2484 2485 txd->addr = segs[i].ds_addr; 2486 txd->len = segs[i].ds_len; 2487 txd->gen = gen; 2488 txd->dtype = 0; 2489 txd->offload_mode = VMXNET3_OM_NONE; 2490 txd->offload_pos = 0; 2491 txd->hlen = 0; 2492 txd->eop = 0; 2493 txd->compreq = 0; 2494 txd->vtag_mode = 0; 2495 txd->vtag = 0; 2496 2497 if (++txr->vxtxr_head == txr->vxtxr_ndesc) { 2498 txr->vxtxr_head = 0; 2499 txr->vxtxr_gen ^= 1; 2500 } 2501 gen = txr->vxtxr_gen; 2502 } 2503 txd->eop = 1; 2504 txd->compreq = 1; 2505 2506 if (m->m_flags & M_VLANTAG) { 2507 sop->vtag_mode = 1; 2508 sop->vtag = m->m_pkthdr.ether_vtag; 2509 } 2510 2511 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 2512 sop->offload_mode = VMXNET3_OM_TSO; 2513 sop->hlen = start; 2514 sop->offload_pos = m->m_pkthdr.tso_segsz; 2515 } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD | 2516 VMXNET3_CSUM_OFFLOAD_IPV6)) { 2517 
sop->offload_mode = VMXNET3_OM_CSUM; 2518 sop->hlen = start; 2519 sop->offload_pos = start + m->m_pkthdr.csum_data; 2520 } 2521 2522 /* Finally, change the ownership. */ 2523 vmxnet3_barrier(sc, VMXNET3_BARRIER_WR); 2524 sop->gen ^= 1; 2525 2526 if (++txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) { 2527 txq->vxtxq_ts->npending = 0; 2528 vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), 2529 txr->vxtxr_head); 2530 } 2531 2532 return (0); 2533 } 2534 2535 static void 2536 vmxnet3_start_locked(struct ifnet *ifp) 2537 { 2538 struct vmxnet3_softc *sc; 2539 struct vmxnet3_txqueue *txq; 2540 struct vmxnet3_txring *txr; 2541 struct mbuf *m_head; 2542 int tx, avail; 2543 2544 sc = ifp->if_softc; 2545 txq = &sc->vmx_txq[0]; 2546 txr = &txq->vxtxq_cmd_ring; 2547 tx = 0; 2548 2549 VMXNET3_TXQ_LOCK_ASSERT(txq); 2550 2551 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2552 sc->vmx_link_active == 0) 2553 return; 2554 2555 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 2556 if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2) 2557 break; 2558 2559 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2560 if (m_head == NULL) 2561 break; 2562 2563 /* Assume worse case if this mbuf is the head of a chain. */ 2564 if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) { 2565 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2566 break; 2567 } 2568 2569 if (vmxnet3_txq_encap(txq, &m_head) != 0) { 2570 if (m_head != NULL) 2571 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2572 break; 2573 } 2574 2575 tx++; 2576 ETHER_BPF_MTAP(ifp, m_head); 2577 } 2578 2579 if (tx > 0) { 2580 if (txq->vxtxq_ts->npending > 0) { 2581 txq->vxtxq_ts->npending = 0; 2582 vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), 2583 txr->vxtxr_head); 2584 } 2585 txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; 2586 } 2587 } 2588 2589 static void 2590 vmxnet3_start(struct ifnet *ifp) 2591 { 2592 struct vmxnet3_softc *sc; 2593 struct vmxnet3_txqueue *txq; 2594 2595 sc = ifp->if_softc; 2596 txq = &sc->vmx_txq[0]; 2597 2598 VMXNET3_TXQ_LOCK(txq); 2599 vmxnet3_start_locked(ifp); 2600 VMXNET3_TXQ_UNLOCK(txq); 2601 } 2602 2603 static void 2604 vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag) 2605 { 2606 struct ifnet *ifp; 2607 int idx, bit; 2608 2609 ifp = sc->vmx_ifp; 2610 idx = (tag >> 5) & 0x7F; 2611 bit = tag & 0x1F; 2612 2613 if (tag == 0 || tag > 4095) 2614 return; 2615 2616 VMXNET3_CORE_LOCK(sc); 2617 2618 /* Update our private VLAN bitvector. 
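 * Each of the 4096 possible VLAN IDs maps to a single bit: (tag >> 5)
 * selects the 32-bit word and (tag & 0x1f) selects the bit within it.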
*/ 2619 if (add) 2620 sc->vmx_vlan_filter[idx] |= (1 << bit); 2621 else 2622 sc->vmx_vlan_filter[idx] &= ~(1 << bit); 2623 2624 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2625 if (add) 2626 sc->vmx_ds->vlan_filter[idx] |= (1 << bit); 2627 else 2628 sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit); 2629 vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); 2630 } 2631 2632 VMXNET3_CORE_UNLOCK(sc); 2633 } 2634 2635 static void 2636 vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2637 { 2638 2639 if (ifp->if_softc == arg) 2640 vmxnet3_update_vlan_filter(arg, 1, tag); 2641 } 2642 2643 static void 2644 vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2645 { 2646 2647 if (ifp->if_softc == arg) 2648 vmxnet3_update_vlan_filter(arg, 0, tag); 2649 } 2650 2651 static void 2652 vmxnet3_set_rxfilter(struct vmxnet3_softc *sc) 2653 { 2654 struct ifnet *ifp; 2655 struct vmxnet3_driver_shared *ds; 2656 struct ifmultiaddr *ifma; 2657 u_int mode; 2658 2659 ifp = sc->vmx_ifp; 2660 ds = sc->vmx_ds; 2661 2662 mode = VMXNET3_RXMODE_UCAST; 2663 if (ifp->if_flags & IFF_BROADCAST) 2664 mode |= VMXNET3_RXMODE_BCAST; 2665 if (ifp->if_flags & IFF_PROMISC) 2666 mode |= VMXNET3_RXMODE_PROMISC; 2667 if (ifp->if_flags & IFF_ALLMULTI) 2668 mode |= VMXNET3_RXMODE_ALLMULTI; 2669 else { 2670 int cnt = 0, overflow = 0; 2671 2672 if_maddr_rlock(ifp); 2673 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2674 if (ifma->ifma_addr->sa_family != AF_LINK) 2675 continue; 2676 else if (cnt == VMXNET3_MULTICAST_MAX) { 2677 overflow = 1; 2678 break; 2679 } 2680 2681 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2682 &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN); 2683 cnt++; 2684 } 2685 if_maddr_runlock(ifp); 2686 2687 if (overflow != 0) { 2688 cnt = 0; 2689 mode |= VMXNET3_RXMODE_ALLMULTI; 2690 } else if (cnt > 0) 2691 mode |= VMXNET3_RXMODE_MCAST; 2692 ds->mcast_tablelen = cnt * ETHER_ADDR_LEN; 2693 } 2694 2695 ds->rxmode = mode; 2696 2697 vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER); 2698 vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE); 2699 } 2700 2701 static int 2702 vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu) 2703 { 2704 struct ifnet *ifp; 2705 2706 ifp = sc->vmx_ifp; 2707 2708 if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU) 2709 return (EINVAL); 2710 2711 ifp->if_mtu = mtu; 2712 2713 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2714 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2715 vmxnet3_init_locked(sc); 2716 } 2717 2718 return (0); 2719 } 2720 2721 static int 2722 vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2723 { 2724 struct vmxnet3_softc *sc; 2725 struct ifreq *ifr; 2726 int reinit, mask, error; 2727 2728 sc = ifp->if_softc; 2729 ifr = (struct ifreq *) data; 2730 error = 0; 2731 2732 switch (cmd) { 2733 case SIOCSIFMTU: 2734 if (ifp->if_mtu != ifr->ifr_mtu) { 2735 VMXNET3_CORE_LOCK(sc); 2736 error = vmxnet3_change_mtu(sc, ifr->ifr_mtu); 2737 VMXNET3_CORE_UNLOCK(sc); 2738 } 2739 break; 2740 2741 case SIOCSIFFLAGS: 2742 VMXNET3_CORE_LOCK(sc); 2743 if (ifp->if_flags & IFF_UP) { 2744 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2745 if ((ifp->if_flags ^ sc->vmx_if_flags) & 2746 (IFF_PROMISC | IFF_ALLMULTI)) { 2747 vmxnet3_set_rxfilter(sc); 2748 } 2749 } else 2750 vmxnet3_init_locked(sc); 2751 } else { 2752 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2753 vmxnet3_stop(sc); 2754 } 2755 sc->vmx_if_flags = ifp->if_flags; 2756 VMXNET3_CORE_UNLOCK(sc); 2757 break; 2758 2759 case SIOCADDMULTI: 2760 case SIOCDELMULTI: 2761 VMXNET3_CORE_LOCK(sc); 2762 if (ifp->if_drv_flags & 
IFF_DRV_RUNNING) 2763 vmxnet3_set_rxfilter(sc); 2764 VMXNET3_CORE_UNLOCK(sc); 2765 break; 2766 2767 case SIOCSIFMEDIA: 2768 case SIOCGIFMEDIA: 2769 error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd); 2770 break; 2771 2772 case SIOCSIFCAP: 2773 VMXNET3_CORE_LOCK(sc); 2774 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2775 2776 if (mask & IFCAP_TXCSUM) 2777 ifp->if_capenable ^= IFCAP_TXCSUM; 2778 if (mask & IFCAP_TXCSUM_IPV6) 2779 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 2780 if (mask & IFCAP_TSO4) 2781 ifp->if_capenable ^= IFCAP_TSO4; 2782 if (mask & IFCAP_TSO6) 2783 ifp->if_capenable ^= IFCAP_TSO6; 2784 2785 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | 2786 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) { 2787 /* Changing these features requires us to reinit. */ 2788 reinit = 1; 2789 2790 if (mask & IFCAP_RXCSUM) 2791 ifp->if_capenable ^= IFCAP_RXCSUM; 2792 if (mask & IFCAP_RXCSUM_IPV6) 2793 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 2794 if (mask & IFCAP_LRO) 2795 ifp->if_capenable ^= IFCAP_LRO; 2796 if (mask & IFCAP_VLAN_HWTAGGING) 2797 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2798 if (mask & IFCAP_VLAN_HWFILTER) 2799 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 2800 } else 2801 reinit = 0; 2802 2803 if (mask & IFCAP_VLAN_HWTSO) 2804 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2805 2806 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2807 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2808 vmxnet3_init_locked(sc); 2809 } 2810 2811 VMXNET3_CORE_UNLOCK(sc); 2812 VLAN_CAPABILITIES(ifp); 2813 break; 2814 2815 default: 2816 error = ether_ioctl(ifp, cmd, data); 2817 break; 2818 } 2819 2820 VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc); 2821 2822 return (error); 2823 } 2824 2825 static int 2826 vmxnet3_watchdog(struct vmxnet3_txqueue *txq) 2827 { 2828 struct vmxnet3_softc *sc; 2829 2830 sc = txq->vxtxq_sc; 2831 2832 VMXNET3_TXQ_LOCK(txq); 2833 if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) { 2834 VMXNET3_TXQ_UNLOCK(txq); 2835 return (0); 2836 } 2837 VMXNET3_TXQ_UNLOCK(txq); 2838 2839 if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n", 2840 txq->vxtxq_id); 2841 return (1); 2842 } 2843 2844 static void 2845 vmxnet3_refresh_stats(struct vmxnet3_softc *sc) 2846 { 2847 2848 vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS); 2849 } 2850 2851 static void 2852 vmxnet3_tick(void *xsc) 2853 { 2854 struct vmxnet3_softc *sc; 2855 struct ifnet *ifp; 2856 int i, timedout; 2857 2858 sc = xsc; 2859 ifp = sc->vmx_ifp; 2860 timedout = 0; 2861 2862 VMXNET3_CORE_LOCK_ASSERT(sc); 2863 vmxnet3_refresh_stats(sc); 2864 2865 for (i = 0; i < sc->vmx_ntxqueues; i++) 2866 timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]); 2867 2868 if (timedout != 0) { 2869 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2870 vmxnet3_init_locked(sc); 2871 } else 2872 callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); 2873 } 2874 2875 static int 2876 vmxnet3_link_is_up(struct vmxnet3_softc *sc) 2877 { 2878 uint32_t status; 2879 2880 /* Also update the link speed while here. 
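 * The result of the GET_LINK command carries the link speed in its
 * upper 16 bits; bit 0 is set when the link is up.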
*/ 2881 status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK); 2882 sc->vmx_link_speed = status >> 16; 2883 return !!(status & 0x1); 2884 } 2885 2886 static void 2887 vmxnet3_link_status(struct vmxnet3_softc *sc) 2888 { 2889 struct ifnet *ifp; 2890 int link; 2891 2892 ifp = sc->vmx_ifp; 2893 link = vmxnet3_link_is_up(sc); 2894 2895 if (link != 0 && sc->vmx_link_active == 0) { 2896 sc->vmx_link_active = 1; 2897 if_link_state_change(ifp, LINK_STATE_UP); 2898 } else if (link == 0 && sc->vmx_link_active != 0) { 2899 sc->vmx_link_active = 0; 2900 if_link_state_change(ifp, LINK_STATE_DOWN); 2901 } 2902 } 2903 2904 static void 2905 vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2906 { 2907 struct vmxnet3_softc *sc; 2908 2909 sc = ifp->if_softc; 2910 2911 ifmr->ifm_active = IFM_ETHER | IFM_AUTO; 2912 ifmr->ifm_status = IFM_AVALID; 2913 2914 VMXNET3_CORE_LOCK(sc); 2915 if (vmxnet3_link_is_up(sc) != 0) 2916 ifmr->ifm_status |= IFM_ACTIVE; 2917 else 2918 ifmr->ifm_status |= IFM_NONE; 2919 VMXNET3_CORE_UNLOCK(sc); 2920 } 2921 2922 static int 2923 vmxnet3_media_change(struct ifnet *ifp) 2924 { 2925 2926 /* Ignore. */ 2927 return (0); 2928 } 2929 2930 static void 2931 vmxnet3_set_lladdr(struct vmxnet3_softc *sc) 2932 { 2933 uint32_t ml, mh; 2934 2935 ml = sc->vmx_lladdr[0]; 2936 ml |= sc->vmx_lladdr[1] << 8; 2937 ml |= sc->vmx_lladdr[2] << 16; 2938 ml |= sc->vmx_lladdr[3] << 24; 2939 vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml); 2940 2941 mh = sc->vmx_lladdr[4]; 2942 mh |= sc->vmx_lladdr[5] << 8; 2943 vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh); 2944 } 2945 2946 static void 2947 vmxnet3_get_lladdr(struct vmxnet3_softc *sc) 2948 { 2949 uint32_t ml, mh; 2950 2951 ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL); 2952 mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH); 2953 2954 sc->vmx_lladdr[0] = ml; 2955 sc->vmx_lladdr[1] = ml >> 8; 2956 sc->vmx_lladdr[2] = ml >> 16; 2957 sc->vmx_lladdr[3] = ml >> 24; 2958 sc->vmx_lladdr[4] = mh; 2959 sc->vmx_lladdr[5] = mh >> 8; 2960 } 2961 2962 static void 2963 vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq, 2964 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) 2965 { 2966 struct sysctl_oid *node, *txsnode; 2967 struct sysctl_oid_list *list, *txslist; 2968 struct vmxnet3_txq_stats *stats; 2969 struct UPT1_TxStats *txstats; 2970 char namebuf[16]; 2971 2972 stats = &txq->vxtxq_stats; 2973 txstats = &txq->vxtxq_ts->stats; 2974 2975 snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id); 2976 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, 2977 NULL, "Transmit Queue"); 2978 txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node); 2979 2980 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD, 2981 &stats->vtxrs_full, "Tx ring full"); 2982 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD, 2983 &stats->vtxrs_offload_failed, "Tx checksum offload failed"); 2984 2985 /* 2986 * Add statistics reported by the host. These are updated once 2987 * per second. 
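 * (vmxnet3_tick() refreshes them with VMXNET3_CMD_GET_STATS.)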
2988 */ 2989 txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD, 2990 NULL, "Host Statistics"); 2991 txslist = SYSCTL_CHILDREN(txsnode); 2992 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD, 2993 &txstats->TSO_packets, "TSO packets"); 2994 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD, 2995 &txstats->TSO_bytes, "TSO bytes"); 2996 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD, 2997 &txstats->ucast_packets, "Unicast packets"); 2998 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD, 2999 &txstats->ucast_bytes, "Unicast bytes"); 3000 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD, 3001 &txstats->mcast_packets, "Multicast packets"); 3002 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD, 3003 &txstats->mcast_bytes, "Multicast bytes"); 3004 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD, 3005 &txstats->error, "Errors"); 3006 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD, 3007 &txstats->discard, "Discards"); 3008 } 3009 3010 static void 3011 vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq, 3012 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) 3013 { 3014 struct sysctl_oid *node, *rxsnode; 3015 struct sysctl_oid_list *list, *rxslist; 3016 struct vmxnet3_rxq_stats *stats; 3017 struct UPT1_RxStats *rxstats; 3018 char namebuf[16]; 3019 3020 stats = &rxq->vxrxq_stats; 3021 rxstats = &rxq->vxrxq_rs->stats; 3022 3023 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id); 3024 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, 3025 NULL, "Receive Queue"); 3026 rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node); 3027 3028 /* 3029 * Add statistics reported by the host. These are updated once 3030 * per second. 
3031 */ 3032 rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD, 3033 NULL, "Host Statistics"); 3034 rxslist = SYSCTL_CHILDREN(rxsnode); 3035 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD, 3036 &rxstats->LRO_packets, "LRO packets"); 3037 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD, 3038 &rxstats->LRO_bytes, "LRO bytes"); 3039 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD, 3040 &rxstats->ucast_packets, "Unicast packets"); 3041 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD, 3042 &rxstats->ucast_bytes, "Unicast bytes"); 3043 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD, 3044 &rxstats->mcast_packets, "Multicast packets"); 3045 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD, 3046 &rxstats->mcast_bytes, "Multicast bytes"); 3047 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD, 3048 &rxstats->bcast_packets, "Broadcast packets"); 3049 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD, 3050 &rxstats->bcast_bytes, "Broadcast bytes"); 3051 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD, 3052 &rxstats->nobuffer, "No buffer"); 3053 SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD, 3054 &rxstats->error, "Errors"); 3055 } 3056 3057 #ifdef VMXNET3_DEBUG_SYSCTL 3058 static void 3059 vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc, 3060 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) 3061 { 3062 struct sysctl_oid *node; 3063 struct sysctl_oid_list *list; 3064 int i; 3065 3066 for (i = 0; i < sc->vmx_ntxqueues; i++) { 3067 struct vmxnet3_txqueue *txq = &sc->vmx_txq[i]; 3068 3069 node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO, 3070 "debug", CTLFLAG_RD, NULL, ""); 3071 list = SYSCTL_CHILDREN(node); 3072 3073 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD, 3074 &txq->vxtxq_cmd_ring.vxtxr_head, 0, ""); 3075 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD, 3076 &txq->vxtxq_cmd_ring.vxtxr_next, 0, ""); 3077 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD, 3078 &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, ""); 3079 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD, 3080 &txq->vxtxq_cmd_ring.vxtxr_gen, 0, ""); 3081 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD, 3082 &txq->vxtxq_comp_ring.vxcr_next, 0, ""); 3083 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD, 3084 &txq->vxtxq_comp_ring.vxcr_ndesc, 0,""); 3085 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD, 3086 &txq->vxtxq_comp_ring.vxcr_gen, 0, ""); 3087 } 3088 3089 for (i = 0; i < sc->vmx_nrxqueues; i++) { 3090 struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i]; 3091 3092 node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO, 3093 "debug", CTLFLAG_RD, NULL, ""); 3094 list = SYSCTL_CHILDREN(node); 3095 3096 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD, 3097 &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, ""); 3098 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD, 3099 &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, ""); 3100 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD, 3101 &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, ""); 3102 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD, 3103 &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, ""); 3104 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD, 3105 &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, ""); 3106 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD, 3107 
&rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, ""); 3108 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD, 3109 &rxq->vxrxq_comp_ring.vxcr_next, 0, ""); 3110 SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD, 3111 &rxq->vxrxq_comp_ring.vxcr_ndesc, 0,""); 3112 SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD, 3113 &rxq->vxrxq_comp_ring.vxcr_gen, 0, ""); 3114 } 3115 } 3116 #endif 3117 3118 static void 3119 vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc, 3120 struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) 3121 { 3122 int i; 3123 3124 for (i = 0; i < sc->vmx_ntxqueues; i++) 3125 vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child); 3126 for (i = 0; i < sc->vmx_nrxqueues; i++) 3127 vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child); 3128 3129 #ifdef VMXNET3_DEBUG_SYSCTL 3130 vmxnet3_setup_debug_sysctl(sc, ctx, child); 3131 #endif 3132 } 3133 3134 static void 3135 vmxnet3_setup_sysctl(struct vmxnet3_softc *sc) 3136 { 3137 device_t dev; 3138 struct vmxnet3_statistics *stats; 3139 struct sysctl_ctx_list *ctx; 3140 struct sysctl_oid *tree; 3141 struct sysctl_oid_list *child; 3142 3143 dev = sc->vmx_dev; 3144 ctx = device_get_sysctl_ctx(dev); 3145 tree = device_get_sysctl_tree(dev); 3146 child = SYSCTL_CHILDREN(tree); 3147 3148 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD, 3149 &sc->vmx_ntxqueues, 0, "Number of Tx queues"); 3150 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD, 3151 &sc->vmx_nrxqueues, 0, "Number of Rx queues"); 3152 3153 stats = &sc->vmx_stats; 3154 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "collapsed", CTLFLAG_RD, 3155 &stats->vmst_collapsed, 0, "Tx mbuf chains collapsed"); 3156 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD, 3157 &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed"); 3158 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD, 3159 &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed"); 3160 3161 vmxnet3_setup_queue_sysctl(sc, ctx, child); 3162 } 3163 3164 static void 3165 vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) 3166 { 3167 3168 bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v); 3169 } 3170 3171 static uint32_t 3172 vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r) 3173 { 3174 3175 return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r)); 3176 } 3177 3178 static void 3179 vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) 3180 { 3181 3182 bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v); 3183 } 3184 3185 static void 3186 vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd) 3187 { 3188 3189 vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd); 3190 } 3191 3192 static uint32_t 3193 vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd) 3194 { 3195 3196 vmxnet3_write_cmd(sc, cmd); 3197 bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0, 3198 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 3199 return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD)); 3200 } 3201 3202 static void 3203 vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq) 3204 { 3205 3206 vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0); 3207 } 3208 3209 static void 3210 vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq) 3211 { 3212 3213 vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1); 3214 } 3215 3216 static void 3217 vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc) 3218 { 3219 int i; 3220 3221 sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL; 3222 for (i = 0; i < sc->vmx_nintrs; i++) 3223 
vmxnet3_enable_intr(sc, i); 3224 } 3225 3226 static void 3227 vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc) 3228 { 3229 int i; 3230 3231 sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL; 3232 for (i = 0; i < sc->vmx_nintrs; i++) 3233 vmxnet3_disable_intr(sc, i); 3234 } 3235 3236 static void 3237 vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3238 { 3239 bus_addr_t *baddr = arg; 3240 3241 if (error == 0) 3242 *baddr = segs->ds_addr; 3243 } 3244 3245 static int 3246 vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align, 3247 struct vmxnet3_dma_alloc *dma) 3248 { 3249 device_t dev; 3250 int error; 3251 3252 dev = sc->vmx_dev; 3253 bzero(dma, sizeof(struct vmxnet3_dma_alloc)); 3254 3255 error = bus_dma_tag_create(bus_get_dma_tag(dev), 3256 align, 0, /* alignment, bounds */ 3257 BUS_SPACE_MAXADDR, /* lowaddr */ 3258 BUS_SPACE_MAXADDR, /* highaddr */ 3259 NULL, NULL, /* filter, filterarg */ 3260 size, /* maxsize */ 3261 1, /* nsegments */ 3262 size, /* maxsegsize */ 3263 BUS_DMA_ALLOCNOW, /* flags */ 3264 NULL, /* lockfunc */ 3265 NULL, /* lockfuncarg */ 3266 &dma->dma_tag); 3267 if (error) { 3268 device_printf(dev, "bus_dma_tag_create failed: %d\n", error); 3269 goto fail; 3270 } 3271 3272 error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, 3273 BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map); 3274 if (error) { 3275 device_printf(dev, "bus_dmamem_alloc failed: %d\n", error); 3276 goto fail; 3277 } 3278 3279 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 3280 size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT); 3281 if (error) { 3282 device_printf(dev, "bus_dmamap_load failed: %d\n", error); 3283 goto fail; 3284 } 3285 3286 dma->dma_size = size; 3287 3288 fail: 3289 if (error) 3290 vmxnet3_dma_free(sc, dma); 3291 3292 return (error); 3293 } 3294 3295 static void 3296 vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma) 3297 { 3298 3299 if (dma->dma_tag != NULL) { 3300 if (dma->dma_map != NULL) { 3301 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 3302 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3303 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 3304 } 3305 3306 if (dma->dma_vaddr != NULL) { 3307 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, 3308 dma->dma_map); 3309 } 3310 3311 bus_dma_tag_destroy(dma->dma_tag); 3312 } 3313 bzero(dma, sizeof(struct vmxnet3_dma_alloc)); 3314 } 3315 3316 static int 3317 vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def) 3318 { 3319 char path[64]; 3320 3321 snprintf(path, sizeof(path), 3322 "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob); 3323 TUNABLE_INT_FETCH(path, &def); 3324 3325 return (def); 3326 } 3327 3328 /* 3329 * Since this is a purely paravirtualized device, we do not have 3330 * to worry about DMA coherency. But at times, we must make sure 3331 * both the compiler and CPU do not reorder memory operations. 3332 */ 3333 static inline void 3334 vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type) 3335 { 3336 3337 switch (type) { 3338 case VMXNET3_BARRIER_RD: 3339 rmb(); 3340 break; 3341 case VMXNET3_BARRIER_WR: 3342 wmb(); 3343 break; 3344 case VMXNET3_BARRIER_RDWR: 3345 mb(); 3346 break; 3347 default: 3348 panic("%s: bad barrier type %d", __func__, type); 3349 } 3350 } 3351
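/*
 * For instance, vmxnet3_txq_encap() issues a VMXNET3_BARRIER_WR before
 * flipping the start-of-packet descriptor's generation bit, so that the
 * device cannot observe the ownership change before the descriptor
 * contents themselves are visible.
 */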