/*-
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/* Driver for ptnet paravirtualized network device. */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <machine/smp.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>

#ifdef WITH_PTNETMAP

#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif

#if __FreeBSD_version >= 1100000
static uint64_t	ptnet_get_counter(if_t, ift_counter);
#else
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp)	(_ifp)->if_softc
#endif

//#define PTNETMAP_STATS
//#define DEBUG
#ifdef DEBUG
#define DBG(x) x
#else   /* !DEBUG */
#define DBG(x)
#endif  /* !DEBUG */

extern int ptnet_vnet_hdr;		/* Tunable parameter */

struct ptnet_softc;

struct ptnet_queue_stats {
	uint64_t	packets; /* if_[io]packets */
	uint64_t	bytes;	 /* if_[io]bytes */
	uint64_t	errors;	 /* if_[io]errors */
	uint64_t	iqdrops; /* if_iqdrops */
	uint64_t	mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
	uint64_t	intrs;
	uint64_t	kicks;
#endif /* PTNETMAP_STATS */
};

struct ptnet_queue {
	struct ptnet_softc		*sc;
	struct resource			*irq;
	void				*cookie;
	int				kring_id;
	struct nm_csb_atok		*atok;
	struct nm_csb_ktoa		*ktoa;
	unsigned int			kick;
	struct mtx			lock;
	struct buf_ring			*bufring; /* for TX queues */
	struct ptnet_queue_stats	stats;
#ifdef PTNETMAP_STATS
	struct ptnet_queue_stats	last_stats;
#endif /* PTNETMAP_STATS */
	struct taskqueue		*taskq;
	struct task			task;
	char				lock_name[16];
};

#define PTNET_Q_LOCK(_pq)	mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)	mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)	mtx_unlock(&(_pq)->lock)

struct ptnet_softc {
	device_t		dev;
	if_t			ifp;
	struct ifmedia		media;
	struct mtx		lock;
	char			lock_name[16];
	char			hwaddr[ETHER_ADDR_LEN];

	/* Mirror of PTFEAT register. */
	uint32_t		ptfeatures;
	unsigned int		vnet_hdr_len;

	/* PCI BARs support. */
	struct resource		*iomem;
	struct resource		*msix_mem;

	unsigned int		num_rings;
	unsigned int		num_tx_rings;
	struct ptnet_queue	*queues;
	struct ptnet_queue	*rxqueues;
	struct nm_csb_atok	*csb_gh;
	struct nm_csb_ktoa	*csb_hg;

	unsigned int		min_tx_space;

	struct netmap_pt_guest_adapter *ptna;

	struct callout		tick;
#ifdef PTNETMAP_STATS
	struct timeval		last_ts;
#endif /* PTNETMAP_STATS */
};

#define PTNET_CORE_LOCK(_sc)	mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)

static int	ptnet_probe(device_t);
static int	ptnet_attach(device_t);
static int	ptnet_detach(device_t);
static int	ptnet_suspend(device_t);
static int	ptnet_resume(device_t);
static int	ptnet_shutdown(device_t);

static void	ptnet_init(void *opaque);
static int	ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int	ptnet_init_locked(struct ptnet_softc *sc);
static int	ptnet_stop(struct ptnet_softc *sc);
static int	ptnet_transmit(if_t ifp, struct mbuf *m);
static int	ptnet_drain_transmit_queue(struct ptnet_queue *pq,
					   unsigned int budget,
					   bool may_resched);
static void	ptnet_qflush(if_t ifp);
static void	ptnet_tx_task(void *context, int pending);

static int	ptnet_media_change(if_t ifp);
static void	ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void	ptnet_tick(void *opaque);
#endif

static int	ptnet_irqs_init(struct ptnet_softc *sc);
static void	ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
static int	ptnet_nm_config(struct netmap_adapter *na,
				struct nm_config_info *info);
static void	ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int	ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int	ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int	ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void	ptnet_nm_intr(struct netmap_adapter *na, int onoff);

static void	ptnet_tx_intr(void *opaque);
static void	ptnet_rx_intr(void *opaque);

static unsigned	ptnet_rx_discard(struct netmap_kring *kring,
				 unsigned int head);
static int	ptnet_rx_eof(struct ptnet_queue *pq,
			     unsigned int budget,
			     bool may_resched);
static void	ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif

static device_method_t ptnet_methods[] = {
	DEVMETHOD(device_probe,		ptnet_probe),
	DEVMETHOD(device_attach,	ptnet_attach),
	DEVMETHOD(device_detach,	ptnet_detach),
	DEVMETHOD(device_suspend,	ptnet_suspend),
	DEVMETHOD(device_resume,	ptnet_resume),
	DEVMETHOD(device_shutdown,	ptnet_shutdown),
	DEVMETHOD_END
};

static driver_t ptnet_driver = {
	"ptnet",
	ptnet_methods,
	sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
		      NULL, NULL, SI_ORDER_MIDDLE + 2);

static int
ptnet_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
		pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
		return (ENXIO);
	}

	device_set_desc(dev, "ptnet network adapter");

	return (BUS_PROBE_DEFAULT);
}

static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
	pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
	bus_write_4(pq->sc->iomem, pq->kick, 0);
}

#define PTNET_BUF_RING_SIZE	4096
#define PTNET_RX_BUDGET		512
#define PTNET_RX_BATCH		1
#define PTNET_TX_BUDGET		512
#define PTNET_TX_BATCH		64
#define PTNET_HDR_SIZE		sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE	65536

#define PTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP)
#define PTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
#define PTNET_ALL_OFFLOAD	(CSUM_TSO | PTNET_CSUM_OFFLOAD |\
				 PTNET_CSUM_OFFLOAD_IPV6)

static int
ptnet_attach(device_t dev)
{
	uint32_t ptfeatures = 0;
	unsigned int num_rx_rings, num_tx_rings;
	struct netmap_adapter na_arg;
	unsigned int nifp_offset;
	struct ptnet_softc *sc;
	if_t ifp;
	uint32_t macreg;
	int err, rid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Setup PCI resources. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					   RF_ACTIVE);
	if (sc->iomem == NULL) {
		device_printf(dev, "Failed to map I/O BAR\n");
		return (ENXIO);
	}

	/* Negotiate features with the hypervisor. */
	if (ptnet_vnet_hdr) {
		ptfeatures |= PTNETMAP_F_VNET_HDR;
	}
	bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
	ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
	sc->ptfeatures = ptfeatures;

	num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	sc->num_rings = num_tx_rings + num_rx_rings;
	sc->num_tx_rings = num_tx_rings;

	if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
		device_printf(dev, "CSB cannot handle that many rings (%u)\n",
				sc->num_rings);
		err = ENOMEM;
		goto err_path;
	}

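	/* Layout sketch, derived from the allocation below rather than from
	 * any device documentation: the CSB spans two contiguous pages. The
	 * first page holds an array of struct nm_csb_atok (guest-to-host
	 * state), one entry per ring; the second page holds an array of
	 * struct nm_csb_ktoa (host-to-guest state). Queue i simply points at
	 * csb_gh + i and csb_hg + i. */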
	/* Allocate CSB and carry out CSB allocation protocol. */
	sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
				  (size_t)0, -1UL, PAGE_SIZE, 0);
	if (sc->csb_gh == NULL) {
		device_printf(dev, "Failed to allocate CSB\n");
		err = ENOMEM;
		goto err_path;
	}
	sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);

	{
		/*
		 * We use uint64_t rather than vm_paddr_t since we
		 * need 64 bit addresses even on 32 bit platforms.
		 */
		uint64_t paddr = vtophys(sc->csb_gh);

		/* CSB allocation protocol: write to BAH first, then
		 * to BAL (for both GH and HG sections). */
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
				(paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
				paddr & 0xffffffff);
		paddr = vtophys(sc->csb_hg);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
				(paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
				paddr & 0xffffffff);
	}

	/* Allocate and initialize per-queue data structures. */
	sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
			    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		err = ENOMEM;
		goto err_path;
	}
	sc->rxqueues = sc->queues + num_tx_rings;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		pq->sc = sc;
		pq->kring_id = i;
		pq->kick = PTNET_IO_KICK_BASE + 4 * i;
		pq->atok = sc->csb_gh + i;
		pq->ktoa = sc->csb_hg + i;
		snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
			 device_get_nameunit(dev), i);
		mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
		if (i >= num_tx_rings) {
			/* RX queue: fix kring_id. */
			pq->kring_id -= num_tx_rings;
		} else {
			/* TX queue: allocate buf_ring. */
			pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
						M_DEVBUF, M_NOWAIT, &pq->lock);
			if (pq->bufring == NULL) {
				err = ENOMEM;
				goto err_path;
			}
		}
	}

	sc->min_tx_space = 64; /* Safe initial value. */

	err = ptnet_irqs_init(sc);
	if (err) {
		goto err_path;
	}

	/* Setup Ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Failed to allocate ifnet\n");
		err = ENOMEM;
		goto err_path;
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_init = ptnet_init;
	ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
	ifp->if_get_counter = ptnet_get_counter;
#endif
	ifp->if_transmit = ptnet_transmit;
	ifp->if_qflush = ptnet_qflush;

	ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
		     ptnet_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);

	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
	sc->hwaddr[0] = (macreg >> 8) & 0xff;
	sc->hwaddr[1] = macreg & 0xff;
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
	sc->hwaddr[2] = (macreg >> 24) & 0xff;
	sc->hwaddr[3] = (macreg >> 16) & 0xff;
	sc->hwaddr[4] = (macreg >> 8) & 0xff;
	sc->hwaddr[5] = macreg & 0xff;

	ether_ifattach(ifp, sc->hwaddr);

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
		/* Similarly to what the vtnet driver does, we can emulate
		 * VLAN offloadings by inserting and removing the 802.1Q
		 * header during transmit and receive. We are then able
		 * to do checksum offloading of VLAN frames. */
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
					| IFCAP_VLAN_HWCSUM
					| IFCAP_TSO | IFCAP_LRO
					| IFCAP_VLAN_HWTSO
					| IFCAP_VLAN_HWTAGGING;
	}

	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	/* Don't enable polling by default. */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	snprintf(sc->lock_name, sizeof(sc->lock_name),
		 "%s", device_get_nameunit(dev));
	mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
	callout_init_mtx(&sc->tick, &sc->lock, 0);

	/* Prepare a netmap_adapter struct instance to do netmap_attach(). */
	nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
	memset(&na_arg, 0, sizeof(na_arg));
	na_arg.ifp = ifp;
	na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	na_arg.num_tx_rings = num_tx_rings;
	na_arg.num_rx_rings = num_rx_rings;
	na_arg.nm_config = ptnet_nm_config;
	na_arg.nm_krings_create = ptnet_nm_krings_create;
	na_arg.nm_krings_delete = ptnet_nm_krings_delete;
	na_arg.nm_dtor = ptnet_nm_dtor;
	na_arg.nm_intr = ptnet_nm_intr;
	na_arg.nm_register = ptnet_nm_register;
	na_arg.nm_txsync = ptnet_nm_txsync;
	na_arg.nm_rxsync = ptnet_nm_rxsync;

	netmap_pt_guest_attach(&na_arg, nifp_offset,
				bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));

	/* Now a netmap adapter for this ifp has been allocated, and it
	 * can be accessed through NA(ifp). We also have to initialize the CSB
	 * pointer. */
	sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

	/* If virtio-net header was negotiated, set the virt_hdr_len field in
	 * the netmap adapter, to inform users that this netmap adapter requires
	 * the application to deal with the headers. */
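	/* (Presumably this means that, when the header is negotiated, the
	 * first netmap slot of each packet starts with a struct
	 * virtio_net_hdr_mrg_rxbuf of PTNET_HDR_SIZE bytes, which is what
	 * the TX and RX paths below prepend and strip.) */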
	ptnet_update_vnet_hdr(sc);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);

err_path:
	ptnet_detach(dev);
	return err;
}

/* Stop host sync-kloop if it was running. */
static void
ptnet_device_shutdown(struct ptnet_softc *sc)
{
	ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
}

static int
ptnet_detach(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);
	int i;

	ptnet_device_shutdown(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		ether_poll_deregister(sc->ifp);
	}
#endif
	callout_drain(&sc->tick);

	if (sc->queues) {
		/* Drain taskqueues before calling if_detach. */
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (pq->taskq) {
				taskqueue_drain(pq->taskq, &pq->task);
			}
		}
	}

	if (sc->ifp) {
		ether_ifdetach(sc->ifp);

		/* Uninitialize netmap adapters for this device. */
		netmap_detach(sc->ifp);

		ifmedia_removeall(&sc->media);
		if_free(sc->ifp);
		sc->ifp = NULL;
	}

	ptnet_irqs_fini(sc);

	if (sc->csb_gh) {
		contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
		sc->csb_gh = NULL;
		sc->csb_hg = NULL;
	}

	if (sc->queues) {
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (mtx_initialized(&pq->lock)) {
				mtx_destroy(&pq->lock);
			}
			if (pq->bufring != NULL) {
				buf_ring_free(pq->bufring, M_DEVBUF);
			}
		}
		free(sc->queues, M_DEVBUF);
		sc->queues = NULL;
	}

	if (sc->iomem) {
		bus_release_resource(dev, SYS_RES_IOPORT,
				     PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
		sc->iomem = NULL;
	}

	mtx_destroy(&sc->lock);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);
}

static int
ptnet_suspend(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	(void)sc;

	return (0);
}

static int
ptnet_resume(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	(void)sc;

	return (0);
}

static int
ptnet_shutdown(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	ptnet_device_shutdown(sc);

	return (0);
}

static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
	int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
	int nvecs = sc->num_rings;
	device_t dev = sc->dev;
	int err = ENOSPC;
	int cpu_cur;
	int i;

	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
		device_printf(dev, "Could not find MSI-X capability\n");
		return (ENXIO);
	}

	sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					      &rid, RF_ACTIVE);
	if (sc->msix_mem == NULL) {
		device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
		return (ENXIO);
	}

	if (pci_msix_count(dev) < nvecs) {
		device_printf(dev, "Not enough MSI-X vectors\n");
		goto err_path;
	}

	err = pci_alloc_msix(dev, &nvecs);
	if (err) {
		device_printf(dev, "Failed to allocate MSI-X vectors\n");
		goto err_path;
	}

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		rid = i + 1;
		pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						 RF_ACTIVE);
		if (pq->irq == NULL) {
			device_printf(dev, "Failed to allocate interrupt "
					   "for queue #%d\n", i);
			err = ENOSPC;
			goto err_path;
		}
	}

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *) = ptnet_tx_intr;

		if (i >= sc->num_tx_rings) {
			handler = ptnet_rx_intr;
		}
		err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
				     NULL /* intr_filter */, handler,
				     pq, &pq->cookie);
		if (err) {
			device_printf(dev, "Failed to register intr handler "
					   "for queue #%d\n", i);
			goto err_path;
		}

		bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
#if 0
		bus_bind_intr(sc->dev, pq->irq, cpu_cur);
#endif
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (i < sc->num_tx_rings)
			TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
		else
			NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);

		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
					taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
					device_get_nameunit(sc->dev), cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	return 0;
err_path:
	ptnet_irqs_fini(sc);
	return err;
}

static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
	device_t dev = sc->dev;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (pq->taskq) {
			taskqueue_free(pq->taskq);
			pq->taskq = NULL;
		}

		if (pq->cookie) {
			bus_teardown_intr(dev, pq->irq, pq->cookie);
			pq->cookie = NULL;
		}

		if (pq->irq) {
			bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
			pq->irq = NULL;
		}
	}

	if (sc->msix_mem) {
		pci_release_msi(dev);

		bus_release_resource(dev, SYS_RES_MEMORY,
				     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
				     sc->msix_mem);
		sc->msix_mem = NULL;
	}
}

static void
ptnet_init(void *opaque)
{
	struct ptnet_softc *sc = opaque;

	PTNET_CORE_LOCK(sc);
	ptnet_init_locked(sc);
	PTNET_CORE_UNLOCK(sc);
}

static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	device_t dev = sc->dev;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask __unused, err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
		PTNET_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/* Network stack wants the iff to be up. */
			err = ptnet_init_locked(sc);
		} else {
			/* Network stack wants the iff to be down. */
			err = ptnet_stop(sc);
		}
		/* We don't need to do anything to support IFF_PROMISC,
		 * since that is managed by the backend port. */
		PTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		device_printf(dev, "SIOCSIFCAP %x %x\n",
			      ifr->ifr_reqcap, ifp->if_capenable);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			struct ptnet_queue *pq;
			int i;

			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				err = ether_poll_register(ptnet_poll, ifp);
				if (err) {
					break;
				}
				/* Stop queues and sync with taskqueues. */
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					/* Make sure the worker sees the
					 * IFF_DRV_RUNNING down. */
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 0;
					PTNET_Q_UNLOCK(pq);
					/* Wait for rescheduling to finish. */
					if (pq->taskq) {
						taskqueue_drain(pq->taskq,
								&pq->task);
					}
				}
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
			} else {
				err = ether_poll_deregister(ifp);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 1;
					PTNET_Q_UNLOCK(pq);
				}
			}
		}
#endif  /* DEVICE_POLLING */
		ifp->if_capenable = ifr->ifr_reqcap;
		break;

	case SIOCSIFMTU:
		/* We support any reasonable MTU. */
		if (ifr->ifr_mtu < ETHERMIN ||
				ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
			err = EINVAL;
		} else {
			PTNET_CORE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			PTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return err;
}

static int
ptnet_init_locked(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	unsigned int nm_buf_size;
	int ret;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return 0; /* nothing to do */
	}

	device_printf(sc->dev, "%s\n", __func__);

	/* Translate offload capabilities according to if_capenable. */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;

	/*
	 * Prepare the interface for netmap mode access.
	 */
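	/* (Reading of the code below: even when the interface is driven by
	 * the normal network stack, the datapath goes through the
	 * passthrough netmap adapter; the rings are created and registered
	 * here, and the TX/RX routines copy mbufs to and from netmap
	 * slots.) */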
	netmap_update_config(na_dr);

	ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
	if (ret) {
		device_printf(sc->dev, "netmap_mem_finalize() failed\n");
		return ret;
	}

	if (sc->ptna->backend_users == 0) {
		ret = ptnet_nm_krings_create(na_nm);
		if (ret) {
			device_printf(sc->dev, "ptnet_nm_krings_create() "
					       "failed\n");
			goto err_mem_finalize;
		}

		ret = netmap_mem_rings_create(na_dr);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_rings_create() "
					       "failed\n");
			goto err_rings_create;
		}

		ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_get_lut() "
					       "failed\n");
			goto err_get_lut;
		}
	}

	ret = ptnet_nm_register(na_dr, 1 /* on */);
	if (ret) {
		goto err_register;
	}

	nm_buf_size = NETMAP_BUF_SIZE(na_dr);

	KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
	sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
	device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
		      sc->min_tx_space);
#ifdef PTNETMAP_STATS
	callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return 0;

err_register:
	memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
	netmap_mem_rings_delete(na_dr);
err_rings_create:
	ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return ret;
}

/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	int i;

	device_printf(sc->dev, "%s\n", __func__);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		return 0; /* nothing to do */
	}

	/* Clear the driver-ready flag, and synchronize with all the queues,
	 * so that after this loop we are sure nobody is working anymore with
	 * the device. This scheme is taken from the vtnet driver. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&sc->tick);
	for (i = 0; i < sc->num_rings; i++) {
		PTNET_Q_LOCK(sc->queues + i);
		PTNET_Q_UNLOCK(sc->queues + i);
	}

	ptnet_nm_register(na_dr, 0 /* off */);

	if (sc->ptna->backend_users == 0) {
		netmap_mem_rings_delete(na_dr);
		ptnet_nm_krings_delete(na_nm);
	}
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return 0;
}

static void
ptnet_qflush(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int i;

	/* Flush all the bufrings and do the interface flush. */
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct mbuf *m;

		PTNET_Q_LOCK(pq);
		if (pq->bufring) {
			while ((m = buf_ring_dequeue_sc(pq->bufring))) {
				m_freem(m);
			}
		}
		PTNET_Q_UNLOCK(pq);
	}

	if_qflush(ifp);
}

static int
ptnet_media_change(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ifmedia *ifm = &sc->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		return EINVAL;
	}

	return 0;
}

#if __FreeBSD_version >= 1100000
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue_stats stats[2];
	int i;

	/* Accumulate statistics over the queues. */
	memset(stats, 0, sizeof(stats));
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		int idx = (i < sc->num_tx_rings) ? 0 : 1;

		stats[idx].packets	+= pq->stats.packets;
		stats[idx].bytes	+= pq->stats.bytes;
		stats[idx].errors	+= pq->stats.errors;
		stats[idx].iqdrops	+= pq->stats.iqdrops;
		stats[idx].mcasts	+= pq->stats.mcasts;
	}

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (stats[1].packets);
	case IFCOUNTER_IQDROPS:
		return (stats[1].iqdrops);
	case IFCOUNTER_IERRORS:
		return (stats[1].errors);
	case IFCOUNTER_OPACKETS:
		return (stats[0].packets);
	case IFCOUNTER_OBYTES:
		return (stats[0].bytes);
	case IFCOUNTER_OMCASTS:
		return (stats[0].mcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif


#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
	struct ptnet_softc *sc = opaque;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct ptnet_queue_stats cur = pq->stats;
		struct timeval now;
		unsigned int delta;

		microtime(&now);
		delta = now.tv_usec - sc->last_ts.tv_usec +
			(now.tv_sec - sc->last_ts.tv_sec) * 1000000;
		delta /= 1000; /* in milliseconds */

		if (delta == 0)
			continue;

		device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
			      "intr %lu\n", i, delta,
			      (cur.packets - pq->last_stats.packets),
			      (cur.kicks - pq->last_stats.kicks),
			      (cur.intrs - pq->last_stats.intrs));
		pq->last_stats = cur;
	}
	microtime(&sc->last_ts);
	callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */

static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	/* We are always active, as the backend netmap port is
	 * always open in netmap mode. */
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}

static uint32_t
ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
{
	/*
	 * Write a command and read back error status,
	 * with zero meaning success.
	 */
	bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
	return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}

static int
ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);

	info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
			info->num_tx_rings, info->num_rx_rings,
			info->num_tx_descs, info->num_rx_descs,
			info->rx_buf_maxsize);

	return 0;
}

static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
	int i;

	/* Sync krings from the host, reading from
	 * CSB. */
	for (i = 0; i < sc->num_rings; i++) {
		struct nm_csb_atok *atok = sc->queues[i].atok;
		struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
		struct netmap_kring *kring;

		if (i < na->num_tx_rings) {
			kring = na->tx_rings[i];
		} else {
			kring = na->rx_rings[i - na->num_tx_rings];
		}
		kring->rhead = kring->ring->head = atok->head;
		kring->rcur = kring->ring->cur = atok->cur;
		kring->nr_hwcur = ktoa->hwcur;
		kring->nr_hwtail = kring->rtail =
			kring->ring->tail = ktoa->hwtail;

		nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
			ktoa->hwcur, atok->head, atok->cur,
			ktoa->hwtail);
		nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
			t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
			kring->ring->head, kring->ring->cur, kring->nr_hwtail,
			kring->rtail, kring->ring->tail);
	}
}

static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
	unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

	bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
	sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
	sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}

static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
	/* device-specific */
	if_t ifp = na->ifp;
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int native = (na == &sc->ptna->hwup.up);
	struct ptnet_queue *pq;
	int ret = 0;
	int i;

	if (!onoff) {
		sc->ptna->backend_users--;
	}

	/* If this is the last netmap client, guest interrupt enable flags may
	 * be in arbitrary state. Since these flags are going to be used also
	 * by the netdevice driver, we have to make sure to start with
	 * notifications enabled. Also, schedule NAPI to flush pending packets
	 * in the RX rings, since we will not receive further interrupts
	 * until they are processed. */
	if (native && !onoff && na->active_fds == 0) {
		nm_prinf("Exit netmap mode, re-enable interrupts");
		for (i = 0; i < sc->num_rings; i++) {
			pq = sc->queues + i;
			pq->atok->appl_need_kick = 1;
		}
	}

	if (onoff) {
		if (sc->ptna->backend_users == 0) {
			/* Initialize notification enable fields in the CSB. */
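			/* (From the usage in this file: atok->appl_need_kick
			 * is set when the guest wants to be interrupted by
			 * the host, while ktoa->kern_need_kick is checked
			 * before kicking the host; an interpretation, not a
			 * specification.) */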
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->ktoa->kern_need_kick = 1;
				pq->atok->appl_need_kick =
					(!(ifp->if_capenable & IFCAP_POLLING)
						&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
			if (ret) {
				return ret;
			}

			/* Align the guest krings and rings to the state stored
			 * in the CSB. */
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we don't want
		 * to replace the if_transmit method, nor set NAF_NETMAP_ON. */
		if (native) {
			netmap_krings_mode_commit(na, onoff);
			nm_set_native_flags(na);
		}

	} else {
		if (native) {
			nm_clear_native_flags(na);
			netmap_krings_mode_commit(na, onoff);
		}

		if (sc->ptna->backend_users == 0) {
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
		}
	}

	if (onoff) {
		sc->ptna->backend_users++;
	}

	return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static void
ptnet_nm_intr(struct netmap_adapter *na, int onoff)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		pq->atok->appl_need_kick = onoff;
	}
}

static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to process the pending transmission
	 * requests. However, vtnet, if_em and if_igb just call
	 * ptnet_transmit() here, at least when using MSI-X interrupts.
	 * The if_em driver instead schedules the taskqueue when using
	 * legacy interrupts. */
	taskqueue_enqueue(pq->taskq, &pq->task);
}

static void
ptnet_rx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;
	unsigned int unused;

	DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
		return;
	}

	/* Like the vtnet, if_igb and if_em drivers when using MSI-X
	 * interrupts, receive-side processing is executed directly in the
	 * interrupt service routine. Alternatively, we may schedule the
	 * taskqueue. */
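	/* (PTNET_RX_BUDGET bounds the work done in interrupt context; when
	 * the budget is exhausted, ptnet_rx_eof() reschedules itself on the
	 * taskqueue, since may_resched is true here.) */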
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	      ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;

	/* Some packets have been pushed to the netmap ring. We have
	 * to tell the host to process the new packets, updating cur
	 * and head in the CSB. */
	ring->head = ring->cur = head;

	/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
	kring->rcur = kring->rhead = head;

	nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

	/* Kick the host if needed. */
	if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
		atok->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}

#define PTNET_TX_NOSPACE(_h, _k, _min)	\
	((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
		(_k)->rtail - (_h)) < (_min)

/* This function may be called by the network stack, or by
 * the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
			   bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	if_t ifp = sc->ifp;
	unsigned int batch_count = 0;
	struct nm_csb_atok *atok;
	struct nm_csb_ktoa *ktoa;
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	unsigned int count = 0;
	unsigned int minspace;
	unsigned int head;
	unsigned int lim;
	struct mbuf *mhead;
	struct mbuf *mf;
	int nmbuf_bytes;
	uint8_t *nmbuf;

	if (!PTNET_Q_TRYLOCK(pq)) {
		/* We failed to acquire the lock, schedule the taskqueue. */
		nm_prlim(1, "Deferring TX work");
		if (may_resched) {
			taskqueue_enqueue(pq->taskq, &pq->task);
		}

		return 0;
	}

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		PTNET_Q_UNLOCK(pq);
		nm_prlim(1, "Interface is down");
		return ENETDOWN;
	}

	atok = pq->atok;
	ktoa = pq->ktoa;
	kring = na->tx_rings[pq->kring_id];
	ring = kring->ring;
	lim = kring->nkr_num_slots - 1;
	head = ring->head;
	minspace = sc->min_tx_space;

	while (count < budget) {
		if (PTNET_TX_NOSPACE(head, kring, minspace)) {
			/* We ran out of slots, let's see if the host has
			 * freed up some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (PTNET_TX_NOSPACE(head, kring, minspace)) {
				/* Still no slots available. Reactivate the
				 * interrupts so that we can be notified
				 * when some free slots are made available by
				 * the host. */
				atok->appl_need_kick = 1;

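				/* (Why a double check is needed, as inferred
				 * from the barrier comment below: the host
				 * may free slots and observe appl_need_kick
				 * still cleared just before our store above
				 * becomes visible, in which case it would not
				 * raise an interrupt and the queue could
				 * stall.) */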
				/* Double check. We need a full barrier to
				 * prevent the store to atok->appl_need_kick
				 * from being reordered with the load from
				 * ktoa->hwcur and ktoa->hwtail (store-load
				 * barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(PTNET_TX_NOSPACE(head, kring,
							    minspace))) {
					break;
				}

				nm_prlim(1, "Found more slots by doublecheck");
				/* More slots were freed before reactivating
				 * the interrupts. */
				atok->appl_need_kick = 0;
			}
		}

		mhead = drbr_peek(ifp, pq->bufring);
		if (!mhead) {
			break;
		}

		/* Initialize transmission state variables. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_bytes = 0;

		/* If needed, prepare the virtio-net header at the beginning
		 * of the first slot. */
		if (have_vnet_hdr) {
			struct virtio_net_hdr *vh =
					(struct virtio_net_hdr *)nmbuf;

			/* For performance, we could replace this memset() with
			 * two 8-bytes-wide writes. */
			memset(nmbuf, 0, PTNET_HDR_SIZE);
			if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
				mhead = virtio_net_tx_offload(ifp, mhead, false,
							      vh);
				if (unlikely(!mhead)) {
					/* Packet dropped because errors
					 * occurred while preparing the vnet
					 * header. Let's go ahead with the next
					 * packet. */
					pq->stats.errors ++;
					drbr_advance(ifp, pq->bufring);
					continue;
				}
			}
			nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
				 "csum_start %u csum_ofs %u hdr_len = %u "
				 "gso_size %u gso_type %x", __func__,
				 mhead->m_pkthdr.csum_flags, vh->flags,
				 vh->csum_start, vh->csum_offset, vh->hdr_len,
				 vh->gso_size, vh->gso_type);

			nmbuf += PTNET_HDR_SIZE;
			nmbuf_bytes += PTNET_HDR_SIZE;
		}

		for (mf = mhead; mf; mf = mf->m_next) {
			uint8_t *mdata = mf->m_data;
			int mlen = mf->m_len;

			for (;;) {
				int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

				if (mlen < copy) {
					copy = mlen;
				}
				memcpy(nmbuf, mdata, copy);

				mdata += copy;
				mlen -= copy;
				nmbuf += copy;
				nmbuf_bytes += copy;

				if (!mlen) {
					break;
				}

				slot->len = nmbuf_bytes;
				slot->flags = NS_MOREFRAG;

				head = nm_next(head, lim);
				KASSERT(head != ring->tail,
					("Unexpectedly run out of TX space"));
				slot = ring->slot + head;
				nmbuf = NMB(na, slot);
				nmbuf_bytes = 0;
			}
		}

		/* Complete last slot and update head. */
		slot->len = nmbuf_bytes;
		slot->flags = 0;
		head = nm_next(head, lim);

		/* Consume the packet just processed. */
		drbr_advance(ifp, pq->bufring);

		/* Copy the packet to listeners. */
		ETHER_BPF_MTAP(ifp, mhead);

		pq->stats.packets ++;
		pq->stats.bytes += mhead->m_pkthdr.len;
		if (mhead->m_flags & M_MCAST) {
			pq->stats.mcasts ++;
		}

		m_freem(mhead);

		count ++;
		if (++batch_count == PTNET_TX_BATCH) {
			ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
			batch_count = 0;
		}
	}

	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
	}

	if (count >= budget && may_resched) {
		DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
			     drbr_inuse(ifp, pq->bufring)));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}

	PTNET_Q_UNLOCK(pq);

	return count;
}

static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue *pq;
	unsigned int queue_idx;
	int err;

	DBG(device_printf(sc->dev, "transmit %p\n", m));

	/* Insert 802.1Q header if needed. */
	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if (m == NULL) {
			return ENOBUFS;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	/* Get the flow-id if available. */
	queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
		    m->m_pkthdr.flowid : curcpu;

	if (unlikely(queue_idx >= sc->num_tx_rings)) {
		queue_idx %= sc->num_tx_rings;
	}

	pq = sc->queues + queue_idx;

	err = drbr_enqueue(ifp, pq->bufring, m);
	if (err) {
		/* ENOBUFS when the bufring is full */
		nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
			 __func__, err);
		pq->stats.errors ++;
		return err;
	}

	if (ifp->if_capenable & IFCAP_POLLING) {
		/* If polling is on, the transmit queues will be
		 * drained by the poller. */
		return 0;
	}

	err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

	return (err < 0) ? err : 0;
}

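/* Drop the packet that starts at the given head index by advancing head
 * just past its last fragment (the first slot without NS_MOREFRAG), or up
 * to the ring tail, and returning the new head. */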
static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot = ring->slot + head;

	for (;;) {
		head = nm_next(head, kring->nkr_num_slots - 1);
		if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
			break;
		}
		slot = ring->slot + head;
	}

	return head;
}

static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
	uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

	do {
		unsigned int copy;

		if (mtail->m_len == MCLBYTES) {
			struct mbuf *mf;

			mf = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (unlikely(!mf)) {
				return NULL;
			}

			mtail->m_next = mf;
			mtail = mf;
			mdata = mtod(mtail, uint8_t *);
			mtail->m_len = 0;
		}

		copy = MCLBYTES - mtail->m_len;
		if (nmbuf_len < copy) {
			copy = nmbuf_len;
		}

		memcpy(mdata, nmbuf, copy);

		nmbuf += copy;
		nmbuf_len -= copy;
		mdata += copy;
		mtail->m_len += copy;
	} while (nmbuf_len);

	return mtail;
}

static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings[pq->kring_id];
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* the mbuf to the network stack. */
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots, let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				atok->appl_need_kick = 1;

				/* Double check for more completed RX slots.
				 * We need a full barrier to prevent the store
				 * to atok->appl_need_kick from being reordered
				 * with the load from ktoa->hwcur and
				 * ktoa->hwtail (store-load barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				atok->appl_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host should
				 * put the header in multiple netmap slots.
				 * If this is the case, discard. */
				nm_prlim(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops ++;
				deliver = 0;
				goto skip;
			}
			nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
				 "csum_ofs %u hdr_len = %u gso_size %u "
				 "gso_type %x", __func__, vh->flags,
				 vh->csum_start, vh->csum_offset, vh->hdr_len,
				 vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors ++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					      " mbuf frag, reset head %u --> %u\n",
					      __func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors ++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * the NS_MOREFRAG set. Drop it and continue
				 * the outer cycle (to do the double-check). */
				nm_prlim(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops ++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) {
			m_freem(mhead);
			nm_prlim(1, "Csum offload error: dropping");
			pq->stats.iqdrops ++;
			deliver = 0;
		}

skip:
		count ++;
		if (++batch_count >= PTNET_RX_BATCH) {
			/* Some packets have been (or will be) pushed to the
			 * network stack. We need to update the CSB to tell
			 * the host about the new ring->cur and ring->head
			 * (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}

		if (likely(deliver)) {
			pq->stats.packets ++;
			pq->stats.bytes += mhead->m_pkthdr.len;

			PTNET_Q_UNLOCK(pq);
			(*ifp->if_input)(ifp, mhead);
			PTNET_Q_LOCK(pq);
			/* The ring->head index (and related indices) are
			 * updated under pq lock by ptnet_ring_update().
			 * Since we dropped the lock to call if_input(), we
			 * must reload ring->head and restart processing the
			 * ring from there. */
			head = ring->head;

			if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
				/* The interface has gone down while we didn't
				 * have the lock. Stop any processing and
				 * exit. */
				goto unlock;
			}
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
			     head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}

#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY differently,
 * since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	nm_prlim(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result, we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */
#endif /* WITH_PTNETMAP */