/*-
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for ptnet paravirtualized network device. */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <machine/smp.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>

#ifdef WITH_PTNETMAP

#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif

static uint64_t	ptnet_get_counter(if_t, ift_counter);

//#define PTNETMAP_STATS
//#define DEBUG
#ifdef DEBUG
#define DBG(x) x
#else   /* !DEBUG */
#define DBG(x)
#endif  /* !DEBUG */

extern int ptnet_vnet_hdr; /* Tunable parameter */

struct ptnet_softc;

struct ptnet_queue_stats {
	uint64_t	packets; /* if_[io]packets */
	uint64_t	bytes;	 /* if_[io]bytes */
	uint64_t	errors;	 /* if_[io]errors */
	uint64_t	iqdrops; /* if_iqdrops */
	uint64_t	mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
	uint64_t	intrs;
	uint64_t	kicks;
#endif /* PTNETMAP_STATS */
};

struct ptnet_queue {
	struct ptnet_softc		*sc;
	struct resource			*irq;
	void				*cookie;
	int				kring_id;
	struct nm_csb_atok		*atok;
	struct nm_csb_ktoa		*ktoa;
	unsigned int			kick;
	struct mtx			lock;
	struct buf_ring			*bufring; /* for TX queues */
	struct ptnet_queue_stats	stats;
#ifdef PTNETMAP_STATS
	struct ptnet_queue_stats	last_stats;
#endif /* PTNETMAP_STATS */
	struct taskqueue		*taskq;
	struct task			task;
	char				lock_name[16];
};

#define PTNET_Q_LOCK(_pq)	mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)	mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)	mtx_unlock(&(_pq)->lock)

struct ptnet_softc {
	device_t		dev;
	if_t			ifp;
	struct ifmedia		media;
	struct mtx		lock;
	char			lock_name[16];
	char			hwaddr[ETHER_ADDR_LEN];

	/* Mirror of PTFEAT register. */
	uint32_t		ptfeatures;
	unsigned int		vnet_hdr_len;

	/* PCI BARs support. */
	struct resource		*iomem;
	struct resource		*msix_mem;

	unsigned int		num_rings;
	unsigned int		num_tx_rings;
	struct ptnet_queue	*queues;
	struct ptnet_queue	*rxqueues;
	struct nm_csb_atok	*csb_gh;
	struct nm_csb_ktoa	*csb_hg;

	unsigned int		min_tx_space;

	struct netmap_pt_guest_adapter *ptna;

	struct callout		tick;
#ifdef PTNETMAP_STATS
	struct timeval		last_ts;
#endif /* PTNETMAP_STATS */
};

#define PTNET_CORE_LOCK(_sc)	mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)

static int	ptnet_probe(device_t);
static int	ptnet_attach(device_t);
static int	ptnet_detach(device_t);
static int	ptnet_suspend(device_t);
static int	ptnet_resume(device_t);
static int	ptnet_shutdown(device_t);

static void	ptnet_init(void *opaque);
static int	ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int	ptnet_init_locked(struct ptnet_softc *sc);
static int	ptnet_stop(struct ptnet_softc *sc);
static int	ptnet_transmit(if_t ifp, struct mbuf *m);
static int	ptnet_drain_transmit_queue(struct ptnet_queue *pq,
					   unsigned int budget,
					   bool may_resched);
static void	ptnet_qflush(if_t ifp);
static void	ptnet_tx_task(void *context, int pending);

static int	ptnet_media_change(if_t ifp);
static void	ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void	ptnet_tick(void *opaque);
#endif

static int	ptnet_irqs_init(struct ptnet_softc *sc);
static void	ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t	ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
static int	ptnet_nm_config(struct netmap_adapter *na,
				struct nm_config_info *info);
static void	ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int	ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int	ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int	ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void	ptnet_nm_intr(struct netmap_adapter *na, int onoff);

static void	ptnet_tx_intr(void *opaque);
static void	ptnet_rx_intr(void *opaque);

static unsigned	ptnet_rx_discard(struct netmap_kring *kring,
				 unsigned int head);
static int	ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
			     bool may_resched);
static void	ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif
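
/*
 * Informal summary of the notification scheme used below (the TX/RX
 * paths hold the authoritative details). Each ring has a guest-to-host
 * block (struct nm_csb_atok) and a host-to-guest block (struct
 * nm_csb_ktoa) in the CSB. The guest publishes head/cur through the
 * atok block and kicks the host, by writing to the per-queue I/O
 * register, only when ktoa->kern_need_kick is set; the host interrupts
 * the guest only when atok->appl_need_kick is set. Both sides re-check
 * the ring state after re-enabling notifications (see the
 * nm_stld_barrier() double-checks below) so that no event is lost.
 */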

static device_method_t ptnet_methods[] = {
	DEVMETHOD(device_probe,		ptnet_probe),
	DEVMETHOD(device_attach,	ptnet_attach),
	DEVMETHOD(device_detach,	ptnet_detach),
	DEVMETHOD(device_suspend,	ptnet_suspend),
	DEVMETHOD(device_resume,	ptnet_resume),
	DEVMETHOD(device_shutdown,	ptnet_shutdown),
	DEVMETHOD_END
};

static driver_t ptnet_driver = {
	"ptnet",
	ptnet_methods,
	sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, NULL, NULL,
		      SI_ORDER_MIDDLE + 2);

static int
ptnet_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
	    pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
		return (ENXIO);
	}

	device_set_desc(dev, "ptnet network adapter");

	return (BUS_PROBE_DEFAULT);
}

static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
	pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
	bus_write_4(pq->sc->iomem, pq->kick, 0);
}

#define PTNET_BUF_RING_SIZE	4096
#define PTNET_RX_BUDGET		512
#define PTNET_RX_BATCH		1
#define PTNET_TX_BUDGET		512
#define PTNET_TX_BATCH		64
#define PTNET_HDR_SIZE		sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE	65536

#define PTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP)
#define PTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
#define PTNET_ALL_OFFLOAD	(CSUM_TSO | PTNET_CSUM_OFFLOAD |\
				 PTNET_CSUM_OFFLOAD_IPV6)

static int
ptnet_attach(device_t dev)
{
	uint32_t ptfeatures = 0;
	unsigned int num_rx_rings, num_tx_rings;
	struct netmap_adapter na_arg;
	unsigned int nifp_offset;
	struct ptnet_softc *sc;
	if_t ifp;
	uint32_t macreg;
	int err, rid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Setup PCI resources. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					   RF_ACTIVE);
	if (sc->iomem == NULL) {
		device_printf(dev, "Failed to map I/O BAR\n");
		return (ENXIO);
	}

	/* Negotiate features with the hypervisor. */
	if (ptnet_vnet_hdr) {
		ptfeatures |= PTNETMAP_F_VNET_HDR;
	}
	bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
	ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
	sc->ptfeatures = ptfeatures;

	num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	sc->num_rings = num_tx_rings + num_rx_rings;
	sc->num_tx_rings = num_tx_rings;

	if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
		device_printf(dev, "CSB cannot handle that many rings (%u)\n",
			      sc->num_rings);
		err = ENOMEM;
		goto err_path;
	}

	/* Allocate CSB and carry out CSB allocation protocol. */
	sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
				  (size_t)0, -1UL, PAGE_SIZE, 0);
	if (sc->csb_gh == NULL) {
		device_printf(dev, "Failed to allocate CSB\n");
		err = ENOMEM;
		goto err_path;
	}
	sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);

	{
		/*
		 * We use uint64_t rather than vm_paddr_t since we
		 * need 64 bit addresses even on 32 bit platforms.
		 */
		uint64_t paddr = vtophys(sc->csb_gh);

		/* CSB allocation protocol: write to BAH first, then
		 * to BAL (for both GH and HG sections). */
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
			    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
			    paddr & 0xffffffff);
		paddr = vtophys(sc->csb_hg);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
			    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
			    paddr & 0xffffffff);
	}
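
	/*
	 * At this point the host knows where to find both halves of the
	 * CSB: the first page (csb_gh) holds one nm_csb_atok entry per
	 * ring and the second page (csb_hg) holds the nm_csb_ktoa
	 * entries, which is why the number of rings was checked against
	 * PAGE_SIZE / sizeof(struct nm_csb_atok) above. Writing BAH
	 * before BAL presumably lets the device treat the BAL write as
	 * the point where the full 64-bit address becomes valid.
	 */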

	/* Allocate and initialize per-queue data structures. */
	sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
			    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		err = ENOMEM;
		goto err_path;
	}
	sc->rxqueues = sc->queues + num_tx_rings;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		pq->sc = sc;
		pq->kring_id = i;
		pq->kick = PTNET_IO_KICK_BASE + 4 * i;
		pq->atok = sc->csb_gh + i;
		pq->ktoa = sc->csb_hg + i;
		snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
			 device_get_nameunit(dev), i);
		mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
		if (i >= num_tx_rings) {
			/* RX queue: fix kring_id. */
			pq->kring_id -= num_tx_rings;
		} else {
			/* TX queue: allocate buf_ring. */
			pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
						M_DEVBUF, M_NOWAIT, &pq->lock);
			if (pq->bufring == NULL) {
				err = ENOMEM;
				goto err_path;
			}
		}
	}

	sc->min_tx_space = 64; /* Safe initial value. */

	err = ptnet_irqs_init(sc);
	if (err) {
		goto err_path;
	}

	/* Setup Ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setbaudrate(ifp, IF_Gbps(10));
	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX);
	if_setinitfn(ifp, ptnet_init);
	if_setioctlfn(ifp, ptnet_ioctl);
	if_setget_counter(ifp, ptnet_get_counter);
	if_settransmitfn(ifp, ptnet_transmit);
	if_setqflushfn(ifp, ptnet_qflush);

	ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
		     ptnet_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);

	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
	sc->hwaddr[0] = (macreg >> 8) & 0xff;
	sc->hwaddr[1] = macreg & 0xff;
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
	sc->hwaddr[2] = (macreg >> 24) & 0xff;
	sc->hwaddr[3] = (macreg >> 16) & 0xff;
	sc->hwaddr[4] = (macreg >> 8) & 0xff;
	sc->hwaddr[5] = macreg & 0xff;

	ether_ifattach(ifp, sc->hwaddr);

	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU, 0);

	if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
		/* Similarly to what the vtnet driver does, we can emulate
		 * VLAN offloading by inserting and removing the 802.1Q
		 * header during transmit and receive. We are then able
		 * to do checksum offloading of VLAN frames. */
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
					   | IFCAP_VLAN_HWCSUM
					   | IFCAP_TSO | IFCAP_LRO
					   | IFCAP_VLAN_HWTSO
					   | IFCAP_VLAN_HWTAGGING, 0);
	}

	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	/* Don't enable polling by default. */
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	snprintf(sc->lock_name, sizeof(sc->lock_name),
		 "%s", device_get_nameunit(dev));
	mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
	callout_init_mtx(&sc->tick, &sc->lock, 0);

	/* Prepare a netmap_adapter struct instance to do netmap_attach(). */
	nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
	memset(&na_arg, 0, sizeof(na_arg));
	na_arg.ifp = ifp;
	na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	na_arg.num_tx_rings = num_tx_rings;
	na_arg.num_rx_rings = num_rx_rings;
	na_arg.nm_config = ptnet_nm_config;
	na_arg.nm_krings_create = ptnet_nm_krings_create;
	na_arg.nm_krings_delete = ptnet_nm_krings_delete;
	na_arg.nm_dtor = ptnet_nm_dtor;
	na_arg.nm_intr = ptnet_nm_intr;
	na_arg.nm_register = ptnet_nm_register;
	na_arg.nm_txsync = ptnet_nm_txsync;
	na_arg.nm_rxsync = ptnet_nm_rxsync;

	netmap_pt_guest_attach(&na_arg, nifp_offset,
			       bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));

	/* Now a netmap adapter for this ifp has been allocated, and it
	 * can be accessed through NA(ifp). We also have to initialize the CSB
	 * pointer. */
	sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

	/* If virtio-net header was negotiated, set the virt_hdr_len field in
	 * the netmap adapter, to inform users that this netmap adapter requires
	 * the application to deal with the headers. */
	ptnet_update_vnet_hdr(sc);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);

err_path:
	ptnet_detach(dev);
	return err;
}

/* Stop host sync-kloop if it was running. */
static void
ptnet_device_shutdown(struct ptnet_softc *sc)
{
	ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
	bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
}

static int
ptnet_detach(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);
	int i;

	ptnet_device_shutdown(sc);

#ifdef DEVICE_POLLING
	if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
		ether_poll_deregister(sc->ifp);
	}
#endif
	callout_drain(&sc->tick);

	if (sc->queues) {
		/* Drain taskqueues before calling if_detach. */
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (pq->taskq) {
				taskqueue_drain(pq->taskq, &pq->task);
			}
		}
	}

	if (sc->ifp) {
		ether_ifdetach(sc->ifp);

		/* Uninitialize netmap adapters for this device. */
		netmap_detach(sc->ifp);

		ifmedia_removeall(&sc->media);
		if_free(sc->ifp);
		sc->ifp = NULL;
	}

	ptnet_irqs_fini(sc);

	if (sc->csb_gh) {
		free(sc->csb_gh, M_DEVBUF);
		sc->csb_gh = NULL;
		sc->csb_hg = NULL;
	}

	if (sc->queues) {
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (mtx_initialized(&pq->lock)) {
				mtx_destroy(&pq->lock);
			}
			if (pq->bufring != NULL) {
				buf_ring_free(pq->bufring, M_DEVBUF);
			}
		}
		free(sc->queues, M_DEVBUF);
		sc->queues = NULL;
	}

	if (sc->iomem) {
		bus_release_resource(dev, SYS_RES_IOPORT,
				     PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
		sc->iomem = NULL;
	}

	mtx_destroy(&sc->lock);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);
}

static int
ptnet_suspend(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	(void)sc;

	return (0);
}

static int
ptnet_resume(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	(void)sc;

	return (0);
}

static int
ptnet_shutdown(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);

	ptnet_device_shutdown(sc);

	return (0);
}
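
/*
 * Interrupt layout used by ptnet_irqs_init() below: one MSI-X vector
 * per ring, with TX rings first and RX rings after them, and
 * SYS_RES_IRQ rid i + 1 for queue i. TX interrupts only schedule the
 * per-queue taskqueue, while RX interrupts do the bulk of the work in
 * the handler itself (see ptnet_tx_intr() and ptnet_rx_intr()).
 */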

static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
	int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
	int nvecs = sc->num_rings;
	device_t dev = sc->dev;
	int err = ENOSPC;
	int cpu_cur;
	int i;

	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
		device_printf(dev, "Could not find MSI-X capability\n");
		return (ENXIO);
	}

	sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					      &rid, RF_ACTIVE);
	if (sc->msix_mem == NULL) {
		device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
		return (ENXIO);
	}

	if (pci_msix_count(dev) < nvecs) {
		device_printf(dev, "Not enough MSI-X vectors\n");
		goto err_path;
	}

	err = pci_alloc_msix(dev, &nvecs);
	if (err) {
		device_printf(dev, "Failed to allocate MSI-X vectors\n");
		goto err_path;
	}

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		rid = i + 1;
		pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						 RF_ACTIVE);
		if (pq->irq == NULL) {
			device_printf(dev, "Failed to allocate interrupt "
					   "for queue #%d\n", i);
			err = ENOSPC;
			goto err_path;
		}
	}

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *) = ptnet_tx_intr;

		if (i >= sc->num_tx_rings) {
			handler = ptnet_rx_intr;
		}
		err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
				     NULL /* intr_filter */, handler,
				     pq, &pq->cookie);
		if (err) {
			device_printf(dev, "Failed to register intr handler "
					   "for queue #%d\n", i);
			goto err_path;
		}

		bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
#if 0
		bus_bind_intr(sc->dev, pq->irq, cpu_cur);
#endif
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (i < sc->num_tx_rings)
			TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
		else
			NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);

		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
					taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
					device_get_nameunit(sc->dev), cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	return 0;
err_path:
	ptnet_irqs_fini(sc);
	return err;
}

static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
	device_t dev = sc->dev;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (pq->taskq) {
			taskqueue_free(pq->taskq);
			pq->taskq = NULL;
		}

		if (pq->cookie) {
			bus_teardown_intr(dev, pq->irq, pq->cookie);
			pq->cookie = NULL;
		}

		if (pq->irq) {
			bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
			pq->irq = NULL;
		}
	}

	if (sc->msix_mem) {
		pci_release_msi(dev);

		bus_release_resource(dev, SYS_RES_MEMORY,
				     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
				     sc->msix_mem);
		sc->msix_mem = NULL;
	}
}

static void
ptnet_init(void *opaque)
{
	struct ptnet_softc *sc = opaque;

	PTNET_CORE_LOCK(sc);
	ptnet_init_locked(sc);
	PTNET_CORE_UNLOCK(sc);
}

static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	device_t dev = sc->dev;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask __unused, err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		device_printf(dev, "SIOCSIFFLAGS %x\n", if_getflags(ifp));
		PTNET_CORE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/* Network stack wants the iff to be up. */
			err = ptnet_init_locked(sc);
		} else {
			/* Network stack wants the iff to be down. */
			err = ptnet_stop(sc);
		}
		/* We don't need to do anything to support IFF_PROMISC,
		 * since that is managed by the backend port. */
		PTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		device_printf(dev, "SIOCSIFCAP %x %x\n",
			      ifr->ifr_reqcap, if_getcapenable(ifp));
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			struct ptnet_queue *pq;
			int i;

			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				err = ether_poll_register(ptnet_poll, ifp);
				if (err) {
					break;
				}
				/* Stop queues and sync with taskqueues. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					/* Make sure the worker sees the
					 * IFF_DRV_RUNNING down. */
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 0;
					PTNET_Q_UNLOCK(pq);
					/* Wait for rescheduling to finish. */
					if (pq->taskq) {
						taskqueue_drain(pq->taskq,
								&pq->task);
					}
				}
				if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
			} else {
				err = ether_poll_deregister(ifp);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					PTNET_Q_LOCK(pq);
					pq->atok->appl_need_kick = 1;
					PTNET_Q_UNLOCK(pq);
				}
			}
		}
#endif /* DEVICE_POLLING */
		if_setcapenable(ifp, ifr->ifr_reqcap);
		break;

	case SIOCSIFMTU:
		/* We support any reasonable MTU. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
			err = EINVAL;
		} else {
			PTNET_CORE_LOCK(sc);
			if_setmtu(ifp, ifr->ifr_mtu);
			PTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return err;
}
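
/*
 * Note on the datapath set up below: even when the interface is used
 * only by the network stack, ptnet_init_locked() registers the
 * passthrough netmap adapter (sc->ptna->dr) and the TX/RX routines
 * move packets by copying between mbufs and netmap slots. The krings,
 * rings and memory lookup table are created here only when nothing
 * else has already brought the backend up, i.e. when
 * sc->ptna->backend_users == 0.
 */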

static int
ptnet_init_locked(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	unsigned int nm_buf_size;
	int ret;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
		return 0; /* nothing to do */
	}

	device_printf(sc->dev, "%s\n", __func__);

	/* Translate offload capabilities according to if_capenable. */
	if_sethwassist(ifp, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD_IPV6, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	/*
	 * Prepare the interface for netmap mode access.
	 */
	netmap_update_config(na_dr);

	ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
	if (ret) {
		device_printf(sc->dev, "netmap_mem_finalize() failed\n");
		return ret;
	}

	if (sc->ptna->backend_users == 0) {
		ret = ptnet_nm_krings_create(na_nm);
		if (ret) {
			device_printf(sc->dev, "ptnet_nm_krings_create() "
					       "failed\n");
			goto err_mem_finalize;
		}

		ret = netmap_mem_rings_create(na_dr);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_rings_create() "
					       "failed\n");
			goto err_rings_create;
		}

		ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_get_lut() "
					       "failed\n");
			goto err_get_lut;
		}
	}

	ret = ptnet_nm_register(na_dr, 1 /* on */);
	if (ret) {
		goto err_register;
	}

	nm_buf_size = NETMAP_BUF_SIZE(na_dr);

	KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
	sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
	device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
		      sc->min_tx_space);
#ifdef PTNETMAP_STATS
	callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	return 0;

err_register:
	memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
	netmap_mem_rings_delete(na_dr);
err_rings_create:
	ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return ret;
}

/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	int i;

	device_printf(sc->dev, "%s\n", __func__);

	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		return 0; /* nothing to do */
	}

	/* Clear the driver-ready flag, and synchronize with all the queues,
	 * so that after this loop we are sure nobody is working anymore with
	 * the device. This scheme is taken from the vtnet driver. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	callout_stop(&sc->tick);
	for (i = 0; i < sc->num_rings; i++) {
		PTNET_Q_LOCK(sc->queues + i);
		PTNET_Q_UNLOCK(sc->queues + i);
	}

	ptnet_nm_register(na_dr, 0 /* off */);

	if (sc->ptna->backend_users == 0) {
		netmap_mem_rings_delete(na_dr);
		ptnet_nm_krings_delete(na_nm);
	}
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return 0;
}

static void
ptnet_qflush(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int i;

	/* Flush all the bufrings and do the interface flush. */
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct mbuf *m;

		PTNET_Q_LOCK(pq);
		if (pq->bufring) {
			while ((m = buf_ring_dequeue_sc(pq->bufring))) {
				m_freem(m);
			}
		}
		PTNET_Q_UNLOCK(pq);
	}

	if_qflush(ifp);
}

static int
ptnet_media_change(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ifmedia *ifm = &sc->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		return EINVAL;
	}

	return 0;
}

static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue_stats stats[2];
	int i;

	/* Accumulate statistics over the queues. */
	memset(stats, 0, sizeof(stats));
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		int idx = (i < sc->num_tx_rings) ? 0 : 1;

		stats[idx].packets	+= pq->stats.packets;
		stats[idx].bytes	+= pq->stats.bytes;
		stats[idx].errors	+= pq->stats.errors;
		stats[idx].iqdrops	+= pq->stats.iqdrops;
		stats[idx].mcasts	+= pq->stats.mcasts;
	}

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (stats[1].packets);
	case IFCOUNTER_IQDROPS:
		return (stats[1].iqdrops);
	case IFCOUNTER_IERRORS:
		return (stats[1].errors);
	case IFCOUNTER_OPACKETS:
		return (stats[0].packets);
	case IFCOUNTER_OBYTES:
		return (stats[0].bytes);
	case IFCOUNTER_OMCASTS:
		return (stats[0].mcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}


#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
	struct ptnet_softc *sc = opaque;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct ptnet_queue_stats cur = pq->stats;
		struct timeval now;
		unsigned int delta;

		microtime(&now);
		delta = now.tv_usec - sc->last_ts.tv_usec +
			(now.tv_sec - sc->last_ts.tv_sec) * 1000000;
		delta /= 1000; /* in milliseconds */

		if (delta == 0)
			continue;

		device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
			      "intr %lu\n", i, delta,
			      (cur.packets - pq->last_stats.packets),
			      (cur.kicks - pq->last_stats.kicks),
			      (cur.intrs - pq->last_stats.intrs));
		pq->last_stats = cur;
	}
	microtime(&sc->last_ts);
	callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */

static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	/* We are always active, as the backend netmap port is
	 * always open in netmap mode. */
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}

static uint32_t
ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
{
	/*
	 * Write a command and read back error status,
	 * with zero meaning success.
	 */
	bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
	return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}

static int
ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);

	info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
		      info->num_tx_rings, info->num_rx_rings,
		      info->num_tx_descs, info->num_rx_descs,
		      info->rx_buf_maxsize);

	return 0;
}

static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
	int i;

	/* Sync krings from the host, reading from
	 * CSB. */
	for (i = 0; i < sc->num_rings; i++) {
		struct nm_csb_atok *atok = sc->queues[i].atok;
		struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
		struct netmap_kring *kring;

		if (i < na->num_tx_rings) {
			kring = na->tx_rings[i];
		} else {
			kring = na->rx_rings[i - na->num_tx_rings];
		}
		kring->rhead = kring->ring->head = atok->head;
		kring->rcur = kring->ring->cur = atok->cur;
		kring->nr_hwcur = ktoa->hwcur;
		kring->nr_hwtail = kring->rtail =
			kring->ring->tail = ktoa->hwtail;

		nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
			 ktoa->hwcur, atok->head, atok->cur,
			 ktoa->hwtail);
		nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
			 t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
			 kring->ring->head, kring->ring->cur, kring->nr_hwtail,
			 kring->rtail, kring->ring->tail);
	}
}

static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
	unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

	bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
	sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
	sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}
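
/*
 * ptnet_nm_register() below is shared by the native netmap adapter and
 * by the driver's own passthrough adapter (sc->ptna->dr), so
 * backend_users counts both kinds of users. The host side is set up
 * (PTNETMAP_PTCTL_CREATE) only when the first user registers and torn
 * down (PTNETMAP_PTCTL_DELETE) only when the last one unregisters; in
 * between, registrations just toggle kring modes and notification
 * flags.
 */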

static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
	/* device-specific */
	if_t ifp = na->ifp;
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int native = (na == &sc->ptna->hwup.up);
	struct ptnet_queue *pq;
	int ret = 0;
	int i;

	if (!onoff) {
		sc->ptna->backend_users--;
	}

	/* If this is the last netmap client, guest interrupt enable flags may
	 * be in arbitrary state. Since these flags are going to be used also
	 * by the netdevice driver, we have to make sure to start with
	 * notifications enabled. Also, schedule NAPI to flush pending packets
	 * in the RX rings, since we will not receive further interrupts
	 * until they are processed. */
	if (native && !onoff && na->active_fds == 0) {
		nm_prinf("Exit netmap mode, re-enable interrupts");
		for (i = 0; i < sc->num_rings; i++) {
			pq = sc->queues + i;
			pq->atok->appl_need_kick = 1;
		}
	}

	if (onoff) {
		if (sc->ptna->backend_users == 0) {
			/* Initialize notification enable fields in the CSB. */
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->ktoa->kern_need_kick = 1;
				pq->atok->appl_need_kick =
					(!(if_getcapenable(ifp) & IFCAP_POLLING)
						&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
			if (ret) {
				return ret;
			}

			/* Align the guest krings and rings to the state stored
			 * in the CSB. */
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we don't
		 * want to replace the if_transmit method, nor set
		 * NAF_NETMAP_ON. */
		if (native) {
			netmap_krings_mode_commit(na, onoff);
			nm_set_native_flags(na);
		}

	} else {
		if (native) {
			nm_clear_native_flags(na);
			netmap_krings_mode_commit(na, onoff);
		}

		if (sc->ptna->backend_users == 0) {
			ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
		}
	}

	if (onoff) {
		sc->ptna->backend_users++;
	}

	return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static void
ptnet_nm_intr(struct netmap_adapter *na, int onoff)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		pq->atok->appl_need_kick = onoff;
	}
}
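
/*
 * In the interrupt handlers below, netmap_tx_irq()/netmap_rx_irq()
 * give netmap a chance to handle the event first; the mbuf-based
 * processing that follows only runs when they return NM_IRQ_PASS,
 * i.e. when the ring is not currently owned by a native netmap
 * application.
 */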

static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to process pending transmission requests.
	 * However, vtnet, if_em and if_igb just call ptnet_transmit() here,
	 * at least when using MSI-X interrupts. The if_em driver instead
	 * schedules the taskqueue when using legacy interrupts. */
	taskqueue_enqueue(pq->taskq, &pq->task);
}

static void
ptnet_rx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;
	unsigned int unused;

	DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
		return;
	}

	/* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
	 * receive-side processing is executed directly in the interrupt
	 * service routine. Alternatively, we may schedule the taskqueue. */
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	      ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;

	/* Some packets have been pushed to the netmap ring. We have
	 * to tell the host to process the new packets, updating cur
	 * and head in the CSB. */
	ring->head = ring->cur = head;

	/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
	kring->rcur = kring->rhead = head;

	nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

	/* Kick the host if needed. */
	if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
		atok->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}

#define PTNET_TX_NOSPACE(_h, _k, _min)	\
	((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
		(_k)->rtail - (_h)) < (_min)
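
/*
 * PTNET_TX_NOSPACE() computes the number of free TX slots between head
 * and kring->rtail (modulo the ring size) and compares it against
 * sc->min_tx_space, the worst-case number of slots needed by a single
 * packet: PTNET_MAX_PKT_SIZE / NETMAP_BUF_SIZE + 2, as set in
 * ptnet_init_locked(). For example, with hypothetical 2048-byte netmap
 * buffers that is 65536 / 2048 + 2 = 34 slots.
 */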

/* This function may be called by the network stack,
 * or by the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
			   bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	if_t ifp = sc->ifp;
	unsigned int batch_count = 0;
	struct nm_csb_atok *atok;
	struct nm_csb_ktoa *ktoa;
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	unsigned int count = 0;
	unsigned int minspace;
	unsigned int head;
	unsigned int lim;
	struct mbuf *mhead;
	struct mbuf *mf;
	int nmbuf_bytes;
	uint8_t *nmbuf;

	if (!PTNET_Q_TRYLOCK(pq)) {
		/* We failed to acquire the lock, schedule the taskqueue. */
		nm_prlim(1, "Deferring TX work");
		if (may_resched) {
			taskqueue_enqueue(pq->taskq, &pq->task);
		}

		return 0;
	}

	if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		PTNET_Q_UNLOCK(pq);
		nm_prlim(1, "Interface is down");
		return ENETDOWN;
	}

	atok = pq->atok;
	ktoa = pq->ktoa;
	kring = na->tx_rings[pq->kring_id];
	ring = kring->ring;
	lim = kring->nkr_num_slots - 1;
	head = ring->head;
	minspace = sc->min_tx_space;

	while (count < budget) {
		if (PTNET_TX_NOSPACE(head, kring, minspace)) {
			/* We ran out of slots, let's see if the host has
			 * freed up some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (PTNET_TX_NOSPACE(head, kring, minspace)) {
				/* Still no slots available. Reactivate the
				 * interrupts so that we can be notified
				 * when some free slots are made available by
				 * the host. */
				atok->appl_need_kick = 1;

				/* Double check. We need a full barrier to
				 * prevent the store to atok->appl_need_kick
				 * to be reordered with the load from
				 * ktoa->hwcur and ktoa->hwtail (store-load
				 * barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(PTNET_TX_NOSPACE(head, kring,
							    minspace))) {
					break;
				}

				nm_prlim(1, "Found more slots by doublecheck");
				/* More slots were freed before reactivating
				 * the interrupts. */
				atok->appl_need_kick = 0;
			}
		}

		mhead = drbr_peek(ifp, pq->bufring);
		if (!mhead) {
			break;
		}

		/* Initialize transmission state variables. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_bytes = 0;

		/* If needed, prepare the virtio-net header at the beginning
		 * of the first slot. */
		if (have_vnet_hdr) {
			struct virtio_net_hdr *vh =
					(struct virtio_net_hdr *)nmbuf;

			/* For performance, we could replace this memset() with
			 * two 8-bytes-wide writes. */
			memset(nmbuf, 0, PTNET_HDR_SIZE);
			if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
				mhead = virtio_net_tx_offload(ifp, mhead, false,
							      vh);
				if (unlikely(!mhead)) {
					/* Packet dropped because errors
					 * occurred while preparing the vnet
					 * header. Let's go ahead with the next
					 * packet. */
					pq->stats.errors ++;
					drbr_advance(ifp, pq->bufring);
					continue;
				}
			}
			nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
				 "csum_start %u csum_ofs %u hdr_len = %u "
				 "gso_size %u gso_type %x", __func__,
				 mhead->m_pkthdr.csum_flags, vh->flags,
				 vh->csum_start, vh->csum_offset, vh->hdr_len,
				 vh->gso_size, vh->gso_type);

			nmbuf += PTNET_HDR_SIZE;
			nmbuf_bytes += PTNET_HDR_SIZE;
		}

		for (mf = mhead; mf; mf = mf->m_next) {
			uint8_t *mdata = mf->m_data;
			int mlen = mf->m_len;

			for (;;) {
				int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

				if (mlen < copy) {
					copy = mlen;
				}
				memcpy(nmbuf, mdata, copy);

				mdata += copy;
				mlen -= copy;
				nmbuf += copy;
				nmbuf_bytes += copy;

				if (!mlen) {
					break;
				}

				slot->len = nmbuf_bytes;
				slot->flags = NS_MOREFRAG;

				head = nm_next(head, lim);
				KASSERT(head != ring->tail,
					("Unexpectedly run out of TX space"));
				slot = ring->slot + head;
				nmbuf = NMB(na, slot);
				nmbuf_bytes = 0;
			}
		}

		/* Complete last slot and update head. */
		slot->len = nmbuf_bytes;
		slot->flags = 0;
		head = nm_next(head, lim);

		/* Consume the packet just processed. */
		drbr_advance(ifp, pq->bufring);

		/* Copy the packet to listeners. */
		ETHER_BPF_MTAP(ifp, mhead);

		pq->stats.packets ++;
		pq->stats.bytes += mhead->m_pkthdr.len;
		if (mhead->m_flags & M_MCAST) {
			pq->stats.mcasts ++;
		}

		m_freem(mhead);

		count ++;
		if (++batch_count == PTNET_TX_BATCH) {
			ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
			batch_count = 0;
		}
	}

	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
	}

	if (count >= budget && may_resched) {
		DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
			     drbr_inuse(ifp, pq->bufring)));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}

	PTNET_Q_UNLOCK(pq);

	return count;
}

static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue *pq;
	unsigned int queue_idx;
	int err;

	DBG(device_printf(sc->dev, "transmit %p\n", m));

	/* Insert 802.1Q header if needed. */
	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if (m == NULL) {
			return ENOBUFS;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	/* Get the flow-id if available. */
	queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
		    m->m_pkthdr.flowid : curcpu;

	if (unlikely(queue_idx >= sc->num_tx_rings)) {
		queue_idx %= sc->num_tx_rings;
	}

	pq = sc->queues + queue_idx;

	err = drbr_enqueue(ifp, pq->bufring, m);
	if (err) {
		/* ENOBUFS when the bufring is full */
		nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
			 __func__, err);
		pq->stats.errors ++;
		return err;
	}

	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		/* If polling is on, the transmit queues will be
		 * drained by the poller. */
		return 0;
	}

	err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

	return (err < 0) ? err : 0;
}

static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot = ring->slot + head;

	for (;;) {
		head = nm_next(head, kring->nkr_num_slots - 1);
		if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
			break;
		}
		slot = ring->slot + head;
	}

	return head;
}

static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
	uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

	do {
		unsigned int copy;

		if (mtail->m_len == MCLBYTES) {
			struct mbuf *mf;

			mf = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (unlikely(!mf)) {
				return NULL;
			}

			mtail->m_next = mf;
			mtail = mf;
			mdata = mtod(mtail, uint8_t *);
			mtail->m_len = 0;
		}

		copy = MCLBYTES - mtail->m_len;
		if (nmbuf_len < copy) {
			copy = nmbuf_len;
		}

		memcpy(mdata, nmbuf, copy);

		nmbuf += copy;
		nmbuf_len -= copy;
		mdata += copy;
		mtail->m_len += copy;
	} while (nmbuf_len);

	return mtail;
}
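
/*
 * ptnet_rx_slot() above appends the contents of one netmap buffer to
 * an mbuf chain, growing the chain with standard MCLBYTES clusters as
 * needed, and returns NULL if a cluster allocation fails so that
 * ptnet_rx_eof() can roll back to the previous ring position and retry
 * later.
 */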

static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings[pq->kring_id];
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* Deliver the mbuf to the network stack. */
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots, let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				atok->appl_need_kick = 1;

				/* Double check for more completed RX slots.
				 * We need a full barrier to prevent the store
				 * to atok->appl_need_kick to be reordered with
				 * the load from ktoa->hwcur and ktoa->hwtail
				 * (store-load barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				atok->appl_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why host should
				 * put the header in multiple netmap slots.
				 * If this is the case, discard. */
				nm_prlim(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops ++;
				deliver = 0;
				goto skip;
			}
			nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
				 "csum_ofs %u hdr_len = %u gso_size %u "
				 "gso_type %x", __func__, vh->flags,
				 vh->csum_start, vh->csum_offset, vh->hdr_len,
				 vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors ++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					      " mbuf frag, reset head %u --> %u\n",
					      __func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors ++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * the NS_MOREFRAG set. Drop it and continue
				 * the outer cycle (to do the double-check). */
				nm_prlim(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops ++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) {
			m_freem(mhead);
			nm_prlim(1, "Csum offload error: dropping");
			pq->stats.iqdrops ++;
			deliver = 0;
		}

skip:
		count ++;
		if (++batch_count >= PTNET_RX_BATCH) {
			/* Some packets have been (or will be) pushed to the
			 * network stack. We need to update the CSB to tell the
			 * host about the new ring->cur and ring->head (RX
			 * buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}

		if (likely(deliver)) {
			pq->stats.packets ++;
			pq->stats.bytes += mhead->m_pkthdr.len;

			PTNET_Q_UNLOCK(pq);
			if_input(ifp, mhead);
			PTNET_Q_LOCK(pq);
			/* The ring->head index (and related indices) are
			 * updated under pq lock by ptnet_ring_update().
			 * Since we dropped the lock to call if_input(), we
			 * must reload ring->head and restart processing the
			 * ring from there. */
			head = ring->head;

			if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
				/* The interface has gone down while we didn't
				 * have the lock. Stop any processing and exit. */
				goto unlock;
			}
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
			     head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
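
/*
 * In polling mode (below), the budget is initially split evenly across
 * all queues, with at least one packet each; any budget left over after
 * a pass is redistributed in the following passes ("borrow"), until a
 * full scan of the queues makes no progress.
 */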

#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY
 * differently, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	nm_prlim(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						   queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result, we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */
#endif /* WITH_PTNETMAP */