1 /*- 2 * Copyright (c) 2016, Vincenzo Maffione 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 /* Driver for ptnet paravirtualized network device. */ 30 31 #include <sys/cdefs.h> 32 33 #include <sys/types.h> 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/socket.h> 42 #include <sys/sysctl.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/taskqueue.h> 46 #include <sys/smp.h> 47 #include <sys/time.h> 48 #include <machine/smp.h> 49 50 #include <vm/uma.h> 51 #include <vm/vm.h> 52 #include <vm/pmap.h> 53 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_var.h> 57 #include <net/if_arp.h> 58 #include <net/if_dl.h> 59 #include <net/if_types.h> 60 #include <net/if_media.h> 61 #include <net/if_vlan_var.h> 62 #include <net/bpf.h> 63 64 #include <netinet/in_systm.h> 65 #include <netinet/in.h> 66 #include <netinet/ip.h> 67 #include <netinet/ip6.h> 68 #include <netinet6/ip6_var.h> 69 #include <netinet/udp.h> 70 #include <netinet/tcp.h> 71 72 #include <machine/bus.h> 73 #include <machine/resource.h> 74 #include <sys/bus.h> 75 #include <sys/rman.h> 76 77 #include <dev/pci/pcivar.h> 78 #include <dev/pci/pcireg.h> 79 80 #include "opt_inet.h" 81 #include "opt_inet6.h" 82 83 #include <sys/selinfo.h> 84 #include <net/netmap.h> 85 #include <dev/netmap/netmap_kern.h> 86 #include <net/netmap_virt.h> 87 #include <dev/netmap/netmap_mem2.h> 88 #include <dev/virtio/network/virtio_net.h> 89 90 #ifdef WITH_PTNETMAP 91 92 #ifndef INET 93 #error "INET not defined, cannot support offloadings" 94 #endif 95 96 static uint64_t ptnet_get_counter(if_t, ift_counter); 97 98 //#define PTNETMAP_STATS 99 //#define DEBUG 100 #ifdef DEBUG 101 #define DBG(x) x 102 #else /* !DEBUG */ 103 #define DBG(x) 104 #endif /* !DEBUG */ 105 106 extern int ptnet_vnet_hdr; /* Tunable parameter */ 107 108 struct ptnet_softc; 109 110 struct ptnet_queue_stats { 111 uint64_t packets; /* if_[io]packets */ 112 uint64_t bytes; /* if_[io]bytes */ 113 uint64_t errors; /* if_[io]errors */ 114 uint64_t iqdrops; /* if_iqdrops */ 115 uint64_t mcasts; /* if_[io]mcasts */ 116 #ifdef 
PTNETMAP_STATS 117 uint64_t intrs; 118 uint64_t kicks; 119 #endif /* PTNETMAP_STATS */ 120 }; 121 122 struct ptnet_queue { 123 struct ptnet_softc *sc; 124 struct resource *irq; 125 void *cookie; 126 int kring_id; 127 struct nm_csb_atok *atok; 128 struct nm_csb_ktoa *ktoa; 129 unsigned int kick; 130 struct mtx lock; 131 struct buf_ring *bufring; /* for TX queues */ 132 struct ptnet_queue_stats stats; 133 #ifdef PTNETMAP_STATS 134 struct ptnet_queue_stats last_stats; 135 #endif /* PTNETMAP_STATS */ 136 struct taskqueue *taskq; 137 struct task task; 138 char lock_name[16]; 139 }; 140 141 #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) 142 #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) 143 #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) 144 145 struct ptnet_softc { 146 device_t dev; 147 if_t ifp; 148 struct ifmedia media; 149 struct mtx lock; 150 char lock_name[16]; 151 char hwaddr[ETHER_ADDR_LEN]; 152 153 /* Mirror of PTFEAT register. */ 154 uint32_t ptfeatures; 155 unsigned int vnet_hdr_len; 156 157 /* PCI BARs support. */ 158 struct resource *iomem; 159 struct resource *msix_mem; 160 161 unsigned int num_rings; 162 unsigned int num_tx_rings; 163 struct ptnet_queue *queues; 164 struct ptnet_queue *rxqueues; 165 struct nm_csb_atok *csb_gh; 166 struct nm_csb_ktoa *csb_hg; 167 168 unsigned int min_tx_space; 169 170 struct netmap_pt_guest_adapter *ptna; 171 172 struct callout tick; 173 #ifdef PTNETMAP_STATS 174 struct timeval last_ts; 175 #endif /* PTNETMAP_STATS */ 176 }; 177 178 #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) 179 #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 180 181 static int ptnet_probe(device_t); 182 static int ptnet_attach(device_t); 183 static int ptnet_detach(device_t); 184 static int ptnet_suspend(device_t); 185 static int ptnet_resume(device_t); 186 static int ptnet_shutdown(device_t); 187 188 static void ptnet_init(void *opaque); 189 static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); 190 static int ptnet_init_locked(struct ptnet_softc *sc); 191 static int ptnet_stop(struct ptnet_softc *sc); 192 static int ptnet_transmit(if_t ifp, struct mbuf *m); 193 static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, 194 unsigned int budget, 195 bool may_resched); 196 static void ptnet_qflush(if_t ifp); 197 static void ptnet_tx_task(void *context, int pending); 198 199 static int ptnet_media_change(if_t ifp); 200 static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); 201 #ifdef PTNETMAP_STATS 202 static void ptnet_tick(void *opaque); 203 #endif 204 205 static int ptnet_irqs_init(struct ptnet_softc *sc); 206 static void ptnet_irqs_fini(struct ptnet_softc *sc); 207 208 static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd); 209 static int ptnet_nm_config(struct netmap_adapter *na, 210 struct nm_config_info *info); 211 static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); 212 static int ptnet_nm_register(struct netmap_adapter *na, int onoff); 213 static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); 214 static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); 215 static void ptnet_nm_intr(struct netmap_adapter *na, int onoff); 216 217 static void ptnet_tx_intr(void *opaque); 218 static void ptnet_rx_intr(void *opaque); 219 220 static unsigned ptnet_rx_discard(struct netmap_kring *kring, 221 unsigned int head); 222 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, 223 bool may_resched); 224 static void ptnet_rx_task(void *context, int pending); 225 226 #ifdef 
DEVICE_POLLING 227 static poll_handler_t ptnet_poll; 228 #endif 229 230 static device_method_t ptnet_methods[] = { 231 DEVMETHOD(device_probe, ptnet_probe), 232 DEVMETHOD(device_attach, ptnet_attach), 233 DEVMETHOD(device_detach, ptnet_detach), 234 DEVMETHOD(device_suspend, ptnet_suspend), 235 DEVMETHOD(device_resume, ptnet_resume), 236 DEVMETHOD(device_shutdown, ptnet_shutdown), 237 DEVMETHOD_END 238 }; 239 240 static driver_t ptnet_driver = { 241 "ptnet", 242 ptnet_methods, 243 sizeof(struct ptnet_softc) 244 }; 245 246 /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */ 247 DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, NULL, NULL, 248 SI_ORDER_MIDDLE + 2); 249 250 static int 251 ptnet_probe(device_t dev) 252 { 253 if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || 254 pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { 255 return (ENXIO); 256 } 257 258 device_set_desc(dev, "ptnet network adapter"); 259 260 return (BUS_PROBE_DEFAULT); 261 } 262 263 static inline void ptnet_kick(struct ptnet_queue *pq) 264 { 265 #ifdef PTNETMAP_STATS 266 pq->stats.kicks ++; 267 #endif /* PTNETMAP_STATS */ 268 bus_write_4(pq->sc->iomem, pq->kick, 0); 269 } 270 271 #define PTNET_BUF_RING_SIZE 4096 272 #define PTNET_RX_BUDGET 512 273 #define PTNET_RX_BATCH 1 274 #define PTNET_TX_BUDGET 512 275 #define PTNET_TX_BATCH 64 276 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) 277 #define PTNET_MAX_PKT_SIZE 65536 278 279 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP) 280 #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) 281 #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ 282 PTNET_CSUM_OFFLOAD_IPV6) 283 284 static int 285 ptnet_attach(device_t dev) 286 { 287 uint32_t ptfeatures = 0; 288 unsigned int num_rx_rings, num_tx_rings; 289 struct netmap_adapter na_arg; 290 unsigned int nifp_offset; 291 struct ptnet_softc *sc; 292 if_t ifp; 293 uint32_t macreg; 294 int err, rid; 295 int i; 296 297 sc = device_get_softc(dev); 298 sc->dev = dev; 299 300 /* Setup PCI resources. */ 301 pci_enable_busmaster(dev); 302 303 rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); 304 sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 305 RF_ACTIVE); 306 if (sc->iomem == NULL) { 307 device_printf(dev, "Failed to map I/O BAR\n"); 308 return (ENXIO); 309 } 310 311 /* Negotiate features with the hypervisor. */ 312 if (ptnet_vnet_hdr) { 313 ptfeatures |= PTNETMAP_F_VNET_HDR; 314 } 315 bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ 316 ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ 317 sc->ptfeatures = ptfeatures; 318 319 num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 320 num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 321 sc->num_rings = num_tx_rings + num_rx_rings; 322 sc->num_tx_rings = num_tx_rings; 323 324 if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) { 325 device_printf(dev, "CSB cannot handle that many rings (%u)\n", 326 sc->num_rings); 327 err = ENOMEM; 328 goto err_path; 329 } 330 331 /* Allocate CSB and carry out CSB allocation protocol. */ 332 sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, 333 (size_t)0, -1UL, PAGE_SIZE, 0); 334 if (sc->csb_gh == NULL) { 335 device_printf(dev, "Failed to allocate CSB\n"); 336 err = ENOMEM; 337 goto err_path; 338 } 339 sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE); 340 341 { 342 /* 343 * We use uint64_t rather than vm_paddr_t since we 344 * need 64 bit addresses even on 32 bit platforms. 
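 * The CSB itself is a single contiguous two-page allocation: the first
 * page holds the guest-to-host (atok) entries and the second one the
 * host-to-guest (ktoa) entries, one entry of each kind per ring.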
345 */ 346 uint64_t paddr = vtophys(sc->csb_gh); 347 348 /* CSB allocation protocol: write to BAH first, then 349 * to BAL (for both GH and HG sections). */ 350 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 351 (paddr >> 32) & 0xffffffff); 352 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 353 paddr & 0xffffffff); 354 paddr = vtophys(sc->csb_hg); 355 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 356 (paddr >> 32) & 0xffffffff); 357 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 358 paddr & 0xffffffff); 359 } 360 361 /* Allocate and initialize per-queue data structures. */ 362 sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, 363 M_DEVBUF, M_NOWAIT | M_ZERO); 364 if (sc->queues == NULL) { 365 err = ENOMEM; 366 goto err_path; 367 } 368 sc->rxqueues = sc->queues + num_tx_rings; 369 370 for (i = 0; i < sc->num_rings; i++) { 371 struct ptnet_queue *pq = sc->queues + i; 372 373 pq->sc = sc; 374 pq->kring_id = i; 375 pq->kick = PTNET_IO_KICK_BASE + 4 * i; 376 pq->atok = sc->csb_gh + i; 377 pq->ktoa = sc->csb_hg + i; 378 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", 379 device_get_nameunit(dev), i); 380 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); 381 if (i >= num_tx_rings) { 382 /* RX queue: fix kring_id. */ 383 pq->kring_id -= num_tx_rings; 384 } else { 385 /* TX queue: allocate buf_ring. */ 386 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, 387 M_DEVBUF, M_NOWAIT, &pq->lock); 388 if (pq->bufring == NULL) { 389 err = ENOMEM; 390 goto err_path; 391 } 392 } 393 } 394 395 sc->min_tx_space = 64; /* Safe initial value. */ 396 397 err = ptnet_irqs_init(sc); 398 if (err) { 399 goto err_path; 400 } 401 402 /* Setup Ethernet interface. */ 403 sc->ifp = ifp = if_alloc(IFT_ETHER); 404 if (ifp == NULL) { 405 device_printf(dev, "Failed to allocate ifnet\n"); 406 err = ENOMEM; 407 goto err_path; 408 } 409 410 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 411 if_setbaudrate(ifp, IF_Gbps(10)); 412 if_setsoftc(ifp, sc); 413 if_setflags(ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX); 414 if_setinitfn(ifp, ptnet_init); 415 if_setioctlfn(ifp, ptnet_ioctl); 416 if_setget_counter(ifp, ptnet_get_counter); 417 if_settransmitfn(ifp, ptnet_transmit); 418 if_setqflushfn(ifp, ptnet_qflush); 419 420 ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, 421 ptnet_media_status); 422 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); 423 ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); 424 425 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); 426 sc->hwaddr[0] = (macreg >> 8) & 0xff; 427 sc->hwaddr[1] = macreg & 0xff; 428 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); 429 sc->hwaddr[2] = (macreg >> 24) & 0xff; 430 sc->hwaddr[3] = (macreg >> 16) & 0xff; 431 sc->hwaddr[4] = (macreg >> 8) & 0xff; 432 sc->hwaddr[5] = macreg & 0xff; 433 434 ether_ifattach(ifp, sc->hwaddr); 435 436 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 437 if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU, 0); 438 439 if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { 440 /* Similarly to what the vtnet driver does, we can emulate 441 * VLAN offloadings by inserting and removing the 802.1Q 442 * header during transmit and receive. We are then able 443 * to do checksum offloading of VLAN frames. 
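 * The tag is inserted in software by ptnet_transmit() (through
 * ether_vlanencap()) and stripped on receive by ptnet_vlan_tag_remove().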
*/ 444 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 445 | IFCAP_VLAN_HWCSUM 446 | IFCAP_TSO | IFCAP_LRO 447 | IFCAP_VLAN_HWTSO 448 | IFCAP_VLAN_HWTAGGING, 0); 449 } 450 451 if_setcapenable(ifp, if_getcapabilities(ifp)); 452 #ifdef DEVICE_POLLING 453 /* Don't enable polling by default. */ 454 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); 455 #endif 456 snprintf(sc->lock_name, sizeof(sc->lock_name), 457 "%s", device_get_nameunit(dev)); 458 mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); 459 callout_init_mtx(&sc->tick, &sc->lock, 0); 460 461 /* Prepare a netmap_adapter struct instance to do netmap_attach(). */ 462 nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); 463 memset(&na_arg, 0, sizeof(na_arg)); 464 na_arg.ifp = ifp; 465 na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 466 na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 467 na_arg.num_tx_rings = num_tx_rings; 468 na_arg.num_rx_rings = num_rx_rings; 469 na_arg.nm_config = ptnet_nm_config; 470 na_arg.nm_krings_create = ptnet_nm_krings_create; 471 na_arg.nm_krings_delete = ptnet_nm_krings_delete; 472 na_arg.nm_dtor = ptnet_nm_dtor; 473 na_arg.nm_intr = ptnet_nm_intr; 474 na_arg.nm_register = ptnet_nm_register; 475 na_arg.nm_txsync = ptnet_nm_txsync; 476 na_arg.nm_rxsync = ptnet_nm_rxsync; 477 478 netmap_pt_guest_attach(&na_arg, nifp_offset, 479 bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); 480 481 /* Now a netmap adapter for this ifp has been allocated, and it 482 * can be accessed through NA(ifp). We also have to initialize the CSB 483 * pointer. */ 484 sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); 485 486 /* If virtio-net header was negotiated, set the virt_hdr_len field in 487 * the netmap adapter, to inform users that this netmap adapter requires 488 * the application to deal with the headers. */ 489 ptnet_update_vnet_hdr(sc); 490 491 device_printf(dev, "%s() completed\n", __func__); 492 493 return (0); 494 495 err_path: 496 ptnet_detach(dev); 497 return err; 498 } 499 500 /* Stop host sync-kloop if it was running. */ 501 static void 502 ptnet_device_shutdown(struct ptnet_softc *sc) 503 { 504 ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); 505 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); 506 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); 507 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); 508 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); 509 } 510 511 static int 512 ptnet_detach(device_t dev) 513 { 514 struct ptnet_softc *sc = device_get_softc(dev); 515 int i; 516 517 ptnet_device_shutdown(sc); 518 519 #ifdef DEVICE_POLLING 520 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) { 521 ether_poll_deregister(sc->ifp); 522 } 523 #endif 524 callout_drain(&sc->tick); 525 526 if (sc->queues) { 527 /* Drain taskqueues before calling if_detach. */ 528 for (i = 0; i < sc->num_rings; i++) { 529 struct ptnet_queue *pq = sc->queues + i; 530 531 if (pq->taskq) { 532 taskqueue_drain(pq->taskq, &pq->task); 533 } 534 } 535 } 536 537 if (sc->ifp) { 538 ether_ifdetach(sc->ifp); 539 540 /* Uninitialize netmap adapters for this device. 
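 * netmap_detach() is called before if_free(), since the netmap
 * adapters set up by netmap_pt_guest_attach() still reference the ifp.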
*/ 541 netmap_detach(sc->ifp); 542 543 ifmedia_removeall(&sc->media); 544 if_free(sc->ifp); 545 sc->ifp = NULL; 546 } 547 548 ptnet_irqs_fini(sc); 549 550 if (sc->csb_gh) { 551 contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF); 552 sc->csb_gh = NULL; 553 sc->csb_hg = NULL; 554 } 555 556 if (sc->queues) { 557 for (i = 0; i < sc->num_rings; i++) { 558 struct ptnet_queue *pq = sc->queues + i; 559 560 if (mtx_initialized(&pq->lock)) { 561 mtx_destroy(&pq->lock); 562 } 563 if (pq->bufring != NULL) { 564 buf_ring_free(pq->bufring, M_DEVBUF); 565 } 566 } 567 free(sc->queues, M_DEVBUF); 568 sc->queues = NULL; 569 } 570 571 if (sc->iomem) { 572 bus_release_resource(dev, SYS_RES_IOPORT, 573 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); 574 sc->iomem = NULL; 575 } 576 577 mtx_destroy(&sc->lock); 578 579 device_printf(dev, "%s() completed\n", __func__); 580 581 return (0); 582 } 583 584 static int 585 ptnet_suspend(device_t dev) 586 { 587 struct ptnet_softc *sc = device_get_softc(dev); 588 589 (void)sc; 590 591 return (0); 592 } 593 594 static int 595 ptnet_resume(device_t dev) 596 { 597 struct ptnet_softc *sc = device_get_softc(dev); 598 599 (void)sc; 600 601 return (0); 602 } 603 604 static int 605 ptnet_shutdown(device_t dev) 606 { 607 struct ptnet_softc *sc = device_get_softc(dev); 608 609 ptnet_device_shutdown(sc); 610 611 return (0); 612 } 613 614 static int 615 ptnet_irqs_init(struct ptnet_softc *sc) 616 { 617 int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); 618 int nvecs = sc->num_rings; 619 device_t dev = sc->dev; 620 int err = ENOSPC; 621 int cpu_cur; 622 int i; 623 624 if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { 625 device_printf(dev, "Could not find MSI-X capability\n"); 626 return (ENXIO); 627 } 628 629 sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 630 &rid, RF_ACTIVE); 631 if (sc->msix_mem == NULL) { 632 device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); 633 return (ENXIO); 634 } 635 636 if (pci_msix_count(dev) < nvecs) { 637 device_printf(dev, "Not enough MSI-X vectors\n"); 638 goto err_path; 639 } 640 641 err = pci_alloc_msix(dev, &nvecs); 642 if (err) { 643 device_printf(dev, "Failed to allocate MSI-X vectors\n"); 644 goto err_path; 645 } 646 647 for (i = 0; i < nvecs; i++) { 648 struct ptnet_queue *pq = sc->queues + i; 649 650 rid = i + 1; 651 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 652 RF_ACTIVE); 653 if (pq->irq == NULL) { 654 device_printf(dev, "Failed to allocate interrupt " 655 "for queue #%d\n", i); 656 err = ENOSPC; 657 goto err_path; 658 } 659 } 660 661 cpu_cur = CPU_FIRST(); 662 for (i = 0; i < nvecs; i++) { 663 struct ptnet_queue *pq = sc->queues + i; 664 void (*handler)(void *) = ptnet_tx_intr; 665 666 if (i >= sc->num_tx_rings) { 667 handler = ptnet_rx_intr; 668 } 669 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, 670 NULL /* intr_filter */, handler, 671 pq, &pq->cookie); 672 if (err) { 673 device_printf(dev, "Failed to register intr handler " 674 "for queue #%d\n", i); 675 goto err_path; 676 } 677 678 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); 679 #if 0 680 bus_bind_intr(sc->dev, pq->irq, cpu_cur); 681 #endif 682 cpu_cur = CPU_NEXT(cpu_cur); 683 } 684 685 device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); 686 687 cpu_cur = CPU_FIRST(); 688 for (i = 0; i < nvecs; i++) { 689 struct ptnet_queue *pq = sc->queues + i; 690 691 if (i < sc->num_tx_rings) 692 TASK_INIT(&pq->task, 0, ptnet_tx_task, pq); 693 else 694 NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq); 695 696 pq->taskq = taskqueue_create_fast("ptnet_queue", 
M_NOWAIT, 697 taskqueue_thread_enqueue, &pq->taskq); 698 taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", 699 device_get_nameunit(sc->dev), cpu_cur); 700 cpu_cur = CPU_NEXT(cpu_cur); 701 } 702 703 return 0; 704 err_path: 705 ptnet_irqs_fini(sc); 706 return err; 707 } 708 709 static void 710 ptnet_irqs_fini(struct ptnet_softc *sc) 711 { 712 device_t dev = sc->dev; 713 int i; 714 715 for (i = 0; i < sc->num_rings; i++) { 716 struct ptnet_queue *pq = sc->queues + i; 717 718 if (pq->taskq) { 719 taskqueue_free(pq->taskq); 720 pq->taskq = NULL; 721 } 722 723 if (pq->cookie) { 724 bus_teardown_intr(dev, pq->irq, pq->cookie); 725 pq->cookie = NULL; 726 } 727 728 if (pq->irq) { 729 bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); 730 pq->irq = NULL; 731 } 732 } 733 734 if (sc->msix_mem) { 735 pci_release_msi(dev); 736 737 bus_release_resource(dev, SYS_RES_MEMORY, 738 PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), 739 sc->msix_mem); 740 sc->msix_mem = NULL; 741 } 742 } 743 744 static void 745 ptnet_init(void *opaque) 746 { 747 struct ptnet_softc *sc = opaque; 748 749 PTNET_CORE_LOCK(sc); 750 ptnet_init_locked(sc); 751 PTNET_CORE_UNLOCK(sc); 752 } 753 754 static int 755 ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) 756 { 757 struct ptnet_softc *sc = if_getsoftc(ifp); 758 device_t dev = sc->dev; 759 struct ifreq *ifr = (struct ifreq *)data; 760 int mask __unused, err = 0; 761 762 switch (cmd) { 763 case SIOCSIFFLAGS: 764 device_printf(dev, "SIOCSIFFLAGS %x\n", if_getflags(ifp)); 765 PTNET_CORE_LOCK(sc); 766 if (if_getflags(ifp) & IFF_UP) { 767 /* Network stack wants the interface to be up. */ 768 err = ptnet_init_locked(sc); 769 } else { 770 /* Network stack wants the interface to be down. */ 771 err = ptnet_stop(sc); 772 } 773 /* We don't need to do anything to support IFF_PROMISC, 774 * since that is managed by the backend port. */ 775 PTNET_CORE_UNLOCK(sc); 776 break; 777 778 case SIOCSIFCAP: 779 device_printf(dev, "SIOCSIFCAP %x %x\n", 780 ifr->ifr_reqcap, if_getcapenable(ifp)); 781 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 782 #ifdef DEVICE_POLLING 783 if (mask & IFCAP_POLLING) { 784 struct ptnet_queue *pq; 785 int i; 786 787 if (ifr->ifr_reqcap & IFCAP_POLLING) { 788 err = ether_poll_register(ptnet_poll, ifp); 789 if (err) { 790 break; 791 } 792 /* Stop queues and sync with taskqueues. */ 793 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 794 for (i = 0; i < sc->num_rings; i++) { 795 pq = sc->queues + i; 796 /* Make sure the worker sees the 797 * IFF_DRV_RUNNING down. */ 798 PTNET_Q_LOCK(pq); 799 pq->atok->appl_need_kick = 0; 800 PTNET_Q_UNLOCK(pq); 801 /* Wait for rescheduling to finish. */ 802 if (pq->taskq) { 803 taskqueue_drain(pq->taskq, 804 &pq->task); 805 } 806 } 807 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 808 } else { 809 err = ether_poll_deregister(ifp); 810 for (i = 0; i < sc->num_rings; i++) { 811 pq = sc->queues + i; 812 PTNET_Q_LOCK(pq); 813 pq->atok->appl_need_kick = 1; 814 PTNET_Q_UNLOCK(pq); 815 } 816 } 817 } 818 #endif /* DEVICE_POLLING */ 819 if_setcapenable(ifp, ifr->ifr_reqcap); 820 break; 821 822 case SIOCSIFMTU: 823 /* We support any reasonable MTU.
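 * Here "reasonable" means anything between ETHERMIN and
 * PTNET_MAX_PKT_SIZE (64KB), as checked below.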
*/ 824 if (ifr->ifr_mtu < ETHERMIN || 825 ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { 826 err = EINVAL; 827 } else { 828 PTNET_CORE_LOCK(sc); 829 if_setmtu(ifp, ifr->ifr_mtu); 830 PTNET_CORE_UNLOCK(sc); 831 } 832 break; 833 834 case SIOCSIFMEDIA: 835 case SIOCGIFMEDIA: 836 err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); 837 break; 838 839 default: 840 err = ether_ioctl(ifp, cmd, data); 841 break; 842 } 843 844 return err; 845 } 846 847 static int 848 ptnet_init_locked(struct ptnet_softc *sc) 849 { 850 if_t ifp = sc->ifp; 851 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 852 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 853 unsigned int nm_buf_size; 854 int ret; 855 856 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 857 return 0; /* nothing to do */ 858 } 859 860 device_printf(sc->dev, "%s\n", __func__); 861 862 /* Translate offload capabilities according to if_capenable. */ 863 if_sethwassist(ifp, 0); 864 if (if_getcapenable(ifp) & IFCAP_TXCSUM) 865 if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD, 0); 866 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) 867 if_sethwassistbits(ifp, PTNET_CSUM_OFFLOAD_IPV6, 0); 868 if (if_getcapenable(ifp) & IFCAP_TSO4) 869 if_sethwassistbits(ifp, CSUM_IP_TSO, 0); 870 if (if_getcapenable(ifp) & IFCAP_TSO6) 871 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); 872 873 /* 874 * Prepare the interface for netmap mode access. 875 */ 876 netmap_update_config(na_dr); 877 878 ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); 879 if (ret) { 880 device_printf(sc->dev, "netmap_mem_finalize() failed\n"); 881 return ret; 882 } 883 884 if (sc->ptna->backend_users == 0) { 885 ret = ptnet_nm_krings_create(na_nm); 886 if (ret) { 887 device_printf(sc->dev, "ptnet_nm_krings_create() " 888 "failed\n"); 889 goto err_mem_finalize; 890 } 891 892 ret = netmap_mem_rings_create(na_dr); 893 if (ret) { 894 device_printf(sc->dev, "netmap_mem_rings_create() " 895 "failed\n"); 896 goto err_rings_create; 897 } 898 899 ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); 900 if (ret) { 901 device_printf(sc->dev, "netmap_mem_get_lut() " 902 "failed\n"); 903 goto err_get_lut; 904 } 905 } 906 907 ret = ptnet_nm_register(na_dr, 1 /* on */); 908 if (ret) { 909 goto err_register; 910 } 911 912 nm_buf_size = NETMAP_BUF_SIZE(na_dr); 913 914 KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); 915 sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; 916 device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, 917 sc->min_tx_space); 918 #ifdef PTNETMAP_STATS 919 callout_reset(&sc->tick, hz, ptnet_tick, sc); 920 #endif 921 922 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 923 924 return 0; 925 926 err_register: 927 memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); 928 err_get_lut: 929 netmap_mem_rings_delete(na_dr); 930 err_rings_create: 931 ptnet_nm_krings_delete(na_nm); 932 err_mem_finalize: 933 netmap_mem_deref(na_dr->nm_mem, na_dr); 934 935 return ret; 936 } 937 938 /* To be called under core lock. */ 939 static int 940 ptnet_stop(struct ptnet_softc *sc) 941 { 942 if_t ifp = sc->ifp; 943 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 944 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 945 int i; 946 947 device_printf(sc->dev, "%s\n", __func__); 948 949 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 950 return 0; /* nothing to do */ 951 } 952 953 /* Clear the driver-ready flag, and synchronize with all the queues, 954 * so that after this loop we are sure nobody is working anymore with 955 * the device. This scheme is taken from the vtnet driver. 
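 * Taking and immediately releasing each queue lock after clearing
 * IFF_DRV_RUNNING guarantees that any worker which saw the flag set
 * has finished and dropped its lock before we unregister.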
*/ 956 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 957 callout_stop(&sc->tick); 958 for (i = 0; i < sc->num_rings; i++) { 959 PTNET_Q_LOCK(sc->queues + i); 960 PTNET_Q_UNLOCK(sc->queues + i); 961 } 962 963 ptnet_nm_register(na_dr, 0 /* off */); 964 965 if (sc->ptna->backend_users == 0) { 966 netmap_mem_rings_delete(na_dr); 967 ptnet_nm_krings_delete(na_nm); 968 } 969 netmap_mem_deref(na_dr->nm_mem, na_dr); 970 971 return 0; 972 } 973 974 static void 975 ptnet_qflush(if_t ifp) 976 { 977 struct ptnet_softc *sc = if_getsoftc(ifp); 978 int i; 979 980 /* Flush all the bufrings and do the interface flush. */ 981 for (i = 0; i < sc->num_rings; i++) { 982 struct ptnet_queue *pq = sc->queues + i; 983 struct mbuf *m; 984 985 PTNET_Q_LOCK(pq); 986 if (pq->bufring) { 987 while ((m = buf_ring_dequeue_sc(pq->bufring))) { 988 m_freem(m); 989 } 990 } 991 PTNET_Q_UNLOCK(pq); 992 } 993 994 if_qflush(ifp); 995 } 996 997 static int 998 ptnet_media_change(if_t ifp) 999 { 1000 struct ptnet_softc *sc = if_getsoftc(ifp); 1001 struct ifmedia *ifm = &sc->media; 1002 1003 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 1004 return EINVAL; 1005 } 1006 1007 return 0; 1008 } 1009 1010 static uint64_t 1011 ptnet_get_counter(if_t ifp, ift_counter cnt) 1012 { 1013 struct ptnet_softc *sc = if_getsoftc(ifp); 1014 struct ptnet_queue_stats stats[2]; 1015 int i; 1016 1017 /* Accumulate statistics over the queues. */ 1018 memset(stats, 0, sizeof(stats)); 1019 for (i = 0; i < sc->num_rings; i++) { 1020 struct ptnet_queue *pq = sc->queues + i; 1021 int idx = (i < sc->num_tx_rings) ? 0 : 1; 1022 1023 stats[idx].packets += pq->stats.packets; 1024 stats[idx].bytes += pq->stats.bytes; 1025 stats[idx].errors += pq->stats.errors; 1026 stats[idx].iqdrops += pq->stats.iqdrops; 1027 stats[idx].mcasts += pq->stats.mcasts; 1028 } 1029 1030 switch (cnt) { 1031 case IFCOUNTER_IPACKETS: 1032 return (stats[1].packets); 1033 case IFCOUNTER_IQDROPS: 1034 return (stats[1].iqdrops); 1035 case IFCOUNTER_IERRORS: 1036 return (stats[1].errors); 1037 case IFCOUNTER_OPACKETS: 1038 return (stats[0].packets); 1039 case IFCOUNTER_OBYTES: 1040 return (stats[0].bytes); 1041 case IFCOUNTER_OMCASTS: 1042 return (stats[0].mcasts); 1043 default: 1044 return (if_get_counter_default(ifp, cnt)); 1045 } 1046 } 1047 1048 1049 #ifdef PTNETMAP_STATS 1050 /* Called under core lock. */ 1051 static void 1052 ptnet_tick(void *opaque) 1053 { 1054 struct ptnet_softc *sc = opaque; 1055 int i; 1056 1057 for (i = 0; i < sc->num_rings; i++) { 1058 struct ptnet_queue *pq = sc->queues + i; 1059 struct ptnet_queue_stats cur = pq->stats; 1060 struct timeval now; 1061 unsigned int delta; 1062 1063 microtime(&now); 1064 delta = now.tv_usec - sc->last_ts.tv_usec + 1065 (now.tv_sec - sc->last_ts.tv_sec) * 1000000; 1066 delta /= 1000; /* in milliseconds */ 1067 1068 if (delta == 0) 1069 continue; 1070 1071 device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " 1072 "intr %lu\n", i, delta, 1073 (cur.packets - pq->last_stats.packets), 1074 (cur.kicks - pq->last_stats.kicks), 1075 (cur.intrs - pq->last_stats.intrs)); 1076 pq->last_stats = cur; 1077 } 1078 microtime(&sc->last_ts); 1079 callout_schedule(&sc->tick, hz); 1080 } 1081 #endif /* PTNETMAP_STATS */ 1082 1083 static void 1084 ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) 1085 { 1086 /* We are always active, as the backend netmap port is 1087 * always open in netmap mode. 
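 * Report a fixed 10 Gbit/s full-duplex link, matching the single media
 * type registered with ifmedia_add() in ptnet_attach().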
*/ 1088 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 1089 ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; 1090 } 1091 1092 static uint32_t 1093 ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd) 1094 { 1095 /* 1096 * Write a command and read back error status, 1097 * with zero meaning success. 1098 */ 1099 bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); 1100 return bus_read_4(sc->iomem, PTNET_IO_PTCTL); 1101 } 1102 1103 static int 1104 ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info) 1105 { 1106 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1107 1108 info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 1109 info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 1110 info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 1111 info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 1112 info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); 1113 1114 device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n", 1115 info->num_tx_rings, info->num_rx_rings, 1116 info->num_tx_descs, info->num_rx_descs, 1117 info->rx_buf_maxsize); 1118 1119 return 0; 1120 } 1121 1122 static void 1123 ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) 1124 { 1125 int i; 1126 1127 /* Sync krings from the host, reading from 1128 * CSB. */ 1129 for (i = 0; i < sc->num_rings; i++) { 1130 struct nm_csb_atok *atok = sc->queues[i].atok; 1131 struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa; 1132 struct netmap_kring *kring; 1133 1134 if (i < na->num_tx_rings) { 1135 kring = na->tx_rings[i]; 1136 } else { 1137 kring = na->rx_rings[i - na->num_tx_rings]; 1138 } 1139 kring->rhead = kring->ring->head = atok->head; 1140 kring->rcur = kring->ring->cur = atok->cur; 1141 kring->nr_hwcur = ktoa->hwcur; 1142 kring->nr_hwtail = kring->rtail = 1143 kring->ring->tail = ktoa->hwtail; 1144 1145 nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, 1146 ktoa->hwcur, atok->head, atok->cur, 1147 ktoa->hwtail); 1148 nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", 1149 t, i, kring->nr_hwcur, kring->rhead, kring->rcur, 1150 kring->ring->head, kring->ring->cur, kring->nr_hwtail, 1151 kring->rtail, kring->ring->tail); 1152 } 1153 } 1154 1155 static void 1156 ptnet_update_vnet_hdr(struct ptnet_softc *sc) 1157 { 1158 unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; 1159 1160 bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); 1161 sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); 1162 sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; 1163 } 1164 1165 static int 1166 ptnet_nm_register(struct netmap_adapter *na, int onoff) 1167 { 1168 /* device-specific */ 1169 if_t ifp = na->ifp; 1170 struct ptnet_softc *sc = if_getsoftc(ifp); 1171 int native = (na == &sc->ptna->hwup.up); 1172 struct ptnet_queue *pq; 1173 int ret = 0; 1174 int i; 1175 1176 if (!onoff) { 1177 sc->ptna->backend_users--; 1178 } 1179 1180 /* If this is the last netmap client, guest interrupt enable flags may 1181 * be in arbitrary state. Since these flags are going to be used also 1182 * by the netdevice driver, we have to make sure to start with 1183 * notifications enabled. Also, schedule NAPI to flush pending packets 1184 * in the RX rings, since we will not receive further interrupts 1185 * until these will be processed. 
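 * Notifications are re-enabled by setting appl_need_kick on every
 * queue in the loop below.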
*/ 1186 if (native && !onoff && na->active_fds == 0) { 1187 nm_prinf("Exit netmap mode, re-enable interrupts"); 1188 for (i = 0; i < sc->num_rings; i++) { 1189 pq = sc->queues + i; 1190 pq->atok->appl_need_kick = 1; 1191 } 1192 } 1193 1194 if (onoff) { 1195 if (sc->ptna->backend_users == 0) { 1196 /* Initialize notification enable fields in the CSB. */ 1197 for (i = 0; i < sc->num_rings; i++) { 1198 pq = sc->queues + i; 1199 pq->ktoa->kern_need_kick = 1; 1200 pq->atok->appl_need_kick = 1201 (!(if_getcapenable(ifp) & IFCAP_POLLING) 1202 && i >= sc->num_tx_rings); 1203 } 1204 1205 /* Set the virtio-net header length. */ 1206 ptnet_update_vnet_hdr(sc); 1207 1208 /* Make sure the host adapter passed through is ready 1209 * for txsync/rxsync. */ 1210 ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE); 1211 if (ret) { 1212 return ret; 1213 } 1214 1215 /* Align the guest krings and rings to the state stored 1216 * in the CSB. */ 1217 ptnet_sync_from_csb(sc, na); 1218 } 1219 1220 /* If not native, don't call nm_set_native_flags, since we don't want 1221 * to replace the if_transmit method, nor to set NAF_NETMAP_ON. */ 1222 if (native) { 1223 netmap_krings_mode_commit(na, onoff); 1224 nm_set_native_flags(na); 1225 } 1226 1227 } else { 1228 if (native) { 1229 nm_clear_native_flags(na); 1230 netmap_krings_mode_commit(na, onoff); 1231 } 1232 1233 if (sc->ptna->backend_users == 0) { 1234 ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); 1235 } 1236 } 1237 1238 if (onoff) { 1239 sc->ptna->backend_users++; 1240 } 1241 1242 return ret; 1243 } 1244 1245 static int 1246 ptnet_nm_txsync(struct netmap_kring *kring, int flags) 1247 { 1248 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); 1249 struct ptnet_queue *pq = sc->queues + kring->ring_id; 1250 bool notify; 1251 1252 notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags); 1253 if (notify) { 1254 ptnet_kick(pq); 1255 } 1256 1257 return 0; 1258 } 1259 1260 static int 1261 ptnet_nm_rxsync(struct netmap_kring *kring, int flags) 1262 { 1263 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); 1264 struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; 1265 bool notify; 1266 1267 notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags); 1268 if (notify) { 1269 ptnet_kick(pq); 1270 } 1271 1272 return 0; 1273 } 1274 1275 static void 1276 ptnet_nm_intr(struct netmap_adapter *na, int onoff) 1277 { 1278 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1279 int i; 1280 1281 for (i = 0; i < sc->num_rings; i++) { 1282 struct ptnet_queue *pq = sc->queues + i; 1283 pq->atok->appl_need_kick = onoff; 1284 } 1285 } 1286 1287 static void 1288 ptnet_tx_intr(void *opaque) 1289 { 1290 struct ptnet_queue *pq = opaque; 1291 struct ptnet_softc *sc = pq->sc; 1292 1293 DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); 1294 #ifdef PTNETMAP_STATS 1295 pq->stats.intrs ++; 1296 #endif /* PTNETMAP_STATS */ 1297 1298 if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { 1299 return; 1300 } 1301 1302 /* Schedule the taskqueue to process pending transmit requests. 1303 * However, vtnet, if_em and if_igb just call ptnet_transmit() here, 1304 * at least when using MSI-X interrupts. The if_em driver, instead, 1305 * schedules the taskqueue when using legacy interrupts.
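 * Deferring the work to the taskqueue keeps this handler short, at the
 * cost of an extra wakeup and context switch per interrupt.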
*/ 1306 taskqueue_enqueue(pq->taskq, &pq->task); 1307 } 1308 1309 static void 1310 ptnet_rx_intr(void *opaque) 1311 { 1312 struct ptnet_queue *pq = opaque; 1313 struct ptnet_softc *sc = pq->sc; 1314 unsigned int unused; 1315 1316 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); 1317 #ifdef PTNETMAP_STATS 1318 pq->stats.intrs ++; 1319 #endif /* PTNETMAP_STATS */ 1320 1321 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { 1322 return; 1323 } 1324 1325 /* Like in the vtnet, if_igb and if_em drivers when using MSI-X interrupts, 1326 * receive-side processing is executed directly in the interrupt 1327 * service routine. Alternatively, we may schedule the taskqueue. */ 1328 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); 1329 } 1330 1331 static void 1332 ptnet_vlan_tag_remove(struct mbuf *m) 1333 { 1334 struct ether_vlan_header *evh; 1335 1336 evh = mtod(m, struct ether_vlan_header *); 1337 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); 1338 m->m_flags |= M_VLANTAG; 1339 1340 /* Strip the 802.1Q header. */ 1341 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, 1342 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1343 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1344 } 1345 1346 static void 1347 ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, 1348 unsigned int head, unsigned int sync_flags) 1349 { 1350 struct netmap_ring *ring = kring->ring; 1351 struct nm_csb_atok *atok = pq->atok; 1352 struct nm_csb_ktoa *ktoa = pq->ktoa; 1353 1354 /* Some packets have been pushed to the netmap ring. We have 1355 * to tell the host to process the new packets, updating cur 1356 * and head in the CSB. */ 1357 ring->head = ring->cur = head; 1358 1359 /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ 1360 kring->rcur = kring->rhead = head; 1361 1362 nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead); 1363 1364 /* Kick the host if needed. */ 1365 if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) { 1366 atok->sync_flags = sync_flags; 1367 ptnet_kick(pq); 1368 } 1369 } 1370 1371 #define PTNET_TX_NOSPACE(_h, _k, _min) \ 1372 ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ 1373 (_k)->rtail - (_h)) < (_min) 1374 1375 /* This function may be called by the network stack, or 1376 * by the taskqueue thread. */ 1377 static int 1378 ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, 1379 bool may_resched) 1380 { 1381 struct ptnet_softc *sc = pq->sc; 1382 bool have_vnet_hdr = sc->vnet_hdr_len; 1383 struct netmap_adapter *na = &sc->ptna->dr.up; 1384 if_t ifp = sc->ifp; 1385 unsigned int batch_count = 0; 1386 struct nm_csb_atok *atok; 1387 struct nm_csb_ktoa *ktoa; 1388 struct netmap_kring *kring; 1389 struct netmap_ring *ring; 1390 struct netmap_slot *slot; 1391 unsigned int count = 0; 1392 unsigned int minspace; 1393 unsigned int head; 1394 unsigned int lim; 1395 struct mbuf *mhead; 1396 struct mbuf *mf; 1397 int nmbuf_bytes; 1398 uint8_t *nmbuf; 1399 1400 if (!PTNET_Q_TRYLOCK(pq)) { 1401 /* We failed to acquire the lock, schedule the taskqueue.
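 * The lock is held by another transmitter, by the taskqueue worker or
 * by the polling loop; deferring to the taskqueue avoids spinning here.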
*/ 1402 nm_prlim(1, "Deferring TX work"); 1403 if (may_resched) { 1404 taskqueue_enqueue(pq->taskq, &pq->task); 1405 } 1406 1407 return 0; 1408 } 1409 1410 if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) { 1411 PTNET_Q_UNLOCK(pq); 1412 nm_prlim(1, "Interface is down"); 1413 return ENETDOWN; 1414 } 1415 1416 atok = pq->atok; 1417 ktoa = pq->ktoa; 1418 kring = na->tx_rings[pq->kring_id]; 1419 ring = kring->ring; 1420 lim = kring->nkr_num_slots - 1; 1421 head = ring->head; 1422 minspace = sc->min_tx_space; 1423 1424 while (count < budget) { 1425 if (PTNET_TX_NOSPACE(head, kring, minspace)) { 1426 /* We ran out of slot, let's see if the host has 1427 * freed up some, by reading hwcur and hwtail from 1428 * the CSB. */ 1429 ptnet_sync_tail(ktoa, kring); 1430 1431 if (PTNET_TX_NOSPACE(head, kring, minspace)) { 1432 /* Still no slots available. Reactivate the 1433 * interrupts so that we can be notified 1434 * when some free slots are made available by 1435 * the host. */ 1436 atok->appl_need_kick = 1; 1437 1438 /* Double check. We need a full barrier to 1439 * prevent the store to atok->appl_need_kick 1440 * to be reordered with the load from 1441 * ktoa->hwcur and ktoa->hwtail (store-load 1442 * barrier). */ 1443 nm_stld_barrier(); 1444 ptnet_sync_tail(ktoa, kring); 1445 if (likely(PTNET_TX_NOSPACE(head, kring, 1446 minspace))) { 1447 break; 1448 } 1449 1450 nm_prlim(1, "Found more slots by doublecheck"); 1451 /* More slots were freed before reactivating 1452 * the interrupts. */ 1453 atok->appl_need_kick = 0; 1454 } 1455 } 1456 1457 mhead = drbr_peek(ifp, pq->bufring); 1458 if (!mhead) { 1459 break; 1460 } 1461 1462 /* Initialize transmission state variables. */ 1463 slot = ring->slot + head; 1464 nmbuf = NMB(na, slot); 1465 nmbuf_bytes = 0; 1466 1467 /* If needed, prepare the virtio-net header at the beginning 1468 * of the first slot. */ 1469 if (have_vnet_hdr) { 1470 struct virtio_net_hdr *vh = 1471 (struct virtio_net_hdr *)nmbuf; 1472 1473 /* For performance, we could replace this memset() with 1474 * two 8-bytes-wide writes. */ 1475 memset(nmbuf, 0, PTNET_HDR_SIZE); 1476 if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { 1477 mhead = virtio_net_tx_offload(ifp, mhead, false, 1478 vh); 1479 if (unlikely(!mhead)) { 1480 /* Packet dropped because errors 1481 * occurred while preparing the vnet 1482 * header. Let's go ahead with the next 1483 * packet. 
*/ 1484 pq->stats.errors ++; 1485 drbr_advance(ifp, pq->bufring); 1486 continue; 1487 } 1488 } 1489 nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x " 1490 "csum_start %u csum_ofs %u hdr_len = %u " 1491 "gso_size %u gso_type %x", __func__, 1492 mhead->m_pkthdr.csum_flags, vh->flags, 1493 vh->csum_start, vh->csum_offset, vh->hdr_len, 1494 vh->gso_size, vh->gso_type); 1495 1496 nmbuf += PTNET_HDR_SIZE; 1497 nmbuf_bytes += PTNET_HDR_SIZE; 1498 } 1499 1500 for (mf = mhead; mf; mf = mf->m_next) { 1501 uint8_t *mdata = mf->m_data; 1502 int mlen = mf->m_len; 1503 1504 for (;;) { 1505 int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; 1506 1507 if (mlen < copy) { 1508 copy = mlen; 1509 } 1510 memcpy(nmbuf, mdata, copy); 1511 1512 mdata += copy; 1513 mlen -= copy; 1514 nmbuf += copy; 1515 nmbuf_bytes += copy; 1516 1517 if (!mlen) { 1518 break; 1519 } 1520 1521 slot->len = nmbuf_bytes; 1522 slot->flags = NS_MOREFRAG; 1523 1524 head = nm_next(head, lim); 1525 KASSERT(head != ring->tail, 1526 ("Unexpectedly run out of TX space")); 1527 slot = ring->slot + head; 1528 nmbuf = NMB(na, slot); 1529 nmbuf_bytes = 0; 1530 } 1531 } 1532 1533 /* Complete last slot and update head. */ 1534 slot->len = nmbuf_bytes; 1535 slot->flags = 0; 1536 head = nm_next(head, lim); 1537 1538 /* Consume the packet just processed. */ 1539 drbr_advance(ifp, pq->bufring); 1540 1541 /* Copy the packet to listeners. */ 1542 ETHER_BPF_MTAP(ifp, mhead); 1543 1544 pq->stats.packets ++; 1545 pq->stats.bytes += mhead->m_pkthdr.len; 1546 if (mhead->m_flags & M_MCAST) { 1547 pq->stats.mcasts ++; 1548 } 1549 1550 m_freem(mhead); 1551 1552 count ++; 1553 if (++batch_count == PTNET_TX_BATCH) { 1554 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1555 batch_count = 0; 1556 } 1557 } 1558 1559 if (batch_count) { 1560 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1561 } 1562 1563 if (count >= budget && may_resched) { 1564 DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n", 1565 drbr_inuse(ifp, pq->bufring))); 1566 taskqueue_enqueue(pq->taskq, &pq->task); 1567 } 1568 1569 PTNET_Q_UNLOCK(pq); 1570 1571 return count; 1572 } 1573 1574 static int 1575 ptnet_transmit(if_t ifp, struct mbuf *m) 1576 { 1577 struct ptnet_softc *sc = if_getsoftc(ifp); 1578 struct ptnet_queue *pq; 1579 unsigned int queue_idx; 1580 int err; 1581 1582 DBG(device_printf(sc->dev, "transmit %p\n", m)); 1583 1584 /* Insert 802.1Q header if needed. */ 1585 if (m->m_flags & M_VLANTAG) { 1586 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1587 if (m == NULL) { 1588 return ENOBUFS; 1589 } 1590 m->m_flags &= ~M_VLANTAG; 1591 } 1592 1593 /* Get the flow-id if available. */ 1594 queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? 1595 m->m_pkthdr.flowid : curcpu; 1596 1597 if (unlikely(queue_idx >= sc->num_tx_rings)) { 1598 queue_idx %= sc->num_tx_rings; 1599 } 1600 1601 pq = sc->queues + queue_idx; 1602 1603 err = drbr_enqueue(ifp, pq->bufring, m); 1604 if (err) { 1605 /* ENOBUFS when the bufring is full */ 1606 nm_prlim(1, "%s: drbr_enqueue() failed %d\n", 1607 __func__, err); 1608 pq->stats.errors ++; 1609 return err; 1610 } 1611 1612 if (if_getcapenable(ifp) & IFCAP_POLLING) { 1613 /* If polling is on, the transmit queues will be 1614 * drained by the poller. */ 1615 return 0; 1616 } 1617 1618 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); 1619 1620 return (err < 0) ? 
err : 0; 1621 } 1622 1623 static unsigned int 1624 ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) 1625 { 1626 struct netmap_ring *ring = kring->ring; 1627 struct netmap_slot *slot = ring->slot + head; 1628 1629 for (;;) { 1630 head = nm_next(head, kring->nkr_num_slots - 1); 1631 if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { 1632 break; 1633 } 1634 slot = ring->slot + head; 1635 } 1636 1637 return head; 1638 } 1639 1640 static inline struct mbuf * 1641 ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) 1642 { 1643 uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; 1644 1645 do { 1646 unsigned int copy; 1647 1648 if (mtail->m_len == MCLBYTES) { 1649 struct mbuf *mf; 1650 1651 mf = m_getcl(M_NOWAIT, MT_DATA, 0); 1652 if (unlikely(!mf)) { 1653 return NULL; 1654 } 1655 1656 mtail->m_next = mf; 1657 mtail = mf; 1658 mdata = mtod(mtail, uint8_t *); 1659 mtail->m_len = 0; 1660 } 1661 1662 copy = MCLBYTES - mtail->m_len; 1663 if (nmbuf_len < copy) { 1664 copy = nmbuf_len; 1665 } 1666 1667 memcpy(mdata, nmbuf, copy); 1668 1669 nmbuf += copy; 1670 nmbuf_len -= copy; 1671 mdata += copy; 1672 mtail->m_len += copy; 1673 } while (nmbuf_len); 1674 1675 return mtail; 1676 } 1677 1678 static int 1679 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) 1680 { 1681 struct ptnet_softc *sc = pq->sc; 1682 bool have_vnet_hdr = sc->vnet_hdr_len; 1683 struct nm_csb_atok *atok = pq->atok; 1684 struct nm_csb_ktoa *ktoa = pq->ktoa; 1685 struct netmap_adapter *na = &sc->ptna->dr.up; 1686 struct netmap_kring *kring = na->rx_rings[pq->kring_id]; 1687 struct netmap_ring *ring = kring->ring; 1688 unsigned int const lim = kring->nkr_num_slots - 1; 1689 unsigned int batch_count = 0; 1690 if_t ifp = sc->ifp; 1691 unsigned int count = 0; 1692 uint32_t head; 1693 1694 PTNET_Q_LOCK(pq); 1695 1696 if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) { 1697 goto unlock; 1698 } 1699 1700 kring->nr_kflags &= ~NKR_PENDINTR; 1701 1702 head = ring->head; 1703 while (count < budget) { 1704 uint32_t prev_head = head; 1705 struct mbuf *mhead, *mtail; 1706 struct virtio_net_hdr *vh; 1707 struct netmap_slot *slot; 1708 unsigned int nmbuf_len; 1709 uint8_t *nmbuf; 1710 int deliver = 1; /* the mbuf to the network stack. */ 1711 host_sync: 1712 if (head == ring->tail) { 1713 /* We ran out of slot, let's see if the host has 1714 * added some, by reading hwcur and hwtail from 1715 * the CSB. */ 1716 ptnet_sync_tail(ktoa, kring); 1717 1718 if (head == ring->tail) { 1719 /* Still no slots available. Reactivate 1720 * interrupts as they were disabled by the 1721 * host thread right before issuing the 1722 * last interrupt. */ 1723 atok->appl_need_kick = 1; 1724 1725 /* Double check for more completed RX slots. 1726 * We need a full barrier to prevent the store 1727 * to atok->appl_need_kick to be reordered with 1728 * the load from ktoa->hwcur and ktoa->hwtail 1729 * (store-load barrier). */ 1730 nm_stld_barrier(); 1731 ptnet_sync_tail(ktoa, kring); 1732 if (likely(head == ring->tail)) { 1733 break; 1734 } 1735 atok->appl_need_kick = 0; 1736 } 1737 } 1738 1739 /* Initialize ring state variables, possibly grabbing the 1740 * virtio-net header. */ 1741 slot = ring->slot + head; 1742 nmbuf = NMB(na, slot); 1743 nmbuf_len = slot->len; 1744 1745 vh = (struct virtio_net_hdr *)nmbuf; 1746 if (have_vnet_hdr) { 1747 if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { 1748 /* There is no good reason why host should 1749 * put the header in multiple netmap slots. 
1750 * If this is the case, discard. */ 1751 nm_prlim(1, "Fragmented vnet-hdr: dropping"); 1752 head = ptnet_rx_discard(kring, head); 1753 pq->stats.iqdrops ++; 1754 deliver = 0; 1755 goto skip; 1756 } 1757 nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u " 1758 "csum_ofs %u hdr_len = %u gso_size %u " 1759 "gso_type %x", __func__, vh->flags, 1760 vh->csum_start, vh->csum_offset, vh->hdr_len, 1761 vh->gso_size, vh->gso_type); 1762 nmbuf += PTNET_HDR_SIZE; 1763 nmbuf_len -= PTNET_HDR_SIZE; 1764 } 1765 1766 /* Allocate the head of a new mbuf chain. 1767 * We use m_getcl() to allocate an mbuf with standard cluster 1768 * size (MCLBYTES). In the future we could use m_getjcl() 1769 * to choose different sizes. */ 1770 mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1771 if (unlikely(mhead == NULL)) { 1772 device_printf(sc->dev, "%s: failed to allocate mbuf " 1773 "head\n", __func__); 1774 pq->stats.errors ++; 1775 break; 1776 } 1777 1778 /* Initialize the mbuf state variables. */ 1779 mhead->m_pkthdr.len = nmbuf_len; 1780 mtail->m_len = 0; 1781 1782 /* Scan all the netmap slots containing the current packet. */ 1783 for (;;) { 1784 DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " 1785 "len %u, flags %u\n", __func__, 1786 head, ring->tail, slot->len, 1787 slot->flags)); 1788 1789 mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len); 1790 if (unlikely(!mtail)) { 1791 /* Ouch. We ran out of memory while processing 1792 * a packet. We have to restore the previous 1793 * head position, free the mbuf chain, and 1794 * schedule the taskqueue to give the packet 1795 * another chance. */ 1796 device_printf(sc->dev, "%s: failed to allocate" 1797 " mbuf frag, reset head %u --> %u\n", 1798 __func__, head, prev_head); 1799 head = prev_head; 1800 m_freem(mhead); 1801 pq->stats.errors ++; 1802 if (may_resched) { 1803 taskqueue_enqueue(pq->taskq, 1804 &pq->task); 1805 } 1806 goto escape; 1807 } 1808 1809 /* We have to increment head irrespective of the 1810 * NS_MOREFRAG being set or not. */ 1811 head = nm_next(head, lim); 1812 1813 if (!(slot->flags & NS_MOREFRAG)) { 1814 break; 1815 } 1816 1817 if (unlikely(head == ring->tail)) { 1818 /* The very last slot prepared by the host has 1819 * the NS_MOREFRAG set. Drop it and continue 1820 * the outer cycle (to do the double-check). */ 1821 nm_prlim(1, "Incomplete packet: dropping"); 1822 m_freem(mhead); 1823 pq->stats.iqdrops ++; 1824 goto host_sync; 1825 } 1826 1827 slot = ring->slot + head; 1828 nmbuf = NMB(na, slot); 1829 nmbuf_len = slot->len; 1830 mhead->m_pkthdr.len += nmbuf_len; 1831 } 1832 1833 mhead->m_pkthdr.rcvif = ifp; 1834 mhead->m_pkthdr.csum_flags = 0; 1835 1836 /* Store the queue idx in the packet header. */ 1837 mhead->m_pkthdr.flowid = pq->kring_id; 1838 M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE); 1839 1840 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) { 1841 struct ether_header *eh; 1842 1843 eh = mtod(mhead, struct ether_header *); 1844 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1845 ptnet_vlan_tag_remove(mhead); 1846 /* 1847 * With the 802.1Q header removed, update the 1848 * checksum starting location accordingly. 
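 * csum_start is an offset from the beginning of the frame, so it
 * shrinks by ETHER_VLAN_ENCAP_LEN (4 bytes) once the tag is stripped.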
1849 */ 1850 if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1851 vh->csum_start -= ETHER_VLAN_ENCAP_LEN; 1852 } 1853 } 1854 1855 if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) { 1856 m_freem(mhead); 1857 nm_prlim(1, "Csum offload error: dropping"); 1858 pq->stats.iqdrops ++; 1859 deliver = 0; 1860 } 1861 1862 skip: 1863 count ++; 1864 if (++batch_count >= PTNET_RX_BATCH) { 1865 /* Some packets have been (or will be) pushed to the network 1866 * stack. We need to update the CSB to tell the host about 1867 * the new ring->cur and ring->head (RX buffer refill). */ 1868 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); 1869 batch_count = 0; 1870 } 1871 1872 if (likely(deliver)) { 1873 pq->stats.packets ++; 1874 pq->stats.bytes += mhead->m_pkthdr.len; 1875 1876 PTNET_Q_UNLOCK(pq); 1877 if_input(ifp, mhead); 1878 PTNET_Q_LOCK(pq); 1879 /* The ring->head index (and related indices) are 1880 * updated under pq lock by ptnet_ring_update(). 1881 * Since we dropped the lock to call if_input(), we 1882 * must reload ring->head and restart processing the 1883 * ring from there. */ 1884 head = ring->head; 1885 1886 if (unlikely(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) { 1887 /* The interface has gone down while we didn't 1888 * have the lock. Stop any processing and exit. */ 1889 goto unlock; 1890 } 1891 } 1892 } 1893 escape: 1894 if (batch_count) { 1895 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); 1896 1897 } 1898 1899 if (count >= budget && may_resched) { 1900 /* If we ran out of budget or the double-check found new 1901 * slots to process, schedule the taskqueue. */ 1902 DBG(nm_prlim(1, "out of budget: resched h %u t %u\n", 1903 head, ring->tail)); 1904 taskqueue_enqueue(pq->taskq, &pq->task); 1905 } 1906 unlock: 1907 PTNET_Q_UNLOCK(pq); 1908 1909 return count; 1910 } 1911 1912 static void 1913 ptnet_rx_task(void *context, int pending) 1914 { 1915 struct ptnet_queue *pq = context; 1916 1917 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); 1918 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); 1919 } 1920 1921 static void 1922 ptnet_tx_task(void *context, int pending) 1923 { 1924 struct ptnet_queue *pq = context; 1925 1926 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); 1927 ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); 1928 } 1929 1930 #ifdef DEVICE_POLLING 1931 /* We don't need to handle differently POLL_AND_CHECK_STATUS and 1932 * POLL_ONLY, since we don't have an Interrupt Status Register. */ 1933 static int 1934 ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget) 1935 { 1936 struct ptnet_softc *sc = if_getsoftc(ifp); 1937 unsigned int queue_budget; 1938 unsigned int count = 0; 1939 bool borrow = false; 1940 int i; 1941 1942 KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); 1943 queue_budget = MAX(budget / sc->num_rings, 1); 1944 nm_prlim(1, "Per-queue budget is %d", queue_budget); 1945 1946 while (budget) { 1947 unsigned int rcnt = 0; 1948 1949 for (i = 0; i < sc->num_rings; i++) { 1950 struct ptnet_queue *pq = sc->queues + i; 1951 1952 if (borrow) { 1953 queue_budget = MIN(queue_budget, budget); 1954 if (queue_budget == 0) { 1955 break; 1956 } 1957 } 1958 1959 if (i < sc->num_tx_rings) { 1960 rcnt += ptnet_drain_transmit_queue(pq, 1961 queue_budget, false); 1962 } else { 1963 rcnt += ptnet_rx_eof(pq, queue_budget, 1964 false); 1965 } 1966 } 1967 1968 if (!rcnt) { 1969 /* A scan of the queues gave no result, we can 1970 * stop here. 
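 * Leaving part of the budget unused is fine; the polling framework
 * will invoke us again on its next pass.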
*/ 1971 break; 1972 } 1973 1974 if (rcnt > budget) { 1975 /* This may happen when initial budget < sc->num_rings, 1976 * since one packet budget is given to each queue 1977 * anyway. Just pretend we didn't eat "so much". */ 1978 rcnt = budget; 1979 } 1980 count += rcnt; 1981 budget -= rcnt; 1982 borrow = true; 1983 } 1984 1985 1986 return count; 1987 } 1988 #endif /* DEVICE_POLLING */ 1989 #endif /* WITH_PTNETMAP */ 1990