1 /*- 2 * Copyright (c) 2016, Vincenzo Maffione 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 /* Driver for ptnet paravirtualized network device. */ 30 31 #include <sys/cdefs.h> 32 33 #include <sys/types.h> 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/socket.h> 42 #include <sys/sysctl.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/taskqueue.h> 46 #include <sys/smp.h> 47 #include <sys/time.h> 48 #include <machine/smp.h> 49 50 #include <vm/uma.h> 51 #include <vm/vm.h> 52 #include <vm/pmap.h> 53 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_var.h> 57 #include <net/if_arp.h> 58 #include <net/if_dl.h> 59 #include <net/if_types.h> 60 #include <net/if_media.h> 61 #include <net/if_vlan_var.h> 62 #include <net/bpf.h> 63 64 #include <netinet/in_systm.h> 65 #include <netinet/in.h> 66 #include <netinet/ip.h> 67 #include <netinet/ip6.h> 68 #include <netinet6/ip6_var.h> 69 #include <netinet/udp.h> 70 #include <netinet/tcp.h> 71 #include <netinet/sctp.h> 72 73 #include <machine/bus.h> 74 #include <machine/resource.h> 75 #include <sys/bus.h> 76 #include <sys/rman.h> 77 78 #include <dev/pci/pcivar.h> 79 #include <dev/pci/pcireg.h> 80 81 #include "opt_inet.h" 82 #include "opt_inet6.h" 83 84 #include <sys/selinfo.h> 85 #include <net/netmap.h> 86 #include <dev/netmap/netmap_kern.h> 87 #include <net/netmap_virt.h> 88 #include <dev/netmap/netmap_mem2.h> 89 #include <dev/virtio/network/virtio_net.h> 90 91 #ifndef PTNET_CSB_ALLOC 92 #error "No support for on-device CSB" 93 #endif 94 95 #ifndef INET 96 #error "INET not defined, cannot support offloadings" 97 #endif 98 99 #if __FreeBSD_version >= 1100000 100 static uint64_t ptnet_get_counter(if_t, ift_counter); 101 #else 102 typedef struct ifnet *if_t; 103 #define if_getsoftc(_ifp) (_ifp)->if_softc 104 #endif 105 106 //#define PTNETMAP_STATS 107 //#define DEBUG 108 #ifdef DEBUG 109 #define DBG(x) x 110 #else /* !DEBUG */ 111 #define DBG(x) 112 #endif /* !DEBUG */ 113 114 extern int ptnet_vnet_hdr; /* Tunable parameter */ 115 116 struct ptnet_softc; 117 118 struct ptnet_queue_stats { 
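    /* Per-queue counters; ptnet_get_counter() sums them over all TX
     * or RX queues to answer interface-level statistics queries. */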
119 uint64_t packets; /* if_[io]packets */ 120 uint64_t bytes; /* if_[io]bytes */ 121 uint64_t errors; /* if_[io]errors */ 122 uint64_t iqdrops; /* if_iqdrops */ 123 uint64_t mcasts; /* if_[io]mcasts */ 124 #ifdef PTNETMAP_STATS 125 uint64_t intrs; 126 uint64_t kicks; 127 #endif /* PTNETMAP_STATS */ 128 }; 129 130 struct ptnet_queue { 131 struct ptnet_softc *sc; 132 struct resource *irq; 133 void *cookie; 134 int kring_id; 135 struct ptnet_ring *ptring; 136 unsigned int kick; 137 struct mtx lock; 138 struct buf_ring *bufring; /* for TX queues */ 139 struct ptnet_queue_stats stats; 140 #ifdef PTNETMAP_STATS 141 struct ptnet_queue_stats last_stats; 142 #endif /* PTNETMAP_STATS */ 143 struct taskqueue *taskq; 144 struct task task; 145 char lock_name[16]; 146 }; 147 148 #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) 149 #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) 150 #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) 151 152 struct ptnet_softc { 153 device_t dev; 154 if_t ifp; 155 struct ifmedia media; 156 struct mtx lock; 157 char lock_name[16]; 158 char hwaddr[ETHER_ADDR_LEN]; 159 160 /* Mirror of PTFEAT register. */ 161 uint32_t ptfeatures; 162 unsigned int vnet_hdr_len; 163 164 /* PCI BARs support. */ 165 struct resource *iomem; 166 struct resource *msix_mem; 167 168 unsigned int num_rings; 169 unsigned int num_tx_rings; 170 struct ptnet_queue *queues; 171 struct ptnet_queue *rxqueues; 172 struct ptnet_csb *csb; 173 174 unsigned int min_tx_space; 175 176 struct netmap_pt_guest_adapter *ptna; 177 178 struct callout tick; 179 #ifdef PTNETMAP_STATS 180 struct timeval last_ts; 181 #endif /* PTNETMAP_STATS */ 182 }; 183 184 #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) 185 #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 186 187 static int ptnet_probe(device_t); 188 static int ptnet_attach(device_t); 189 static int ptnet_detach(device_t); 190 static int ptnet_suspend(device_t); 191 static int ptnet_resume(device_t); 192 static int ptnet_shutdown(device_t); 193 194 static void ptnet_init(void *opaque); 195 static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); 196 static int ptnet_init_locked(struct ptnet_softc *sc); 197 static int ptnet_stop(struct ptnet_softc *sc); 198 static int ptnet_transmit(if_t ifp, struct mbuf *m); 199 static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, 200 unsigned int budget, 201 bool may_resched); 202 static void ptnet_qflush(if_t ifp); 203 static void ptnet_tx_task(void *context, int pending); 204 205 static int ptnet_media_change(if_t ifp); 206 static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); 207 #ifdef PTNETMAP_STATS 208 static void ptnet_tick(void *opaque); 209 #endif 210 211 static int ptnet_irqs_init(struct ptnet_softc *sc); 212 static void ptnet_irqs_fini(struct ptnet_softc *sc); 213 214 static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd); 215 static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, 216 unsigned *txd, unsigned *rxr, unsigned *rxd); 217 static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); 218 static int ptnet_nm_register(struct netmap_adapter *na, int onoff); 219 static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); 220 static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); 221 222 static void ptnet_tx_intr(void *opaque); 223 static void ptnet_rx_intr(void *opaque); 224 225 static unsigned ptnet_rx_discard(struct netmap_kring *kring, 226 unsigned int head); 227 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, 228 bool 
                may_resched);
static void ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif

static device_method_t ptnet_methods[] = {
    DEVMETHOD(device_probe,     ptnet_probe),
    DEVMETHOD(device_attach,    ptnet_attach),
    DEVMETHOD(device_detach,    ptnet_detach),
    DEVMETHOD(device_suspend,   ptnet_suspend),
    DEVMETHOD(device_resume,    ptnet_resume),
    DEVMETHOD(device_shutdown,  ptnet_shutdown),
    DEVMETHOD_END
};

static driver_t ptnet_driver = {
    "ptnet",
    ptnet_methods,
    sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
                      NULL, NULL, SI_ORDER_MIDDLE + 2);

static int
ptnet_probe(device_t dev)
{
    if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
        pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
        return (ENXIO);
    }

    device_set_desc(dev, "ptnet network adapter");

    return (BUS_PROBE_DEFAULT);
}

static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
    pq->stats.kicks++;
#endif /* PTNETMAP_STATS */
    bus_write_4(pq->sc->iomem, pq->kick, 0);
}

#define PTNET_BUF_RING_SIZE     4096
#define PTNET_RX_BUDGET         512
#define PTNET_RX_BATCH          1
#define PTNET_TX_BUDGET         512
#define PTNET_TX_BATCH          64
#define PTNET_HDR_SIZE          sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE      65536

#define PTNET_CSUM_OFFLOAD      (CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\
                                 CSUM_SCTP_IPV6)
#define PTNET_ALL_OFFLOAD       (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
                                 PTNET_CSUM_OFFLOAD_IPV6)

static int
ptnet_attach(device_t dev)
{
    uint32_t ptfeatures = PTNETMAP_F_BASE;
    unsigned int num_rx_rings, num_tx_rings;
    struct netmap_adapter na_arg;
    unsigned int nifp_offset;
    struct ptnet_softc *sc;
    if_t ifp;
    uint32_t macreg;
    int err, rid;
    int i;

    sc = device_get_softc(dev);
    sc->dev = dev;

    /* Setup PCI resources. */
    pci_enable_busmaster(dev);

    rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
    sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
                                       RF_ACTIVE);
    if (sc->iomem == NULL) {
        device_printf(dev, "Failed to map I/O BAR\n");
        return (ENXIO);
    }

    /* Check if we are supported by the hypervisor. If not,
     * bail out immediately. */
    if (ptnet_vnet_hdr) {
        ptfeatures |= PTNETMAP_F_VNET_HDR;
    }
    bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
    ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
    if (!(ptfeatures & PTNETMAP_F_BASE)) {
        device_printf(dev, "Hypervisor does not support netmap "
                           "passthrough\n");
        err = ENXIO;
        goto err_path;
    }
    sc->ptfeatures = ptfeatures;

    /* Allocate CSB and carry out CSB allocation protocol (CSBBAH first,
     * then CSBBAL). */
    sc->csb = malloc(sizeof(struct ptnet_csb), M_DEVBUF,
                     M_NOWAIT | M_ZERO);
    if (sc->csb == NULL) {
        device_printf(dev, "Failed to allocate CSB\n");
        err = ENOMEM;
        goto err_path;
    }

    {
        /*
         * We use uint64_t rather than vm_paddr_t since we
         * need 64 bit addresses even on 32 bit platforms.
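         * The address computed below is then programmed into the
         * CSBBAH (high word) and CSBBAL (low word) registers, in this
         * order, as the allocation protocol requires.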
347 */ 348 uint64_t paddr = vtophys(sc->csb); 349 350 bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 351 (paddr >> 32) & 0xffffffff); 352 bus_write_4(sc->iomem, PTNET_IO_CSBBAL, paddr & 0xffffffff); 353 } 354 355 num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 356 num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 357 sc->num_rings = num_tx_rings + num_rx_rings; 358 sc->num_tx_rings = num_tx_rings; 359 360 /* Allocate and initialize per-queue data structures. */ 361 sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, 362 M_DEVBUF, M_NOWAIT | M_ZERO); 363 if (sc->queues == NULL) { 364 err = ENOMEM; 365 goto err_path; 366 } 367 sc->rxqueues = sc->queues + num_tx_rings; 368 369 for (i = 0; i < sc->num_rings; i++) { 370 struct ptnet_queue *pq = sc->queues + i; 371 372 pq->sc = sc; 373 pq->kring_id = i; 374 pq->kick = PTNET_IO_KICK_BASE + 4 * i; 375 pq->ptring = sc->csb->rings + i; 376 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", 377 device_get_nameunit(dev), i); 378 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); 379 if (i >= num_tx_rings) { 380 /* RX queue: fix kring_id. */ 381 pq->kring_id -= num_tx_rings; 382 } else { 383 /* TX queue: allocate buf_ring. */ 384 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, 385 M_DEVBUF, M_NOWAIT, &pq->lock); 386 if (pq->bufring == NULL) { 387 err = ENOMEM; 388 goto err_path; 389 } 390 } 391 } 392 393 sc->min_tx_space = 64; /* Safe initial value. */ 394 395 err = ptnet_irqs_init(sc); 396 if (err) { 397 goto err_path; 398 } 399 400 /* Setup Ethernet interface. */ 401 sc->ifp = ifp = if_alloc(IFT_ETHER); 402 if (ifp == NULL) { 403 device_printf(dev, "Failed to allocate ifnet\n"); 404 err = ENOMEM; 405 goto err_path; 406 } 407 408 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 409 ifp->if_baudrate = IF_Gbps(10); 410 ifp->if_softc = sc; 411 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; 412 ifp->if_init = ptnet_init; 413 ifp->if_ioctl = ptnet_ioctl; 414 #if __FreeBSD_version >= 1100000 415 ifp->if_get_counter = ptnet_get_counter; 416 #endif 417 ifp->if_transmit = ptnet_transmit; 418 ifp->if_qflush = ptnet_qflush; 419 420 ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, 421 ptnet_media_status); 422 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); 423 ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); 424 425 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); 426 sc->hwaddr[0] = (macreg >> 8) & 0xff; 427 sc->hwaddr[1] = macreg & 0xff; 428 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); 429 sc->hwaddr[2] = (macreg >> 24) & 0xff; 430 sc->hwaddr[3] = (macreg >> 16) & 0xff; 431 sc->hwaddr[4] = (macreg >> 8) & 0xff; 432 sc->hwaddr[5] = macreg & 0xff; 433 434 ether_ifattach(ifp, sc->hwaddr); 435 436 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 437 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 438 439 if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { 440 /* Similarly to what the vtnet driver does, we can emulate 441 * VLAN offloadings by inserting and removing the 802.1Q 442 * header during transmit and receive. We are then able 443 * to do checksum offloading of VLAN frames. */ 444 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 445 | IFCAP_VLAN_HWCSUM 446 | IFCAP_TSO | IFCAP_LRO 447 | IFCAP_VLAN_HWTSO 448 | IFCAP_VLAN_HWTAGGING; 449 } 450 451 ifp->if_capenable = ifp->if_capabilities; 452 #ifdef DEVICE_POLLING 453 /* Don't enable polling by default. 
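     * It can still be enabled at runtime via SIOCSIFCAP; see the
     * IFCAP_POLLING handling in ptnet_ioctl().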
*/ 454 ifp->if_capabilities |= IFCAP_POLLING; 455 #endif 456 snprintf(sc->lock_name, sizeof(sc->lock_name), 457 "%s", device_get_nameunit(dev)); 458 mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); 459 callout_init_mtx(&sc->tick, &sc->lock, 0); 460 461 /* Prepare a netmap_adapter struct instance to do netmap_attach(). */ 462 nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); 463 memset(&na_arg, 0, sizeof(na_arg)); 464 na_arg.ifp = ifp; 465 na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 466 na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 467 na_arg.num_tx_rings = num_tx_rings; 468 na_arg.num_rx_rings = num_rx_rings; 469 na_arg.nm_config = ptnet_nm_config; 470 na_arg.nm_krings_create = ptnet_nm_krings_create; 471 na_arg.nm_krings_delete = ptnet_nm_krings_delete; 472 na_arg.nm_dtor = ptnet_nm_dtor; 473 na_arg.nm_register = ptnet_nm_register; 474 na_arg.nm_txsync = ptnet_nm_txsync; 475 na_arg.nm_rxsync = ptnet_nm_rxsync; 476 477 netmap_pt_guest_attach(&na_arg, sc->csb, nifp_offset, ptnet_nm_ptctl); 478 479 /* Now a netmap adapter for this ifp has been allocated, and it 480 * can be accessed through NA(ifp). We also have to initialize the CSB 481 * pointer. */ 482 sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); 483 484 /* If virtio-net header was negotiated, set the virt_hdr_len field in 485 * the netmap adapter, to inform users that this netmap adapter requires 486 * the application to deal with the headers. */ 487 ptnet_update_vnet_hdr(sc); 488 489 device_printf(dev, "%s() completed\n", __func__); 490 491 return (0); 492 493 err_path: 494 ptnet_detach(dev); 495 return err; 496 } 497 498 static int 499 ptnet_detach(device_t dev) 500 { 501 struct ptnet_softc *sc = device_get_softc(dev); 502 int i; 503 504 #ifdef DEVICE_POLLING 505 if (sc->ifp->if_capenable & IFCAP_POLLING) { 506 ether_poll_deregister(sc->ifp); 507 } 508 #endif 509 callout_drain(&sc->tick); 510 511 if (sc->queues) { 512 /* Drain taskqueues before calling if_detach. */ 513 for (i = 0; i < sc->num_rings; i++) { 514 struct ptnet_queue *pq = sc->queues + i; 515 516 if (pq->taskq) { 517 taskqueue_drain(pq->taskq, &pq->task); 518 } 519 } 520 } 521 522 if (sc->ifp) { 523 ether_ifdetach(sc->ifp); 524 525 /* Uninitialize netmap adapters for this device. 
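         * The adapters were set up by netmap_pt_guest_attach() at
         * attach time.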
*/ 526 netmap_detach(sc->ifp); 527 528 ifmedia_removeall(&sc->media); 529 if_free(sc->ifp); 530 sc->ifp = NULL; 531 } 532 533 ptnet_irqs_fini(sc); 534 535 if (sc->csb) { 536 bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 0); 537 bus_write_4(sc->iomem, PTNET_IO_CSBBAL, 0); 538 free(sc->csb, M_DEVBUF); 539 sc->csb = NULL; 540 } 541 542 if (sc->queues) { 543 for (i = 0; i < sc->num_rings; i++) { 544 struct ptnet_queue *pq = sc->queues + i; 545 546 if (mtx_initialized(&pq->lock)) { 547 mtx_destroy(&pq->lock); 548 } 549 if (pq->bufring != NULL) { 550 buf_ring_free(pq->bufring, M_DEVBUF); 551 } 552 } 553 free(sc->queues, M_DEVBUF); 554 sc->queues = NULL; 555 } 556 557 if (sc->iomem) { 558 bus_release_resource(dev, SYS_RES_IOPORT, 559 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); 560 sc->iomem = NULL; 561 } 562 563 mtx_destroy(&sc->lock); 564 565 device_printf(dev, "%s() completed\n", __func__); 566 567 return (0); 568 } 569 570 static int 571 ptnet_suspend(device_t dev) 572 { 573 struct ptnet_softc *sc; 574 575 sc = device_get_softc(dev); 576 (void)sc; 577 578 return (0); 579 } 580 581 static int 582 ptnet_resume(device_t dev) 583 { 584 struct ptnet_softc *sc; 585 586 sc = device_get_softc(dev); 587 (void)sc; 588 589 return (0); 590 } 591 592 static int 593 ptnet_shutdown(device_t dev) 594 { 595 /* 596 * Suspend already does all of what we need to 597 * do here; we just never expect to be resumed. 598 */ 599 return (ptnet_suspend(dev)); 600 } 601 602 static int 603 ptnet_irqs_init(struct ptnet_softc *sc) 604 { 605 int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); 606 int nvecs = sc->num_rings; 607 device_t dev = sc->dev; 608 int err = ENOSPC; 609 int cpu_cur; 610 int i; 611 612 if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { 613 device_printf(dev, "Could not find MSI-X capability\n"); 614 return (ENXIO); 615 } 616 617 sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 618 &rid, RF_ACTIVE); 619 if (sc->msix_mem == NULL) { 620 device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); 621 return (ENXIO); 622 } 623 624 if (pci_msix_count(dev) < nvecs) { 625 device_printf(dev, "Not enough MSI-X vectors\n"); 626 goto err_path; 627 } 628 629 err = pci_alloc_msix(dev, &nvecs); 630 if (err) { 631 device_printf(dev, "Failed to allocate MSI-X vectors\n"); 632 goto err_path; 633 } 634 635 for (i = 0; i < nvecs; i++) { 636 struct ptnet_queue *pq = sc->queues + i; 637 638 rid = i + 1; 639 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 640 RF_ACTIVE); 641 if (pq->irq == NULL) { 642 device_printf(dev, "Failed to allocate interrupt " 643 "for queue #%d\n", i); 644 err = ENOSPC; 645 goto err_path; 646 } 647 } 648 649 cpu_cur = CPU_FIRST(); 650 for (i = 0; i < nvecs; i++) { 651 struct ptnet_queue *pq = sc->queues + i; 652 void (*handler)(void *) = ptnet_tx_intr; 653 654 if (i >= sc->num_tx_rings) { 655 handler = ptnet_rx_intr; 656 } 657 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, 658 NULL /* intr_filter */, handler, 659 pq, &pq->cookie); 660 if (err) { 661 device_printf(dev, "Failed to register intr handler " 662 "for queue #%d\n", i); 663 goto err_path; 664 } 665 666 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); 667 #if 0 668 bus_bind_intr(sc->dev, pq->irq, cpu_cur); 669 #endif 670 cpu_cur = CPU_NEXT(cpu_cur); 671 } 672 673 device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); 674 675 cpu_cur = CPU_FIRST(); 676 for (i = 0; i < nvecs; i++) { 677 struct ptnet_queue *pq = sc->queues + i; 678 static void (*handler)(void *context, int pending); 679 680 handler = (i < sc->num_tx_rings) 
                    ? ptnet_tx_task : ptnet_rx_task;

        TASK_INIT(&pq->task, 0, handler, pq);
        pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
                            taskqueue_thread_enqueue, &pq->taskq);
        taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
                            device_get_nameunit(sc->dev), cpu_cur);
        cpu_cur = CPU_NEXT(cpu_cur);
    }

    return 0;
err_path:
    ptnet_irqs_fini(sc);
    return err;
}

static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
    device_t dev = sc->dev;
    int i;

    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;

        if (pq->taskq) {
            taskqueue_free(pq->taskq);
            pq->taskq = NULL;
        }

        if (pq->cookie) {
            bus_teardown_intr(dev, pq->irq, pq->cookie);
            pq->cookie = NULL;
        }

        if (pq->irq) {
            bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
            pq->irq = NULL;
        }
    }

    if (sc->msix_mem) {
        pci_release_msi(dev);

        bus_release_resource(dev, SYS_RES_MEMORY,
                             PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
                             sc->msix_mem);
        sc->msix_mem = NULL;
    }
}

static void
ptnet_init(void *opaque)
{
    struct ptnet_softc *sc = opaque;

    PTNET_CORE_LOCK(sc);
    ptnet_init_locked(sc);
    PTNET_CORE_UNLOCK(sc);
}

static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
    struct ptnet_softc *sc = if_getsoftc(ifp);
    device_t dev = sc->dev;
    struct ifreq *ifr = (struct ifreq *)data;
    int mask, err = 0;

    switch (cmd) {
    case SIOCSIFFLAGS:
        device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
        PTNET_CORE_LOCK(sc);
        if (ifp->if_flags & IFF_UP) {
            /* Network stack wants the interface to be up. */
            err = ptnet_init_locked(sc);
        } else {
            /* Network stack wants the interface to be down. */
            err = ptnet_stop(sc);
        }
        /* We don't need to do anything to support IFF_PROMISC,
         * since that is managed by the backend port. */
        PTNET_CORE_UNLOCK(sc);
        break;

    case SIOCSIFCAP:
        device_printf(dev, "SIOCSIFCAP %x %x\n",
                      ifr->ifr_reqcap, ifp->if_capenable);
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
        if (mask & IFCAP_POLLING) {
            struct ptnet_queue *pq;
            int i;

            if (ifr->ifr_reqcap & IFCAP_POLLING) {
                err = ether_poll_register(ptnet_poll, ifp);
                if (err) {
                    break;
                }
                /* Stop queues and sync with taskqueues. */
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                for (i = 0; i < sc->num_rings; i++) {
                    pq = sc->queues + i;
                    /* Make sure the worker sees the
                     * IFF_DRV_RUNNING down. */
                    PTNET_Q_LOCK(pq);
                    pq->ptring->guest_need_kick = 0;
                    PTNET_Q_UNLOCK(pq);
                    /* Wait for rescheduling to finish. */
                    if (pq->taskq) {
                        taskqueue_drain(pq->taskq,
                                        &pq->task);
                    }
                }
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
            } else {
                err = ether_poll_deregister(ifp);
                for (i = 0; i < sc->num_rings; i++) {
                    pq = sc->queues + i;
                    PTNET_Q_LOCK(pq);
                    pq->ptring->guest_need_kick = 1;
                    PTNET_Q_UNLOCK(pq);
                }
            }
        }
#endif /* DEVICE_POLLING */
        ifp->if_capenable = ifr->ifr_reqcap;
        break;

    case SIOCSIFMTU:
        /* We support any reasonable MTU.
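         * The only bounds enforced below are ETHERMIN and
         * PTNET_MAX_PKT_SIZE.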
*/ 811 if (ifr->ifr_mtu < ETHERMIN || 812 ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { 813 err = EINVAL; 814 } else { 815 PTNET_CORE_LOCK(sc); 816 ifp->if_mtu = ifr->ifr_mtu; 817 PTNET_CORE_UNLOCK(sc); 818 } 819 break; 820 821 case SIOCSIFMEDIA: 822 case SIOCGIFMEDIA: 823 err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); 824 break; 825 826 default: 827 err = ether_ioctl(ifp, cmd, data); 828 break; 829 } 830 831 return err; 832 } 833 834 static int 835 ptnet_init_locked(struct ptnet_softc *sc) 836 { 837 if_t ifp = sc->ifp; 838 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 839 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 840 unsigned int nm_buf_size; 841 int ret; 842 843 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 844 return 0; /* nothing to do */ 845 } 846 847 device_printf(sc->dev, "%s\n", __func__); 848 849 /* Translate offload capabilities according to if_capenable. */ 850 ifp->if_hwassist = 0; 851 if (ifp->if_capenable & IFCAP_TXCSUM) 852 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; 853 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 854 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; 855 if (ifp->if_capenable & IFCAP_TSO4) 856 ifp->if_hwassist |= CSUM_IP_TSO; 857 if (ifp->if_capenable & IFCAP_TSO6) 858 ifp->if_hwassist |= CSUM_IP6_TSO; 859 860 /* 861 * Prepare the interface for netmap mode access. 862 */ 863 netmap_update_config(na_dr); 864 865 ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); 866 if (ret) { 867 device_printf(sc->dev, "netmap_mem_finalize() failed\n"); 868 return ret; 869 } 870 871 if (sc->ptna->backend_regifs == 0) { 872 ret = ptnet_nm_krings_create(na_nm); 873 if (ret) { 874 device_printf(sc->dev, "ptnet_nm_krings_create() " 875 "failed\n"); 876 goto err_mem_finalize; 877 } 878 879 ret = netmap_mem_rings_create(na_dr); 880 if (ret) { 881 device_printf(sc->dev, "netmap_mem_rings_create() " 882 "failed\n"); 883 goto err_rings_create; 884 } 885 886 ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); 887 if (ret) { 888 device_printf(sc->dev, "netmap_mem_get_lut() " 889 "failed\n"); 890 goto err_get_lut; 891 } 892 } 893 894 ret = ptnet_nm_register(na_dr, 1 /* on */); 895 if (ret) { 896 goto err_register; 897 } 898 899 nm_buf_size = NETMAP_BUF_SIZE(na_dr); 900 901 KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); 902 sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; 903 device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, 904 sc->min_tx_space); 905 #ifdef PTNETMAP_STATS 906 callout_reset(&sc->tick, hz, ptnet_tick, sc); 907 #endif 908 909 ifp->if_drv_flags |= IFF_DRV_RUNNING; 910 911 return 0; 912 913 err_register: 914 memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); 915 err_get_lut: 916 netmap_mem_rings_delete(na_dr); 917 err_rings_create: 918 ptnet_nm_krings_delete(na_nm); 919 err_mem_finalize: 920 netmap_mem_deref(na_dr->nm_mem, na_dr); 921 922 return ret; 923 } 924 925 /* To be called under core lock. */ 926 static int 927 ptnet_stop(struct ptnet_softc *sc) 928 { 929 if_t ifp = sc->ifp; 930 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 931 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 932 int i; 933 934 device_printf(sc->dev, "%s\n", __func__); 935 936 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 937 return 0; /* nothing to do */ 938 } 939 940 /* Clear the driver-ready flag, and synchronize with all the queues, 941 * so that after this loop we are sure nobody is working anymore with 942 * the device. This scheme is taken from the vtnet driver. 
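     * Taking and releasing each queue lock acts as a barrier: any
     * worker that saw IFF_DRV_RUNNING set has dropped the lock by the
     * time we acquire it.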
*/ 943 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 944 callout_stop(&sc->tick); 945 for (i = 0; i < sc->num_rings; i++) { 946 PTNET_Q_LOCK(sc->queues + i); 947 PTNET_Q_UNLOCK(sc->queues + i); 948 } 949 950 ptnet_nm_register(na_dr, 0 /* off */); 951 952 if (sc->ptna->backend_regifs == 0) { 953 netmap_mem_rings_delete(na_dr); 954 ptnet_nm_krings_delete(na_nm); 955 } 956 netmap_mem_deref(na_dr->nm_mem, na_dr); 957 958 return 0; 959 } 960 961 static void 962 ptnet_qflush(if_t ifp) 963 { 964 struct ptnet_softc *sc = if_getsoftc(ifp); 965 int i; 966 967 /* Flush all the bufrings and do the interface flush. */ 968 for (i = 0; i < sc->num_rings; i++) { 969 struct ptnet_queue *pq = sc->queues + i; 970 struct mbuf *m; 971 972 PTNET_Q_LOCK(pq); 973 if (pq->bufring) { 974 while ((m = buf_ring_dequeue_sc(pq->bufring))) { 975 m_freem(m); 976 } 977 } 978 PTNET_Q_UNLOCK(pq); 979 } 980 981 if_qflush(ifp); 982 } 983 984 static int 985 ptnet_media_change(if_t ifp) 986 { 987 struct ptnet_softc *sc = if_getsoftc(ifp); 988 struct ifmedia *ifm = &sc->media; 989 990 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 991 return EINVAL; 992 } 993 994 return 0; 995 } 996 997 #if __FreeBSD_version >= 1100000 998 static uint64_t 999 ptnet_get_counter(if_t ifp, ift_counter cnt) 1000 { 1001 struct ptnet_softc *sc = if_getsoftc(ifp); 1002 struct ptnet_queue_stats stats[2]; 1003 int i; 1004 1005 /* Accumulate statistics over the queues. */ 1006 memset(stats, 0, sizeof(stats)); 1007 for (i = 0; i < sc->num_rings; i++) { 1008 struct ptnet_queue *pq = sc->queues + i; 1009 int idx = (i < sc->num_tx_rings) ? 0 : 1; 1010 1011 stats[idx].packets += pq->stats.packets; 1012 stats[idx].bytes += pq->stats.bytes; 1013 stats[idx].errors += pq->stats.errors; 1014 stats[idx].iqdrops += pq->stats.iqdrops; 1015 stats[idx].mcasts += pq->stats.mcasts; 1016 } 1017 1018 switch (cnt) { 1019 case IFCOUNTER_IPACKETS: 1020 return (stats[1].packets); 1021 case IFCOUNTER_IQDROPS: 1022 return (stats[1].iqdrops); 1023 case IFCOUNTER_IERRORS: 1024 return (stats[1].errors); 1025 case IFCOUNTER_OPACKETS: 1026 return (stats[0].packets); 1027 case IFCOUNTER_OBYTES: 1028 return (stats[0].bytes); 1029 case IFCOUNTER_OMCASTS: 1030 return (stats[0].mcasts); 1031 default: 1032 return (if_get_counter_default(ifp, cnt)); 1033 } 1034 } 1035 #endif 1036 1037 1038 #ifdef PTNETMAP_STATS 1039 /* Called under core lock. */ 1040 static void 1041 ptnet_tick(void *opaque) 1042 { 1043 struct ptnet_softc *sc = opaque; 1044 int i; 1045 1046 for (i = 0; i < sc->num_rings; i++) { 1047 struct ptnet_queue *pq = sc->queues + i; 1048 struct ptnet_queue_stats cur = pq->stats; 1049 struct timeval now; 1050 unsigned int delta; 1051 1052 microtime(&now); 1053 delta = now.tv_usec - sc->last_ts.tv_usec + 1054 (now.tv_sec - sc->last_ts.tv_sec) * 1000000; 1055 delta /= 1000; /* in milliseconds */ 1056 1057 if (delta == 0) 1058 continue; 1059 1060 device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " 1061 "intr %lu\n", i, delta, 1062 (cur.packets - pq->last_stats.packets), 1063 (cur.kicks - pq->last_stats.kicks), 1064 (cur.intrs - pq->last_stats.intrs)); 1065 pq->last_stats = cur; 1066 } 1067 microtime(&sc->last_ts); 1068 callout_schedule(&sc->tick, hz); 1069 } 1070 #endif /* PTNETMAP_STATS */ 1071 1072 static void 1073 ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) 1074 { 1075 /* We are always active, as the backend netmap port is 1076 * always open in netmap mode. 
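     * Hence we unconditionally report an active 10G full-duplex link.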
*/ 1077 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 1078 ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; 1079 } 1080 1081 static uint32_t 1082 ptnet_nm_ptctl(if_t ifp, uint32_t cmd) 1083 { 1084 struct ptnet_softc *sc = if_getsoftc(ifp); 1085 int ret; 1086 1087 bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); 1088 ret = bus_read_4(sc->iomem, PTNET_IO_PTSTS); 1089 device_printf(sc->dev, "PTCTL %u, ret %u\n", cmd, ret); 1090 1091 return ret; 1092 } 1093 1094 static int 1095 ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd, 1096 unsigned *rxr, unsigned *rxd) 1097 { 1098 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1099 1100 *txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 1101 *rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 1102 *txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 1103 *rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 1104 1105 device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n", 1106 *txr, *rxr, *txd, *rxd); 1107 1108 return 0; 1109 } 1110 1111 static void 1112 ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) 1113 { 1114 int i; 1115 1116 /* Sync krings from the host, reading from 1117 * CSB. */ 1118 for (i = 0; i < sc->num_rings; i++) { 1119 struct ptnet_ring *ptring = sc->queues[i].ptring; 1120 struct netmap_kring *kring; 1121 1122 if (i < na->num_tx_rings) { 1123 kring = na->tx_rings + i; 1124 } else { 1125 kring = na->rx_rings + i - na->num_tx_rings; 1126 } 1127 kring->rhead = kring->ring->head = ptring->head; 1128 kring->rcur = kring->ring->cur = ptring->cur; 1129 kring->nr_hwcur = ptring->hwcur; 1130 kring->nr_hwtail = kring->rtail = 1131 kring->ring->tail = ptring->hwtail; 1132 1133 ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, 1134 ptring->hwcur, ptring->head, ptring->cur, 1135 ptring->hwtail); 1136 ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", 1137 t, i, kring->nr_hwcur, kring->rhead, kring->rcur, 1138 kring->ring->head, kring->ring->cur, kring->nr_hwtail, 1139 kring->rtail, kring->ring->tail); 1140 } 1141 } 1142 1143 static void 1144 ptnet_update_vnet_hdr(struct ptnet_softc *sc) 1145 { 1146 unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; 1147 1148 bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); 1149 sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); 1150 sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; 1151 } 1152 1153 static int 1154 ptnet_nm_register(struct netmap_adapter *na, int onoff) 1155 { 1156 /* device-specific */ 1157 if_t ifp = na->ifp; 1158 struct ptnet_softc *sc = if_getsoftc(ifp); 1159 int native = (na == &sc->ptna->hwup.up); 1160 struct ptnet_queue *pq; 1161 enum txrx t; 1162 int ret = 0; 1163 int i; 1164 1165 if (!onoff) { 1166 sc->ptna->backend_regifs--; 1167 } 1168 1169 /* If this is the last netmap client, guest interrupt enable flags may 1170 * be in arbitrary state. Since these flags are going to be used also 1171 * by the netdevice driver, we have to make sure to start with 1172 * notifications enabled. Also, schedule NAPI to flush pending packets 1173 * in the RX rings, since we will not receive further interrupts 1174 * until these will be processed. */ 1175 if (native && !onoff && na->active_fds == 0) { 1176 D("Exit netmap mode, re-enable interrupts"); 1177 for (i = 0; i < sc->num_rings; i++) { 1178 pq = sc->queues + i; 1179 pq->ptring->guest_need_kick = 1; 1180 } 1181 } 1182 1183 if (onoff) { 1184 if (sc->ptna->backend_regifs == 0) { 1185 /* Initialize notification enable fields in the CSB. 
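             * host_need_kick tells us to notify (kick) the host when
             * we post new work; guest_need_kick enables host-to-guest
             * interrupts, which we initially want only on RX queues
             * and only when polling is disabled.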
             */
            for (i = 0; i < sc->num_rings; i++) {
                pq = sc->queues + i;
                pq->ptring->host_need_kick = 1;
                pq->ptring->guest_need_kick =
                    (!(ifp->if_capenable & IFCAP_POLLING)
                    && i >= sc->num_tx_rings);
            }

            /* Set the virtio-net header length. */
            ptnet_update_vnet_hdr(sc);

            /* Make sure the host adapter passed through is ready
             * for txsync/rxsync. */
            ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_REGIF);
            if (ret) {
                return ret;
            }
        }

        /* Sync from CSB must be done after REGIF PTCTL. Skip this
         * step only if this is a netmap client and it is not the
         * first one. */
        if ((!native && sc->ptna->backend_regifs == 0) ||
            (native && na->active_fds == 0)) {
            ptnet_sync_from_csb(sc, na);
        }

        /* If not native, don't call nm_set_native_flags, since we
         * don't want to replace the if_transmit method, nor to set
         * NAF_NETMAP_ON. */
        if (native) {
            for_rx_tx(t) {
                for (i = 0; i <= nma_get_nrings(na, t); i++) {
                    struct netmap_kring *kring = &NMR(na, t)[i];

                    if (nm_kring_pending_on(kring)) {
                        kring->nr_mode = NKR_NETMAP_ON;
                    }
                }
            }
            nm_set_native_flags(na);
        }

    } else {
        if (native) {
            nm_clear_native_flags(na);
            for_rx_tx(t) {
                for (i = 0; i <= nma_get_nrings(na, t); i++) {
                    struct netmap_kring *kring = &NMR(na, t)[i];

                    if (nm_kring_pending_off(kring)) {
                        kring->nr_mode = NKR_NETMAP_OFF;
                    }
                }
            }
        }

        /* Sync from CSB must be done before UNREGIF PTCTL, on the last
         * netmap client. */
        if (native && na->active_fds == 0) {
            ptnet_sync_from_csb(sc, na);
        }

        if (sc->ptna->backend_regifs == 0) {
            ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_UNREGIF);
        }
    }

    if (onoff) {
        sc->ptna->backend_regifs++;
    }

    return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
    struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
    struct ptnet_queue *pq = sc->queues + kring->ring_id;
    bool notify;

    notify = netmap_pt_guest_txsync(pq->ptring, kring, flags);
    if (notify) {
        ptnet_kick(pq);
    }

    return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
    struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
    struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
    bool notify;

    notify = netmap_pt_guest_rxsync(pq->ptring, kring, flags);
    if (notify) {
        ptnet_kick(pq);
    }

    return 0;
}

static void
ptnet_tx_intr(void *opaque)
{
    struct ptnet_queue *pq = opaque;
    struct ptnet_softc *sc = pq->sc;

    DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
    pq->stats.intrs++;
#endif /* PTNETMAP_STATS */

    if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
        return;
    }

    /* Schedule the taskqueue to flush the pending transmit requests.
     * However, vtnet, if_em and if_igb just call ptnet_transmit() here,
     * at least when using MSI-X interrupts. The if_em driver instead
     * schedules the taskqueue when using legacy interrupts.
*/ 1309 taskqueue_enqueue(pq->taskq, &pq->task); 1310 } 1311 1312 static void 1313 ptnet_rx_intr(void *opaque) 1314 { 1315 struct ptnet_queue *pq = opaque; 1316 struct ptnet_softc *sc = pq->sc; 1317 unsigned int unused; 1318 1319 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); 1320 #ifdef PTNETMAP_STATS 1321 pq->stats.intrs ++; 1322 #endif /* PTNETMAP_STATS */ 1323 1324 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { 1325 return; 1326 } 1327 1328 /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, 1329 * receive-side processing is executed directly in the interrupt 1330 * service routine. Alternatively, we may schedule the taskqueue. */ 1331 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); 1332 } 1333 1334 /* The following offloadings-related functions are taken from the vtnet 1335 * driver, but the same functionality is required for the ptnet driver. 1336 * As a temporary solution, I copied this code from vtnet and I started 1337 * to generalize it (taking away driver-specific statistic accounting), 1338 * making as little modifications as possible. 1339 * In the future we need to share these functions between vtnet and ptnet. 1340 */ 1341 static int 1342 ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) 1343 { 1344 struct ether_vlan_header *evh; 1345 int offset; 1346 1347 evh = mtod(m, struct ether_vlan_header *); 1348 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1349 /* BMV: We should handle nested VLAN tags too. */ 1350 *etype = ntohs(evh->evl_proto); 1351 offset = sizeof(struct ether_vlan_header); 1352 } else { 1353 *etype = ntohs(evh->evl_encap_proto); 1354 offset = sizeof(struct ether_header); 1355 } 1356 1357 switch (*etype) { 1358 #if defined(INET) 1359 case ETHERTYPE_IP: { 1360 struct ip *ip, iphdr; 1361 if (__predict_false(m->m_len < offset + sizeof(struct ip))) { 1362 m_copydata(m, offset, sizeof(struct ip), 1363 (caddr_t) &iphdr); 1364 ip = &iphdr; 1365 } else 1366 ip = (struct ip *)(m->m_data + offset); 1367 *proto = ip->ip_p; 1368 *start = offset + (ip->ip_hl << 2); 1369 break; 1370 } 1371 #endif 1372 #if defined(INET6) 1373 case ETHERTYPE_IPV6: 1374 *proto = -1; 1375 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); 1376 /* Assert the network stack sent us a valid packet. */ 1377 KASSERT(*start > offset, 1378 ("%s: mbuf %p start %d offset %d proto %d", __func__, m, 1379 *start, offset, *proto)); 1380 break; 1381 #endif 1382 default: 1383 /* Here we should increment the tx_csum_bad_ethtype counter. */ 1384 return (EINVAL); 1385 } 1386 1387 return (0); 1388 } 1389 1390 static int 1391 ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, 1392 int offset, bool allow_ecn, struct virtio_net_hdr *hdr) 1393 { 1394 static struct timeval lastecn; 1395 static int curecn; 1396 struct tcphdr *tcp, tcphdr; 1397 1398 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { 1399 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); 1400 tcp = &tcphdr; 1401 } else 1402 tcp = (struct tcphdr *)(m->m_data + offset); 1403 1404 hdr->hdr_len = offset + (tcp->th_off << 2); 1405 hdr->gso_size = m->m_pkthdr.tso_segsz; 1406 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : 1407 VIRTIO_NET_HDR_GSO_TCPV6; 1408 1409 if (tcp->th_flags & TH_CWR) { 1410 /* 1411 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, 1412 * ECN support is not on a per-interface basis, but globally via 1413 * the net.inet.tcp.ecn.enable sysctl knob. The default is off. 
         */
        if (!allow_ecn) {
            if (ppsratecheck(&lastecn, &curecn, 1))
                if_printf(ifp,
                    "TSO with ECN not negotiated with host\n");
            return (ENOTSUP);
        }
        hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
    }

    /* Here we should increment tx_tso counter. */

    return (0);
}

static struct mbuf *
ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn,
                 struct virtio_net_hdr *hdr)
{
    int flags, etype, csum_start, proto, error;

    flags = m->m_pkthdr.csum_flags;

    error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start);
    if (error)
        goto drop;

    if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) ||
        (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) {
        /*
         * We could compare the IP protocol vs the CSUM_ flag too,
         * but that really should not be necessary.
         */
        hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
        hdr->csum_start = csum_start;
        hdr->csum_offset = m->m_pkthdr.csum_data;
        /* Here we should increment the tx_csum counter. */
    }

    if (flags & CSUM_TSO) {
        if (__predict_false(proto != IPPROTO_TCP)) {
            /* Likely failed to correctly parse the mbuf.
             * Here we should increment the tx_tso_not_tcp
             * counter. */
            goto drop;
        }

        KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
            ("%s: mbuf %p TSO without checksum offload %#x",
            __func__, m, flags));

        error = ptnet_tx_offload_tso(ifp, m, etype, csum_start,
                                     allow_ecn, hdr);
        if (error)
            goto drop;
    }

    return (m);

drop:
    m_freem(m);
    return (NULL);
}

static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
    struct ether_vlan_header *evh;

    evh = mtod(m, struct ether_vlan_header *);
    m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
    m->m_flags |= M_VLANTAG;

    /* Strip the 802.1Q header. */
    bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
        ETHER_HDR_LEN - ETHER_TYPE_LEN);
    m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
                        struct virtio_net_hdr *hdr)
{
#if defined(INET) || defined(INET6)
    int offset = hdr->csum_start + hdr->csum_offset;
#endif

    /* Only do a basic sanity check on the offset. */
    switch (eth_type) {
#if defined(INET)
    case ETHERTYPE_IP:
        if (__predict_false(offset < ip_start + sizeof(struct ip)))
            return (1);
        break;
#endif
#if defined(INET6)
    case ETHERTYPE_IPV6:
        if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
            return (1);
        break;
#endif
    default:
        /* Here we should increment the rx_csum_bad_ethtype counter. */
        return (1);
    }

    /*
     * Use the offset to determine the appropriate CSUM_* flags. This is
     * a bit dirty, but we can get by with it since the checksum offsets
     * happen to be different. We assume the host does not do IPv4
     * header checksum offloading.
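     * (uh_sum, th_sum and the SCTP checksum live at offsets 6, 16 and 8
     * within their respective headers, so csum_offset alone identifies
     * the protocol.)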
1529 */ 1530 switch (hdr->csum_offset) { 1531 case offsetof(struct udphdr, uh_sum): 1532 case offsetof(struct tcphdr, th_sum): 1533 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1534 m->m_pkthdr.csum_data = 0xFFFF; 1535 break; 1536 case offsetof(struct sctphdr, checksum): 1537 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1538 break; 1539 default: 1540 /* Here we should increment the rx_csum_bad_offset counter. */ 1541 return (1); 1542 } 1543 1544 return (0); 1545 } 1546 1547 static int 1548 ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start, 1549 struct virtio_net_hdr *hdr) 1550 { 1551 int offset, proto; 1552 1553 switch (eth_type) { 1554 #if defined(INET) 1555 case ETHERTYPE_IP: { 1556 struct ip *ip; 1557 if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) 1558 return (1); 1559 ip = (struct ip *)(m->m_data + ip_start); 1560 proto = ip->ip_p; 1561 offset = ip_start + (ip->ip_hl << 2); 1562 break; 1563 } 1564 #endif 1565 #if defined(INET6) 1566 case ETHERTYPE_IPV6: 1567 if (__predict_false(m->m_len < ip_start + 1568 sizeof(struct ip6_hdr))) 1569 return (1); 1570 offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); 1571 if (__predict_false(offset < 0)) 1572 return (1); 1573 break; 1574 #endif 1575 default: 1576 /* Here we should increment the rx_csum_bad_ethtype counter. */ 1577 return (1); 1578 } 1579 1580 switch (proto) { 1581 case IPPROTO_TCP: 1582 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) 1583 return (1); 1584 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1585 m->m_pkthdr.csum_data = 0xFFFF; 1586 break; 1587 case IPPROTO_UDP: 1588 if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) 1589 return (1); 1590 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1591 m->m_pkthdr.csum_data = 0xFFFF; 1592 break; 1593 case IPPROTO_SCTP: 1594 if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) 1595 return (1); 1596 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1597 break; 1598 default: 1599 /* 1600 * For the remaining protocols, FreeBSD does not support 1601 * checksum offloading, so the checksum will be recomputed. 1602 */ 1603 #if 0 1604 if_printf(ifp, "cksum offload of unsupported " 1605 "protocol eth_type=%#x proto=%d csum_start=%d " 1606 "csum_offset=%d\n", __func__, eth_type, proto, 1607 hdr->csum_start, hdr->csum_offset); 1608 #endif 1609 break; 1610 } 1611 1612 return (0); 1613 } 1614 1615 /* 1616 * Set the appropriate CSUM_* flags. Unfortunately, the information 1617 * provided is not directly useful to us. The VirtIO header gives the 1618 * offset of the checksum, which is all Linux needs, but this is not 1619 * how FreeBSD does things. We are forced to peek inside the packet 1620 * a bit. 1621 * 1622 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD 1623 * could accept the offsets and let the stack figure it out. 1624 */ 1625 static int 1626 ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) 1627 { 1628 struct ether_header *eh; 1629 struct ether_vlan_header *evh; 1630 uint16_t eth_type; 1631 int offset, error; 1632 1633 eh = mtod(m, struct ether_header *); 1634 eth_type = ntohs(eh->ether_type); 1635 if (eth_type == ETHERTYPE_VLAN) { 1636 /* BMV: We should handle nested VLAN tags too. 
         */
        evh = mtod(m, struct ether_vlan_header *);
        eth_type = ntohs(evh->evl_proto);
        offset = sizeof(struct ether_vlan_header);
    } else
        offset = sizeof(struct ether_header);

    if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
        error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr);
    else
        error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr);

    return (error);
}
/* End of offloading-related functions to be shared with vtnet. */

static inline void
ptnet_sync_tail(struct ptnet_ring *ptring, struct netmap_kring *kring)
{
    struct netmap_ring *ring = kring->ring;

    /* Update hwcur and hwtail as known by the host. */
    ptnetmap_guest_read_kring_csb(ptring, kring);

    /* nm_sync_finalize */
    ring->tail = kring->rtail = kring->nr_hwtail;
}

static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
                  unsigned int head, unsigned int sync_flags)
{
    struct netmap_ring *ring = kring->ring;
    struct ptnet_ring *ptring = pq->ptring;

    /* Some packets have been pushed to the netmap ring. We have
     * to tell the host to process the new packets, updating cur
     * and head in the CSB. */
    ring->head = ring->cur = head;

    /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
    kring->rcur = kring->rhead = head;

    ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);

    /* Kick the host if needed. */
    if (NM_ACCESS_ONCE(ptring->host_need_kick)) {
        ptring->sync_flags = sync_flags;
        ptnet_kick(pq);
    }
}

#define PTNET_TX_NOSPACE(_h, _k, _min) \
    ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
        (_k)->rtail - (_h)) < (_min)

/* This function may be called by the network stack, or by
 * the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
                           bool may_resched)
{
    struct ptnet_softc *sc = pq->sc;
    bool have_vnet_hdr = sc->vnet_hdr_len;
    struct netmap_adapter *na = &sc->ptna->dr.up;
    if_t ifp = sc->ifp;
    unsigned int batch_count = 0;
    struct ptnet_ring *ptring;
    struct netmap_kring *kring;
    struct netmap_ring *ring;
    struct netmap_slot *slot;
    unsigned int count = 0;
    unsigned int minspace;
    unsigned int head;
    unsigned int lim;
    struct mbuf *mhead;
    struct mbuf *mf;
    int nmbuf_bytes;
    uint8_t *nmbuf;

    if (!PTNET_Q_TRYLOCK(pq)) {
        /* We failed to acquire the lock, schedule the taskqueue. */
        RD(1, "Deferring TX work");
        if (may_resched) {
            taskqueue_enqueue(pq->taskq, &pq->task);
        }

        return 0;
    }

    if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
        PTNET_Q_UNLOCK(pq);
        RD(1, "Interface is down");
        return ENETDOWN;
    }

    ptring = pq->ptring;
    kring = na->tx_rings + pq->kring_id;
    ring = kring->ring;
    lim = kring->nkr_num_slots - 1;
    head = ring->head;
    minspace = sc->min_tx_space;

    while (count < budget) {
        if (PTNET_TX_NOSPACE(head, kring, minspace)) {
            /* We ran out of slots, let's see if the host has
             * freed up some, by reading hwcur and hwtail from
             * the CSB. */
            ptnet_sync_tail(ptring, kring);

            if (PTNET_TX_NOSPACE(head, kring, minspace)) {
                /* Still no slots available.
Reactivate the 1748 * interrupts so that we can be notified 1749 * when some free slots are made available by 1750 * the host. */ 1751 ptring->guest_need_kick = 1; 1752 1753 /* Double-check. */ 1754 ptnet_sync_tail(ptring, kring); 1755 if (likely(PTNET_TX_NOSPACE(head, kring, 1756 minspace))) { 1757 break; 1758 } 1759 1760 RD(1, "Found more slots by doublecheck"); 1761 /* More slots were freed before reactivating 1762 * the interrupts. */ 1763 ptring->guest_need_kick = 0; 1764 } 1765 } 1766 1767 mhead = drbr_peek(ifp, pq->bufring); 1768 if (!mhead) { 1769 break; 1770 } 1771 1772 /* Initialize transmission state variables. */ 1773 slot = ring->slot + head; 1774 nmbuf = NMB(na, slot); 1775 nmbuf_bytes = 0; 1776 1777 /* If needed, prepare the virtio-net header at the beginning 1778 * of the first slot. */ 1779 if (have_vnet_hdr) { 1780 struct virtio_net_hdr *vh = 1781 (struct virtio_net_hdr *)nmbuf; 1782 1783 /* For performance, we could replace this memset() with 1784 * two 8-bytes-wide writes. */ 1785 memset(nmbuf, 0, PTNET_HDR_SIZE); 1786 if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { 1787 mhead = ptnet_tx_offload(ifp, mhead, false, 1788 vh); 1789 if (unlikely(!mhead)) { 1790 /* Packet dropped because errors 1791 * occurred while preparing the vnet 1792 * header. Let's go ahead with the next 1793 * packet. */ 1794 pq->stats.errors ++; 1795 drbr_advance(ifp, pq->bufring); 1796 continue; 1797 } 1798 } 1799 ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x " 1800 "csum_start %u csum_ofs %u hdr_len = %u " 1801 "gso_size %u gso_type %x", __func__, 1802 mhead->m_pkthdr.csum_flags, vh->flags, 1803 vh->csum_start, vh->csum_offset, vh->hdr_len, 1804 vh->gso_size, vh->gso_type); 1805 1806 nmbuf += PTNET_HDR_SIZE; 1807 nmbuf_bytes += PTNET_HDR_SIZE; 1808 } 1809 1810 for (mf = mhead; mf; mf = mf->m_next) { 1811 uint8_t *mdata = mf->m_data; 1812 int mlen = mf->m_len; 1813 1814 for (;;) { 1815 int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; 1816 1817 if (mlen < copy) { 1818 copy = mlen; 1819 } 1820 memcpy(nmbuf, mdata, copy); 1821 1822 mdata += copy; 1823 mlen -= copy; 1824 nmbuf += copy; 1825 nmbuf_bytes += copy; 1826 1827 if (!mlen) { 1828 break; 1829 } 1830 1831 slot->len = nmbuf_bytes; 1832 slot->flags = NS_MOREFRAG; 1833 1834 head = nm_next(head, lim); 1835 KASSERT(head != ring->tail, 1836 ("Unexpectedly run out of TX space")); 1837 slot = ring->slot + head; 1838 nmbuf = NMB(na, slot); 1839 nmbuf_bytes = 0; 1840 } 1841 } 1842 1843 /* Complete last slot and update head. */ 1844 slot->len = nmbuf_bytes; 1845 slot->flags = 0; 1846 head = nm_next(head, lim); 1847 1848 /* Consume the packet just processed. */ 1849 drbr_advance(ifp, pq->bufring); 1850 1851 /* Copy the packet to listeners. 
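         * ETHER_BPF_MTAP() hands it to any BPF taps (e.g. tcpdump)
         * before we update the counters and free the chain.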
*/ 1852 ETHER_BPF_MTAP(ifp, mhead); 1853 1854 pq->stats.packets ++; 1855 pq->stats.bytes += mhead->m_pkthdr.len; 1856 if (mhead->m_flags & M_MCAST) { 1857 pq->stats.mcasts ++; 1858 } 1859 1860 m_freem(mhead); 1861 1862 count ++; 1863 if (++batch_count == PTNET_TX_BATCH) { 1864 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1865 batch_count = 0; 1866 } 1867 } 1868 1869 if (batch_count) { 1870 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1871 } 1872 1873 if (count >= budget && may_resched) { 1874 DBG(RD(1, "out of budget: resched, %d mbufs pending\n", 1875 drbr_inuse(ifp, pq->bufring))); 1876 taskqueue_enqueue(pq->taskq, &pq->task); 1877 } 1878 1879 PTNET_Q_UNLOCK(pq); 1880 1881 return count; 1882 } 1883 1884 static int 1885 ptnet_transmit(if_t ifp, struct mbuf *m) 1886 { 1887 struct ptnet_softc *sc = if_getsoftc(ifp); 1888 struct ptnet_queue *pq; 1889 unsigned int queue_idx; 1890 int err; 1891 1892 DBG(device_printf(sc->dev, "transmit %p\n", m)); 1893 1894 /* Insert 802.1Q header if needed. */ 1895 if (m->m_flags & M_VLANTAG) { 1896 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1897 if (m == NULL) { 1898 return ENOBUFS; 1899 } 1900 m->m_flags &= ~M_VLANTAG; 1901 } 1902 1903 /* Get the flow-id if available. */ 1904 queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? 1905 m->m_pkthdr.flowid : curcpu; 1906 1907 if (unlikely(queue_idx >= sc->num_tx_rings)) { 1908 queue_idx %= sc->num_tx_rings; 1909 } 1910 1911 pq = sc->queues + queue_idx; 1912 1913 err = drbr_enqueue(ifp, pq->bufring, m); 1914 if (err) { 1915 /* ENOBUFS when the bufring is full */ 1916 RD(1, "%s: drbr_enqueue() failed %d\n", 1917 __func__, err); 1918 pq->stats.errors ++; 1919 return err; 1920 } 1921 1922 if (ifp->if_capenable & IFCAP_POLLING) { 1923 /* If polling is on, the transmit queues will be 1924 * drained by the poller. */ 1925 return 0; 1926 } 1927 1928 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); 1929 1930 return (err < 0) ? 
err : 0; 1931 } 1932 1933 static unsigned int 1934 ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) 1935 { 1936 struct netmap_ring *ring = kring->ring; 1937 struct netmap_slot *slot = ring->slot + head; 1938 1939 for (;;) { 1940 head = nm_next(head, kring->nkr_num_slots - 1); 1941 if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { 1942 break; 1943 } 1944 slot = ring->slot + head; 1945 } 1946 1947 return head; 1948 } 1949 1950 static inline struct mbuf * 1951 ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) 1952 { 1953 uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; 1954 1955 do { 1956 unsigned int copy; 1957 1958 if (mtail->m_len == MCLBYTES) { 1959 struct mbuf *mf; 1960 1961 mf = m_getcl(M_NOWAIT, MT_DATA, 0); 1962 if (unlikely(!mf)) { 1963 return NULL; 1964 } 1965 1966 mtail->m_next = mf; 1967 mtail = mf; 1968 mdata = mtod(mtail, uint8_t *); 1969 mtail->m_len = 0; 1970 } 1971 1972 copy = MCLBYTES - mtail->m_len; 1973 if (nmbuf_len < copy) { 1974 copy = nmbuf_len; 1975 } 1976 1977 memcpy(mdata, nmbuf, copy); 1978 1979 nmbuf += copy; 1980 nmbuf_len -= copy; 1981 mdata += copy; 1982 mtail->m_len += copy; 1983 } while (nmbuf_len); 1984 1985 return mtail; 1986 } 1987 1988 static int 1989 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) 1990 { 1991 struct ptnet_softc *sc = pq->sc; 1992 bool have_vnet_hdr = sc->vnet_hdr_len; 1993 struct ptnet_ring *ptring = pq->ptring; 1994 struct netmap_adapter *na = &sc->ptna->dr.up; 1995 struct netmap_kring *kring = na->rx_rings + pq->kring_id; 1996 struct netmap_ring *ring = kring->ring; 1997 unsigned int const lim = kring->nkr_num_slots - 1; 1998 unsigned int head = ring->head; 1999 unsigned int batch_count = 0; 2000 if_t ifp = sc->ifp; 2001 unsigned int count = 0; 2002 2003 PTNET_Q_LOCK(pq); 2004 2005 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 2006 goto unlock; 2007 } 2008 2009 kring->nr_kflags &= ~NKR_PENDINTR; 2010 2011 while (count < budget) { 2012 unsigned int prev_head = head; 2013 struct mbuf *mhead, *mtail; 2014 struct virtio_net_hdr *vh; 2015 struct netmap_slot *slot; 2016 unsigned int nmbuf_len; 2017 uint8_t *nmbuf; 2018 host_sync: 2019 if (head == ring->tail) { 2020 /* We ran out of slot, let's see if the host has 2021 * added some, by reading hwcur and hwtail from 2022 * the CSB. */ 2023 ptnet_sync_tail(ptring, kring); 2024 2025 if (head == ring->tail) { 2026 /* Still no slots available. Reactivate 2027 * interrupts as they were disabled by the 2028 * host thread right before issuing the 2029 * last interrupt. */ 2030 ptring->guest_need_kick = 1; 2031 2032 /* Double-check. */ 2033 ptnet_sync_tail(ptring, kring); 2034 if (likely(head == ring->tail)) { 2035 break; 2036 } 2037 ptring->guest_need_kick = 0; 2038 } 2039 } 2040 2041 /* Initialize ring state variables, possibly grabbing the 2042 * virtio-net header. */ 2043 slot = ring->slot + head; 2044 nmbuf = NMB(na, slot); 2045 nmbuf_len = slot->len; 2046 2047 vh = (struct virtio_net_hdr *)nmbuf; 2048 if (have_vnet_hdr) { 2049 if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { 2050 /* There is no good reason why host should 2051 * put the header in multiple netmap slots. 2052 * If this is the case, discard. 
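                 * ptnet_rx_discard() advances head past the remaining
                 * NS_MOREFRAG slots of the packet.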
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct ptnet_ring *ptring = pq->ptring;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings + pq->kring_id;
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int head = ring->head;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	while (count < budget) {
		unsigned int prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots, let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ptring, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				ptring->guest_need_kick = 1;

				/* Double-check. */
				ptnet_sync_tail(ptring, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				ptring->guest_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host
				 * should put the header in multiple
				 * netmap slots. If this is the case,
				 * discard. */
				RD(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops ++;
				goto skip;
			}
			ND(1, "%s: vnet hdr: flags %x csum_start %u "
			      "csum_ofs %u hdr_len = %u gso_size %u "
			      "gso_type %x", __func__, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors ++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					      " mbuf frag, reset head %u --> %u\n",
					      __func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors ++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * the NS_MOREFRAG set. Drop it and continue
				 * the outer cycle (to do the double-check). */
				RD(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops ++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

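		/* The virtio-net header may carry checksum offload
		 * information for this packet. Hand it to ptnet_rx_csum(),
		 * which updates the mbuf checksum metadata accordingly;
		 * if the header is inconsistent the packet is dropped. */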
		if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
					| VIRTIO_NET_HDR_F_DATA_VALID))) {
			if (unlikely(ptnet_rx_csum(mhead, vh))) {
				m_freem(mhead);
				RD(1, "Csum offload error: dropping");
				pq->stats.iqdrops ++;
				goto skip;
			}
		}

		pq->stats.packets ++;
		pq->stats.bytes += mhead->m_pkthdr.len;

		PTNET_Q_UNLOCK(pq);
		(*ifp->if_input)(ifp, mhead);
		PTNET_Q_LOCK(pq);

		if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
			/* The interface has gone down while we were not
			 * holding the lock. Stop any processing and exit. */
			goto unlock;
		}
skip:
		count ++;
		if (++batch_count == PTNET_RX_BATCH) {
			/* Some packets have been pushed to the network
			 * stack. We need to update the CSB to tell the
			 * host about the new ring->cur and ring->head
			 * (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(RD(1, "out of budget: resched h %u t %u\n",
		       head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}

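/* Taskqueue callbacks: they resume RX processing and TX queue draining
 * outside of the caller's context, for instance when a previous run
 * exhausted its budget and rescheduled itself. */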
static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}

#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY differently,
 * since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	RD(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result, we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */