1 /*- 2 * Copyright (c) 2016, Vincenzo Maffione 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 /* Driver for ptnet paravirtualized network device. */ 30 31 #include <sys/cdefs.h> 32 33 #include <sys/types.h> 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/socket.h> 42 #include <sys/sysctl.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/taskqueue.h> 46 #include <sys/smp.h> 47 #include <sys/time.h> 48 #include <machine/smp.h> 49 50 #include <vm/uma.h> 51 #include <vm/vm.h> 52 #include <vm/pmap.h> 53 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_var.h> 57 #include <net/if_arp.h> 58 #include <net/if_dl.h> 59 #include <net/if_types.h> 60 #include <net/if_media.h> 61 #include <net/if_vlan_var.h> 62 #include <net/bpf.h> 63 64 #include <netinet/in_systm.h> 65 #include <netinet/in.h> 66 #include <netinet/ip.h> 67 #include <netinet/ip6.h> 68 #include <netinet6/ip6_var.h> 69 #include <netinet/udp.h> 70 #include <netinet/tcp.h> 71 #include <netinet/sctp.h> 72 73 #include <machine/bus.h> 74 #include <machine/resource.h> 75 #include <sys/bus.h> 76 #include <sys/rman.h> 77 78 #include <dev/pci/pcivar.h> 79 #include <dev/pci/pcireg.h> 80 81 #include "opt_inet.h" 82 #include "opt_inet6.h" 83 84 #include <sys/selinfo.h> 85 #include <net/netmap.h> 86 #include <dev/netmap/netmap_kern.h> 87 #include <net/netmap_virt.h> 88 #include <dev/netmap/netmap_mem2.h> 89 #include <dev/virtio/network/virtio_net.h> 90 91 #ifndef PTNET_CSB_ALLOC 92 #error "No support for on-device CSB" 93 #endif 94 95 #ifndef INET 96 #error "INET not defined, cannot support offloadings" 97 #endif 98 99 #if __FreeBSD_version >= 1100000 100 static uint64_t ptnet_get_counter(if_t, ift_counter); 101 #else 102 typedef struct ifnet *if_t; 103 #define if_getsoftc(_ifp) (_ifp)->if_softc 104 #endif 105 106 //#define PTNETMAP_STATS 107 //#define DEBUG 108 #ifdef DEBUG 109 #define DBG(x) x 110 #else /* !DEBUG */ 111 #define DBG(x) 112 #endif /* !DEBUG */ 113 114 extern int ptnet_vnet_hdr; /* Tunable parameter */ 115 116 struct ptnet_softc; 117 118 struct ptnet_queue_stats { 
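	/* Per-queue counters; ptnet_get_counter() sums them across the
	 * queues to answer if_get_counter() queries (FreeBSD >= 11). */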
119 uint64_t packets; /* if_[io]packets */ 120 uint64_t bytes; /* if_[io]bytes */ 121 uint64_t errors; /* if_[io]errors */ 122 uint64_t iqdrops; /* if_iqdrops */ 123 uint64_t mcasts; /* if_[io]mcasts */ 124 #ifdef PTNETMAP_STATS 125 uint64_t intrs; 126 uint64_t kicks; 127 #endif /* PTNETMAP_STATS */ 128 }; 129 130 struct ptnet_queue { 131 struct ptnet_softc *sc; 132 struct resource *irq; 133 void *cookie; 134 int kring_id; 135 struct ptnet_ring *ptring; 136 unsigned int kick; 137 struct mtx lock; 138 struct buf_ring *bufring; /* for TX queues */ 139 struct ptnet_queue_stats stats; 140 #ifdef PTNETMAP_STATS 141 struct ptnet_queue_stats last_stats; 142 #endif /* PTNETMAP_STATS */ 143 struct taskqueue *taskq; 144 struct task task; 145 char lock_name[16]; 146 }; 147 148 #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) 149 #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) 150 #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) 151 152 struct ptnet_softc { 153 device_t dev; 154 if_t ifp; 155 struct ifmedia media; 156 struct mtx lock; 157 char lock_name[16]; 158 char hwaddr[ETHER_ADDR_LEN]; 159 160 /* Mirror of PTFEAT register. */ 161 uint32_t ptfeatures; 162 unsigned int vnet_hdr_len; 163 164 /* PCI BARs support. */ 165 struct resource *iomem; 166 struct resource *msix_mem; 167 168 unsigned int num_rings; 169 unsigned int num_tx_rings; 170 struct ptnet_queue *queues; 171 struct ptnet_queue *rxqueues; 172 struct ptnet_csb *csb; 173 174 unsigned int min_tx_space; 175 176 struct netmap_pt_guest_adapter *ptna; 177 178 struct callout tick; 179 #ifdef PTNETMAP_STATS 180 struct timeval last_ts; 181 #endif /* PTNETMAP_STATS */ 182 }; 183 184 #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) 185 #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 186 187 static int ptnet_probe(device_t); 188 static int ptnet_attach(device_t); 189 static int ptnet_detach(device_t); 190 static int ptnet_suspend(device_t); 191 static int ptnet_resume(device_t); 192 static int ptnet_shutdown(device_t); 193 194 static void ptnet_init(void *opaque); 195 static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); 196 static int ptnet_init_locked(struct ptnet_softc *sc); 197 static int ptnet_stop(struct ptnet_softc *sc); 198 static int ptnet_transmit(if_t ifp, struct mbuf *m); 199 static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, 200 unsigned int budget, 201 bool may_resched); 202 static void ptnet_qflush(if_t ifp); 203 static void ptnet_tx_task(void *context, int pending); 204 205 static int ptnet_media_change(if_t ifp); 206 static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); 207 #ifdef PTNETMAP_STATS 208 static void ptnet_tick(void *opaque); 209 #endif 210 211 static int ptnet_irqs_init(struct ptnet_softc *sc); 212 static void ptnet_irqs_fini(struct ptnet_softc *sc); 213 214 static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd); 215 static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, 216 unsigned *txd, unsigned *rxr, unsigned *rxd); 217 static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); 218 static int ptnet_nm_register(struct netmap_adapter *na, int onoff); 219 static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); 220 static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); 221 222 static void ptnet_tx_intr(void *opaque); 223 static void ptnet_rx_intr(void *opaque); 224 225 static unsigned ptnet_rx_discard(struct netmap_kring *kring, 226 unsigned int head); 227 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, 228 bool 
may_resched); 229 static void ptnet_rx_task(void *context, int pending); 230 231 #ifdef DEVICE_POLLING 232 static poll_handler_t ptnet_poll; 233 #endif 234 235 static device_method_t ptnet_methods[] = { 236 DEVMETHOD(device_probe, ptnet_probe), 237 DEVMETHOD(device_attach, ptnet_attach), 238 DEVMETHOD(device_detach, ptnet_detach), 239 DEVMETHOD(device_suspend, ptnet_suspend), 240 DEVMETHOD(device_resume, ptnet_resume), 241 DEVMETHOD(device_shutdown, ptnet_shutdown), 242 DEVMETHOD_END 243 }; 244 245 static driver_t ptnet_driver = { 246 "ptnet", 247 ptnet_methods, 248 sizeof(struct ptnet_softc) 249 }; 250 251 /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */ 252 static devclass_t ptnet_devclass; 253 DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass, 254 NULL, NULL, SI_ORDER_MIDDLE + 2); 255 256 static int 257 ptnet_probe(device_t dev) 258 { 259 if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || 260 pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { 261 return (ENXIO); 262 } 263 264 device_set_desc(dev, "ptnet network adapter"); 265 266 return (BUS_PROBE_DEFAULT); 267 } 268 269 static inline void ptnet_kick(struct ptnet_queue *pq) 270 { 271 #ifdef PTNETMAP_STATS 272 pq->stats.kicks ++; 273 #endif /* PTNETMAP_STATS */ 274 bus_write_4(pq->sc->iomem, pq->kick, 0); 275 } 276 277 #define PTNET_BUF_RING_SIZE 4096 278 #define PTNET_RX_BUDGET 512 279 #define PTNET_RX_BATCH 1 280 #define PTNET_TX_BUDGET 512 281 #define PTNET_TX_BATCH 64 282 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) 283 #define PTNET_MAX_PKT_SIZE 65536 284 285 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP) 286 #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\ 287 CSUM_SCTP_IPV6) 288 #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ 289 PTNET_CSUM_OFFLOAD_IPV6) 290 291 static int 292 ptnet_attach(device_t dev) 293 { 294 uint32_t ptfeatures = 0; 295 unsigned int num_rx_rings, num_tx_rings; 296 struct netmap_adapter na_arg; 297 unsigned int nifp_offset; 298 struct ptnet_softc *sc; 299 if_t ifp; 300 uint32_t macreg; 301 int err, rid; 302 int i; 303 304 sc = device_get_softc(dev); 305 sc->dev = dev; 306 307 /* Setup PCI resources. */ 308 pci_enable_busmaster(dev); 309 310 rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); 311 sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 312 RF_ACTIVE); 313 if (sc->iomem == NULL) { 314 device_printf(dev, "Failed to map I/O BAR\n"); 315 return (ENXIO); 316 } 317 318 /* Negotiate features with the hypervisor. */ 319 if (ptnet_vnet_hdr) { 320 ptfeatures |= PTNETMAP_F_VNET_HDR; 321 } 322 bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ 323 ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ 324 sc->ptfeatures = ptfeatures; 325 326 /* Allocate CSB and carry out CSB allocation protocol (CSBBAH first, 327 * then CSBBAL). */ 328 sc->csb = malloc(sizeof(struct ptnet_csb), M_DEVBUF, 329 M_NOWAIT | M_ZERO); 330 if (sc->csb == NULL) { 331 device_printf(dev, "Failed to allocate CSB\n"); 332 err = ENOMEM; 333 goto err_path; 334 } 335 336 { 337 /* 338 * We use uint64_t rather than vm_paddr_t since we 339 * need 64 bit addresses even on 32 bit platforms. 
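		 * The address is programmed into the device as two 32-bit
		 * halves, CSBBAH (high word) first and then CSBBAL (low word).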
340 */ 341 uint64_t paddr = vtophys(sc->csb); 342 343 bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 344 (paddr >> 32) & 0xffffffff); 345 bus_write_4(sc->iomem, PTNET_IO_CSBBAL, paddr & 0xffffffff); 346 } 347 348 num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 349 num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 350 sc->num_rings = num_tx_rings + num_rx_rings; 351 sc->num_tx_rings = num_tx_rings; 352 353 /* Allocate and initialize per-queue data structures. */ 354 sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, 355 M_DEVBUF, M_NOWAIT | M_ZERO); 356 if (sc->queues == NULL) { 357 err = ENOMEM; 358 goto err_path; 359 } 360 sc->rxqueues = sc->queues + num_tx_rings; 361 362 for (i = 0; i < sc->num_rings; i++) { 363 struct ptnet_queue *pq = sc->queues + i; 364 365 pq->sc = sc; 366 pq->kring_id = i; 367 pq->kick = PTNET_IO_KICK_BASE + 4 * i; 368 pq->ptring = sc->csb->rings + i; 369 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", 370 device_get_nameunit(dev), i); 371 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); 372 if (i >= num_tx_rings) { 373 /* RX queue: fix kring_id. */ 374 pq->kring_id -= num_tx_rings; 375 } else { 376 /* TX queue: allocate buf_ring. */ 377 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, 378 M_DEVBUF, M_NOWAIT, &pq->lock); 379 if (pq->bufring == NULL) { 380 err = ENOMEM; 381 goto err_path; 382 } 383 } 384 } 385 386 sc->min_tx_space = 64; /* Safe initial value. */ 387 388 err = ptnet_irqs_init(sc); 389 if (err) { 390 goto err_path; 391 } 392 393 /* Setup Ethernet interface. */ 394 sc->ifp = ifp = if_alloc(IFT_ETHER); 395 if (ifp == NULL) { 396 device_printf(dev, "Failed to allocate ifnet\n"); 397 err = ENOMEM; 398 goto err_path; 399 } 400 401 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 402 ifp->if_baudrate = IF_Gbps(10); 403 ifp->if_softc = sc; 404 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; 405 ifp->if_init = ptnet_init; 406 ifp->if_ioctl = ptnet_ioctl; 407 #if __FreeBSD_version >= 1100000 408 ifp->if_get_counter = ptnet_get_counter; 409 #endif 410 ifp->if_transmit = ptnet_transmit; 411 ifp->if_qflush = ptnet_qflush; 412 413 ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, 414 ptnet_media_status); 415 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); 416 ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); 417 418 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); 419 sc->hwaddr[0] = (macreg >> 8) & 0xff; 420 sc->hwaddr[1] = macreg & 0xff; 421 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); 422 sc->hwaddr[2] = (macreg >> 24) & 0xff; 423 sc->hwaddr[3] = (macreg >> 16) & 0xff; 424 sc->hwaddr[4] = (macreg >> 8) & 0xff; 425 sc->hwaddr[5] = macreg & 0xff; 426 427 ether_ifattach(ifp, sc->hwaddr); 428 429 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 430 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 431 432 if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { 433 /* Similarly to what the vtnet driver does, we can emulate 434 * VLAN offloadings by inserting and removing the 802.1Q 435 * header during transmit and receive. We are then able 436 * to do checksum offloading of VLAN frames. */ 437 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 438 | IFCAP_VLAN_HWCSUM 439 | IFCAP_TSO | IFCAP_LRO 440 | IFCAP_VLAN_HWTSO 441 | IFCAP_VLAN_HWTAGGING; 442 } 443 444 ifp->if_capenable = ifp->if_capabilities; 445 #ifdef DEVICE_POLLING 446 /* Don't enable polling by default. 
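	 * It can be enabled at run time via SIOCSIFCAP (IFCAP_POLLING),
	 * which ptnet_ioctl() handles explicitly.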
*/ 447 ifp->if_capabilities |= IFCAP_POLLING; 448 #endif 449 snprintf(sc->lock_name, sizeof(sc->lock_name), 450 "%s", device_get_nameunit(dev)); 451 mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); 452 callout_init_mtx(&sc->tick, &sc->lock, 0); 453 454 /* Prepare a netmap_adapter struct instance to do netmap_attach(). */ 455 nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); 456 memset(&na_arg, 0, sizeof(na_arg)); 457 na_arg.ifp = ifp; 458 na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 459 na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 460 na_arg.num_tx_rings = num_tx_rings; 461 na_arg.num_rx_rings = num_rx_rings; 462 na_arg.nm_config = ptnet_nm_config; 463 na_arg.nm_krings_create = ptnet_nm_krings_create; 464 na_arg.nm_krings_delete = ptnet_nm_krings_delete; 465 na_arg.nm_dtor = ptnet_nm_dtor; 466 na_arg.nm_register = ptnet_nm_register; 467 na_arg.nm_txsync = ptnet_nm_txsync; 468 na_arg.nm_rxsync = ptnet_nm_rxsync; 469 470 netmap_pt_guest_attach(&na_arg, sc->csb, nifp_offset, 471 bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); 472 473 /* Now a netmap adapter for this ifp has been allocated, and it 474 * can be accessed through NA(ifp). We also have to initialize the CSB 475 * pointer. */ 476 sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); 477 478 /* If virtio-net header was negotiated, set the virt_hdr_len field in 479 * the netmap adapter, to inform users that this netmap adapter requires 480 * the application to deal with the headers. */ 481 ptnet_update_vnet_hdr(sc); 482 483 device_printf(dev, "%s() completed\n", __func__); 484 485 return (0); 486 487 err_path: 488 ptnet_detach(dev); 489 return err; 490 } 491 492 static int 493 ptnet_detach(device_t dev) 494 { 495 struct ptnet_softc *sc = device_get_softc(dev); 496 int i; 497 498 #ifdef DEVICE_POLLING 499 if (sc->ifp->if_capenable & IFCAP_POLLING) { 500 ether_poll_deregister(sc->ifp); 501 } 502 #endif 503 callout_drain(&sc->tick); 504 505 if (sc->queues) { 506 /* Drain taskqueues before calling if_detach. */ 507 for (i = 0; i < sc->num_rings; i++) { 508 struct ptnet_queue *pq = sc->queues + i; 509 510 if (pq->taskq) { 511 taskqueue_drain(pq->taskq, &pq->task); 512 } 513 } 514 } 515 516 if (sc->ifp) { 517 ether_ifdetach(sc->ifp); 518 519 /* Uninitialize netmap adapters for this device. 
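		 * netmap_detach() undoes the netmap_pt_guest_attach() performed
		 * at attach time and frees the associated netmap adapters.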
*/ 520 netmap_detach(sc->ifp); 521 522 ifmedia_removeall(&sc->media); 523 if_free(sc->ifp); 524 sc->ifp = NULL; 525 } 526 527 ptnet_irqs_fini(sc); 528 529 if (sc->csb) { 530 bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 0); 531 bus_write_4(sc->iomem, PTNET_IO_CSBBAL, 0); 532 free(sc->csb, M_DEVBUF); 533 sc->csb = NULL; 534 } 535 536 if (sc->queues) { 537 for (i = 0; i < sc->num_rings; i++) { 538 struct ptnet_queue *pq = sc->queues + i; 539 540 if (mtx_initialized(&pq->lock)) { 541 mtx_destroy(&pq->lock); 542 } 543 if (pq->bufring != NULL) { 544 buf_ring_free(pq->bufring, M_DEVBUF); 545 } 546 } 547 free(sc->queues, M_DEVBUF); 548 sc->queues = NULL; 549 } 550 551 if (sc->iomem) { 552 bus_release_resource(dev, SYS_RES_IOPORT, 553 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); 554 sc->iomem = NULL; 555 } 556 557 mtx_destroy(&sc->lock); 558 559 device_printf(dev, "%s() completed\n", __func__); 560 561 return (0); 562 } 563 564 static int 565 ptnet_suspend(device_t dev) 566 { 567 struct ptnet_softc *sc; 568 569 sc = device_get_softc(dev); 570 (void)sc; 571 572 return (0); 573 } 574 575 static int 576 ptnet_resume(device_t dev) 577 { 578 struct ptnet_softc *sc; 579 580 sc = device_get_softc(dev); 581 (void)sc; 582 583 return (0); 584 } 585 586 static int 587 ptnet_shutdown(device_t dev) 588 { 589 /* 590 * Suspend already does all of what we need to 591 * do here; we just never expect to be resumed. 592 */ 593 return (ptnet_suspend(dev)); 594 } 595 596 static int 597 ptnet_irqs_init(struct ptnet_softc *sc) 598 { 599 int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); 600 int nvecs = sc->num_rings; 601 device_t dev = sc->dev; 602 int err = ENOSPC; 603 int cpu_cur; 604 int i; 605 606 if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { 607 device_printf(dev, "Could not find MSI-X capability\n"); 608 return (ENXIO); 609 } 610 611 sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 612 &rid, RF_ACTIVE); 613 if (sc->msix_mem == NULL) { 614 device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); 615 return (ENXIO); 616 } 617 618 if (pci_msix_count(dev) < nvecs) { 619 device_printf(dev, "Not enough MSI-X vectors\n"); 620 goto err_path; 621 } 622 623 err = pci_alloc_msix(dev, &nvecs); 624 if (err) { 625 device_printf(dev, "Failed to allocate MSI-X vectors\n"); 626 goto err_path; 627 } 628 629 for (i = 0; i < nvecs; i++) { 630 struct ptnet_queue *pq = sc->queues + i; 631 632 rid = i + 1; 633 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 634 RF_ACTIVE); 635 if (pq->irq == NULL) { 636 device_printf(dev, "Failed to allocate interrupt " 637 "for queue #%d\n", i); 638 err = ENOSPC; 639 goto err_path; 640 } 641 } 642 643 cpu_cur = CPU_FIRST(); 644 for (i = 0; i < nvecs; i++) { 645 struct ptnet_queue *pq = sc->queues + i; 646 void (*handler)(void *) = ptnet_tx_intr; 647 648 if (i >= sc->num_tx_rings) { 649 handler = ptnet_rx_intr; 650 } 651 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, 652 NULL /* intr_filter */, handler, 653 pq, &pq->cookie); 654 if (err) { 655 device_printf(dev, "Failed to register intr handler " 656 "for queue #%d\n", i); 657 goto err_path; 658 } 659 660 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); 661 #if 0 662 bus_bind_intr(sc->dev, pq->irq, cpu_cur); 663 #endif 664 cpu_cur = CPU_NEXT(cpu_cur); 665 } 666 667 device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); 668 669 cpu_cur = CPU_FIRST(); 670 for (i = 0; i < nvecs; i++) { 671 struct ptnet_queue *pq = sc->queues + i; 672 static void (*handler)(void *context, int pending); 673 674 handler = (i < sc->num_tx_rings) 
? ptnet_tx_task : ptnet_rx_task; 675 676 TASK_INIT(&pq->task, 0, handler, pq); 677 pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, 678 taskqueue_thread_enqueue, &pq->taskq); 679 taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", 680 device_get_nameunit(sc->dev), cpu_cur); 681 cpu_cur = CPU_NEXT(cpu_cur); 682 } 683 684 return 0; 685 err_path: 686 ptnet_irqs_fini(sc); 687 return err; 688 } 689 690 static void 691 ptnet_irqs_fini(struct ptnet_softc *sc) 692 { 693 device_t dev = sc->dev; 694 int i; 695 696 for (i = 0; i < sc->num_rings; i++) { 697 struct ptnet_queue *pq = sc->queues + i; 698 699 if (pq->taskq) { 700 taskqueue_free(pq->taskq); 701 pq->taskq = NULL; 702 } 703 704 if (pq->cookie) { 705 bus_teardown_intr(dev, pq->irq, pq->cookie); 706 pq->cookie = NULL; 707 } 708 709 if (pq->irq) { 710 bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); 711 pq->irq = NULL; 712 } 713 } 714 715 if (sc->msix_mem) { 716 pci_release_msi(dev); 717 718 bus_release_resource(dev, SYS_RES_MEMORY, 719 PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), 720 sc->msix_mem); 721 sc->msix_mem = NULL; 722 } 723 } 724 725 static void 726 ptnet_init(void *opaque) 727 { 728 struct ptnet_softc *sc = opaque; 729 730 PTNET_CORE_LOCK(sc); 731 ptnet_init_locked(sc); 732 PTNET_CORE_UNLOCK(sc); 733 } 734 735 static int 736 ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) 737 { 738 struct ptnet_softc *sc = if_getsoftc(ifp); 739 device_t dev = sc->dev; 740 struct ifreq *ifr = (struct ifreq *)data; 741 int mask, err = 0; 742 743 switch (cmd) { 744 case SIOCSIFFLAGS: 745 device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags); 746 PTNET_CORE_LOCK(sc); 747 if (ifp->if_flags & IFF_UP) { 748 /* Network stack wants the iff to be up. */ 749 err = ptnet_init_locked(sc); 750 } else { 751 /* Network stack wants the iff to be down. */ 752 err = ptnet_stop(sc); 753 } 754 /* We don't need to do nothing to support IFF_PROMISC, 755 * since that is managed by the backend port. */ 756 PTNET_CORE_UNLOCK(sc); 757 break; 758 759 case SIOCSIFCAP: 760 device_printf(dev, "SIOCSIFCAP %x %x\n", 761 ifr->ifr_reqcap, ifp->if_capenable); 762 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 763 #ifdef DEVICE_POLLING 764 if (mask & IFCAP_POLLING) { 765 struct ptnet_queue *pq; 766 int i; 767 768 if (ifr->ifr_reqcap & IFCAP_POLLING) { 769 err = ether_poll_register(ptnet_poll, ifp); 770 if (err) { 771 break; 772 } 773 /* Stop queues and sync with taskqueues. */ 774 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 775 for (i = 0; i < sc->num_rings; i++) { 776 pq = sc-> queues + i; 777 /* Make sure the worker sees the 778 * IFF_DRV_RUNNING down. */ 779 PTNET_Q_LOCK(pq); 780 pq->ptring->guest_need_kick = 0; 781 PTNET_Q_UNLOCK(pq); 782 /* Wait for rescheduling to finish. */ 783 if (pq->taskq) { 784 taskqueue_drain(pq->taskq, 785 &pq->task); 786 } 787 } 788 ifp->if_drv_flags |= IFF_DRV_RUNNING; 789 } else { 790 err = ether_poll_deregister(ifp); 791 for (i = 0; i < sc->num_rings; i++) { 792 pq = sc-> queues + i; 793 PTNET_Q_LOCK(pq); 794 pq->ptring->guest_need_kick = 1; 795 PTNET_Q_UNLOCK(pq); 796 } 797 } 798 } 799 #endif /* DEVICE_POLLING */ 800 ifp->if_capenable = ifr->ifr_reqcap; 801 break; 802 803 case SIOCSIFMTU: 804 /* We support any reasonable MTU. 
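		 * i.e. anything between ETHERMIN and PTNET_MAX_PKT_SIZE (64KB),
		 * as enforced by the range check below.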
*/ 805 if (ifr->ifr_mtu < ETHERMIN || 806 ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { 807 err = EINVAL; 808 } else { 809 PTNET_CORE_LOCK(sc); 810 ifp->if_mtu = ifr->ifr_mtu; 811 PTNET_CORE_UNLOCK(sc); 812 } 813 break; 814 815 case SIOCSIFMEDIA: 816 case SIOCGIFMEDIA: 817 err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); 818 break; 819 820 default: 821 err = ether_ioctl(ifp, cmd, data); 822 break; 823 } 824 825 return err; 826 } 827 828 static int 829 ptnet_init_locked(struct ptnet_softc *sc) 830 { 831 if_t ifp = sc->ifp; 832 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 833 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 834 unsigned int nm_buf_size; 835 int ret; 836 837 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 838 return 0; /* nothing to do */ 839 } 840 841 device_printf(sc->dev, "%s\n", __func__); 842 843 /* Translate offload capabilities according to if_capenable. */ 844 ifp->if_hwassist = 0; 845 if (ifp->if_capenable & IFCAP_TXCSUM) 846 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; 847 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 848 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; 849 if (ifp->if_capenable & IFCAP_TSO4) 850 ifp->if_hwassist |= CSUM_IP_TSO; 851 if (ifp->if_capenable & IFCAP_TSO6) 852 ifp->if_hwassist |= CSUM_IP6_TSO; 853 854 /* 855 * Prepare the interface for netmap mode access. 856 */ 857 netmap_update_config(na_dr); 858 859 ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); 860 if (ret) { 861 device_printf(sc->dev, "netmap_mem_finalize() failed\n"); 862 return ret; 863 } 864 865 if (sc->ptna->backend_regifs == 0) { 866 ret = ptnet_nm_krings_create(na_nm); 867 if (ret) { 868 device_printf(sc->dev, "ptnet_nm_krings_create() " 869 "failed\n"); 870 goto err_mem_finalize; 871 } 872 873 ret = netmap_mem_rings_create(na_dr); 874 if (ret) { 875 device_printf(sc->dev, "netmap_mem_rings_create() " 876 "failed\n"); 877 goto err_rings_create; 878 } 879 880 ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); 881 if (ret) { 882 device_printf(sc->dev, "netmap_mem_get_lut() " 883 "failed\n"); 884 goto err_get_lut; 885 } 886 } 887 888 ret = ptnet_nm_register(na_dr, 1 /* on */); 889 if (ret) { 890 goto err_register; 891 } 892 893 nm_buf_size = NETMAP_BUF_SIZE(na_dr); 894 895 KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); 896 sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; 897 device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, 898 sc->min_tx_space); 899 #ifdef PTNETMAP_STATS 900 callout_reset(&sc->tick, hz, ptnet_tick, sc); 901 #endif 902 903 ifp->if_drv_flags |= IFF_DRV_RUNNING; 904 905 return 0; 906 907 err_register: 908 memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); 909 err_get_lut: 910 netmap_mem_rings_delete(na_dr); 911 err_rings_create: 912 ptnet_nm_krings_delete(na_nm); 913 err_mem_finalize: 914 netmap_mem_deref(na_dr->nm_mem, na_dr); 915 916 return ret; 917 } 918 919 /* To be called under core lock. */ 920 static int 921 ptnet_stop(struct ptnet_softc *sc) 922 { 923 if_t ifp = sc->ifp; 924 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 925 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 926 int i; 927 928 device_printf(sc->dev, "%s\n", __func__); 929 930 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 931 return 0; /* nothing to do */ 932 } 933 934 /* Clear the driver-ready flag, and synchronize with all the queues, 935 * so that after this loop we are sure nobody is working anymore with 936 * the device. This scheme is taken from the vtnet driver. 
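	 * Taking and dropping each queue lock acts as a barrier: any worker
	 * that observed IFF_DRV_RUNNING set has finished by the time the
	 * loop below completes.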
*/ 937 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 938 callout_stop(&sc->tick); 939 for (i = 0; i < sc->num_rings; i++) { 940 PTNET_Q_LOCK(sc->queues + i); 941 PTNET_Q_UNLOCK(sc->queues + i); 942 } 943 944 ptnet_nm_register(na_dr, 0 /* off */); 945 946 if (sc->ptna->backend_regifs == 0) { 947 netmap_mem_rings_delete(na_dr); 948 ptnet_nm_krings_delete(na_nm); 949 } 950 netmap_mem_deref(na_dr->nm_mem, na_dr); 951 952 return 0; 953 } 954 955 static void 956 ptnet_qflush(if_t ifp) 957 { 958 struct ptnet_softc *sc = if_getsoftc(ifp); 959 int i; 960 961 /* Flush all the bufrings and do the interface flush. */ 962 for (i = 0; i < sc->num_rings; i++) { 963 struct ptnet_queue *pq = sc->queues + i; 964 struct mbuf *m; 965 966 PTNET_Q_LOCK(pq); 967 if (pq->bufring) { 968 while ((m = buf_ring_dequeue_sc(pq->bufring))) { 969 m_freem(m); 970 } 971 } 972 PTNET_Q_UNLOCK(pq); 973 } 974 975 if_qflush(ifp); 976 } 977 978 static int 979 ptnet_media_change(if_t ifp) 980 { 981 struct ptnet_softc *sc = if_getsoftc(ifp); 982 struct ifmedia *ifm = &sc->media; 983 984 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 985 return EINVAL; 986 } 987 988 return 0; 989 } 990 991 #if __FreeBSD_version >= 1100000 992 static uint64_t 993 ptnet_get_counter(if_t ifp, ift_counter cnt) 994 { 995 struct ptnet_softc *sc = if_getsoftc(ifp); 996 struct ptnet_queue_stats stats[2]; 997 int i; 998 999 /* Accumulate statistics over the queues. */ 1000 memset(stats, 0, sizeof(stats)); 1001 for (i = 0; i < sc->num_rings; i++) { 1002 struct ptnet_queue *pq = sc->queues + i; 1003 int idx = (i < sc->num_tx_rings) ? 0 : 1; 1004 1005 stats[idx].packets += pq->stats.packets; 1006 stats[idx].bytes += pq->stats.bytes; 1007 stats[idx].errors += pq->stats.errors; 1008 stats[idx].iqdrops += pq->stats.iqdrops; 1009 stats[idx].mcasts += pq->stats.mcasts; 1010 } 1011 1012 switch (cnt) { 1013 case IFCOUNTER_IPACKETS: 1014 return (stats[1].packets); 1015 case IFCOUNTER_IQDROPS: 1016 return (stats[1].iqdrops); 1017 case IFCOUNTER_IERRORS: 1018 return (stats[1].errors); 1019 case IFCOUNTER_OPACKETS: 1020 return (stats[0].packets); 1021 case IFCOUNTER_OBYTES: 1022 return (stats[0].bytes); 1023 case IFCOUNTER_OMCASTS: 1024 return (stats[0].mcasts); 1025 default: 1026 return (if_get_counter_default(ifp, cnt)); 1027 } 1028 } 1029 #endif 1030 1031 1032 #ifdef PTNETMAP_STATS 1033 /* Called under core lock. */ 1034 static void 1035 ptnet_tick(void *opaque) 1036 { 1037 struct ptnet_softc *sc = opaque; 1038 int i; 1039 1040 for (i = 0; i < sc->num_rings; i++) { 1041 struct ptnet_queue *pq = sc->queues + i; 1042 struct ptnet_queue_stats cur = pq->stats; 1043 struct timeval now; 1044 unsigned int delta; 1045 1046 microtime(&now); 1047 delta = now.tv_usec - sc->last_ts.tv_usec + 1048 (now.tv_sec - sc->last_ts.tv_sec) * 1000000; 1049 delta /= 1000; /* in milliseconds */ 1050 1051 if (delta == 0) 1052 continue; 1053 1054 device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " 1055 "intr %lu\n", i, delta, 1056 (cur.packets - pq->last_stats.packets), 1057 (cur.kicks - pq->last_stats.kicks), 1058 (cur.intrs - pq->last_stats.intrs)); 1059 pq->last_stats = cur; 1060 } 1061 microtime(&sc->last_ts); 1062 callout_schedule(&sc->tick, hz); 1063 } 1064 #endif /* PTNETMAP_STATS */ 1065 1066 static void 1067 ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) 1068 { 1069 /* We are always active, as the backend netmap port is 1070 * always open in netmap mode. 
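	 * Report a fixed 10 Gbps full-duplex link, matching the media we
	 * advertised at attach time.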
*/ 1071 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 1072 ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; 1073 } 1074 1075 static uint32_t 1076 ptnet_nm_ptctl(if_t ifp, uint32_t cmd) 1077 { 1078 struct ptnet_softc *sc = if_getsoftc(ifp); 1079 /* 1080 * Write a command and read back error status, 1081 * with zero meaning success. 1082 */ 1083 bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); 1084 return bus_read_4(sc->iomem, PTNET_IO_PTCTL); 1085 } 1086 1087 static int 1088 ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd, 1089 unsigned *rxr, unsigned *rxd) 1090 { 1091 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1092 1093 *txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 1094 *rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 1095 *txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 1096 *rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 1097 1098 device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n", 1099 *txr, *rxr, *txd, *rxd); 1100 1101 return 0; 1102 } 1103 1104 static void 1105 ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) 1106 { 1107 int i; 1108 1109 /* Sync krings from the host, reading from 1110 * CSB. */ 1111 for (i = 0; i < sc->num_rings; i++) { 1112 struct ptnet_ring *ptring = sc->queues[i].ptring; 1113 struct netmap_kring *kring; 1114 1115 if (i < na->num_tx_rings) { 1116 kring = na->tx_rings + i; 1117 } else { 1118 kring = na->rx_rings + i - na->num_tx_rings; 1119 } 1120 kring->rhead = kring->ring->head = ptring->head; 1121 kring->rcur = kring->ring->cur = ptring->cur; 1122 kring->nr_hwcur = ptring->hwcur; 1123 kring->nr_hwtail = kring->rtail = 1124 kring->ring->tail = ptring->hwtail; 1125 1126 ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, 1127 ptring->hwcur, ptring->head, ptring->cur, 1128 ptring->hwtail); 1129 ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", 1130 t, i, kring->nr_hwcur, kring->rhead, kring->rcur, 1131 kring->ring->head, kring->ring->cur, kring->nr_hwtail, 1132 kring->rtail, kring->ring->tail); 1133 } 1134 } 1135 1136 static void 1137 ptnet_update_vnet_hdr(struct ptnet_softc *sc) 1138 { 1139 unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; 1140 1141 bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); 1142 sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); 1143 sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; 1144 } 1145 1146 static int 1147 ptnet_nm_register(struct netmap_adapter *na, int onoff) 1148 { 1149 /* device-specific */ 1150 if_t ifp = na->ifp; 1151 struct ptnet_softc *sc = if_getsoftc(ifp); 1152 int native = (na == &sc->ptna->hwup.up); 1153 struct ptnet_queue *pq; 1154 enum txrx t; 1155 int ret = 0; 1156 int i; 1157 1158 if (!onoff) { 1159 sc->ptna->backend_regifs--; 1160 } 1161 1162 /* If this is the last netmap client, guest interrupt enable flags may 1163 * be in arbitrary state. Since these flags are going to be used also 1164 * by the netdevice driver, we have to make sure to start with 1165 * notifications enabled. Also, schedule NAPI to flush pending packets 1166 * in the RX rings, since we will not receive further interrupts 1167 * until these will be processed. */ 1168 if (native && !onoff && na->active_fds == 0) { 1169 D("Exit netmap mode, re-enable interrupts"); 1170 for (i = 0; i < sc->num_rings; i++) { 1171 pq = sc->queues + i; 1172 pq->ptring->guest_need_kick = 1; 1173 } 1174 } 1175 1176 if (onoff) { 1177 if (sc->ptna->backend_regifs == 0) { 1178 /* Initialize notification enable fields in the CSB. 
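			 * Host kicks are enabled on every ring; guest interrupts
			 * are enabled only on RX rings, and only when polling is
			 * disabled.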
			 */
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->ptring->host_need_kick = 1;
				pq->ptring->guest_need_kick =
					(!(ifp->if_capenable & IFCAP_POLLING)
						&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE);
			if (ret) {
				return ret;
			}
		}

		/* Sync from CSB must be done after REGIF PTCTL. Skip this
		 * step only if this is a netmap client and it is not the
		 * first one. */
		if ((!native && sc->ptna->backend_regifs == 0) ||
				(native && na->active_fds == 0)) {
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we don't
		 * want to replace the if_transmit method, nor set NAF_NETMAP_ON. */
		if (native) {
			for_rx_tx(t) {
				for (i = 0; i <= nma_get_nrings(na, t); i++) {
					struct netmap_kring *kring = &NMR(na, t)[i];

					if (nm_kring_pending_on(kring)) {
						kring->nr_mode = NKR_NETMAP_ON;
					}
				}
			}
			nm_set_native_flags(na);
		}

	} else {
		if (native) {
			nm_clear_native_flags(na);
			for_rx_tx(t) {
				for (i = 0; i <= nma_get_nrings(na, t); i++) {
					struct netmap_kring *kring = &NMR(na, t)[i];

					if (nm_kring_pending_off(kring)) {
						kring->nr_mode = NKR_NETMAP_OFF;
					}
				}
			}
		}

		/* Sync from CSB must be done before UNREGIF PTCTL, on the last
		 * netmap client. */
		if (native && na->active_fds == 0) {
			ptnet_sync_from_csb(sc, na);
		}

		if (sc->ptna->backend_regifs == 0) {
			ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE);
		}
	}

	if (onoff) {
		sc->ptna->backend_regifs++;
	}

	return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->ptring, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_rxsync(pq->ptring, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to flush pending transmission requests.
	 * However, vtnet, if_em and if_igb just call ptnet_transmit() here,
	 * at least when using MSI-X interrupts. The if_em driver, instead,
	 * schedules the taskqueue when using legacy interrupts.
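	 * Deferring to the taskqueue keeps this handler short; the drivers
	 * mentioned above trade that for lower latency by transmitting
	 * inline.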
*/ 1302 taskqueue_enqueue(pq->taskq, &pq->task); 1303 } 1304 1305 static void 1306 ptnet_rx_intr(void *opaque) 1307 { 1308 struct ptnet_queue *pq = opaque; 1309 struct ptnet_softc *sc = pq->sc; 1310 unsigned int unused; 1311 1312 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); 1313 #ifdef PTNETMAP_STATS 1314 pq->stats.intrs ++; 1315 #endif /* PTNETMAP_STATS */ 1316 1317 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { 1318 return; 1319 } 1320 1321 /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, 1322 * receive-side processing is executed directly in the interrupt 1323 * service routine. Alternatively, we may schedule the taskqueue. */ 1324 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); 1325 } 1326 1327 /* The following offloadings-related functions are taken from the vtnet 1328 * driver, but the same functionality is required for the ptnet driver. 1329 * As a temporary solution, I copied this code from vtnet and I started 1330 * to generalize it (taking away driver-specific statistic accounting), 1331 * making as little modifications as possible. 1332 * In the future we need to share these functions between vtnet and ptnet. 1333 */ 1334 static int 1335 ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) 1336 { 1337 struct ether_vlan_header *evh; 1338 int offset; 1339 1340 evh = mtod(m, struct ether_vlan_header *); 1341 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1342 /* BMV: We should handle nested VLAN tags too. */ 1343 *etype = ntohs(evh->evl_proto); 1344 offset = sizeof(struct ether_vlan_header); 1345 } else { 1346 *etype = ntohs(evh->evl_encap_proto); 1347 offset = sizeof(struct ether_header); 1348 } 1349 1350 switch (*etype) { 1351 #if defined(INET) 1352 case ETHERTYPE_IP: { 1353 struct ip *ip, iphdr; 1354 if (__predict_false(m->m_len < offset + sizeof(struct ip))) { 1355 m_copydata(m, offset, sizeof(struct ip), 1356 (caddr_t) &iphdr); 1357 ip = &iphdr; 1358 } else 1359 ip = (struct ip *)(m->m_data + offset); 1360 *proto = ip->ip_p; 1361 *start = offset + (ip->ip_hl << 2); 1362 break; 1363 } 1364 #endif 1365 #if defined(INET6) 1366 case ETHERTYPE_IPV6: 1367 *proto = -1; 1368 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); 1369 /* Assert the network stack sent us a valid packet. */ 1370 KASSERT(*start > offset, 1371 ("%s: mbuf %p start %d offset %d proto %d", __func__, m, 1372 *start, offset, *proto)); 1373 break; 1374 #endif 1375 default: 1376 /* Here we should increment the tx_csum_bad_ethtype counter. */ 1377 return (EINVAL); 1378 } 1379 1380 return (0); 1381 } 1382 1383 static int 1384 ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, 1385 int offset, bool allow_ecn, struct virtio_net_hdr *hdr) 1386 { 1387 static struct timeval lastecn; 1388 static int curecn; 1389 struct tcphdr *tcp, tcphdr; 1390 1391 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { 1392 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); 1393 tcp = &tcphdr; 1394 } else 1395 tcp = (struct tcphdr *)(m->m_data + offset); 1396 1397 hdr->hdr_len = offset + (tcp->th_off << 2); 1398 hdr->gso_size = m->m_pkthdr.tso_segsz; 1399 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : 1400 VIRTIO_NET_HDR_GSO_TCPV6; 1401 1402 if (tcp->th_flags & TH_CWR) { 1403 /* 1404 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, 1405 * ECN support is not on a per-interface basis, but globally via 1406 * the net.inet.tcp.ecn.enable sysctl knob. The default is off. 
1407 */ 1408 if (!allow_ecn) { 1409 if (ppsratecheck(&lastecn, &curecn, 1)) 1410 if_printf(ifp, 1411 "TSO with ECN not negotiated with host\n"); 1412 return (ENOTSUP); 1413 } 1414 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 1415 } 1416 1417 /* Here we should increment tx_tso counter. */ 1418 1419 return (0); 1420 } 1421 1422 static struct mbuf * 1423 ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn, 1424 struct virtio_net_hdr *hdr) 1425 { 1426 int flags, etype, csum_start, proto, error; 1427 1428 flags = m->m_pkthdr.csum_flags; 1429 1430 error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start); 1431 if (error) 1432 goto drop; 1433 1434 if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) || 1435 (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) { 1436 /* 1437 * We could compare the IP protocol vs the CSUM_ flag too, 1438 * but that really should not be necessary. 1439 */ 1440 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; 1441 hdr->csum_start = csum_start; 1442 hdr->csum_offset = m->m_pkthdr.csum_data; 1443 /* Here we should increment the tx_csum counter. */ 1444 } 1445 1446 if (flags & CSUM_TSO) { 1447 if (__predict_false(proto != IPPROTO_TCP)) { 1448 /* Likely failed to correctly parse the mbuf. 1449 * Here we should increment the tx_tso_not_tcp 1450 * counter. */ 1451 goto drop; 1452 } 1453 1454 KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, 1455 ("%s: mbuf %p TSO without checksum offload %#x", 1456 __func__, m, flags)); 1457 1458 error = ptnet_tx_offload_tso(ifp, m, etype, csum_start, 1459 allow_ecn, hdr); 1460 if (error) 1461 goto drop; 1462 } 1463 1464 return (m); 1465 1466 drop: 1467 m_freem(m); 1468 return (NULL); 1469 } 1470 1471 static void 1472 ptnet_vlan_tag_remove(struct mbuf *m) 1473 { 1474 struct ether_vlan_header *evh; 1475 1476 evh = mtod(m, struct ether_vlan_header *); 1477 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); 1478 m->m_flags |= M_VLANTAG; 1479 1480 /* Strip the 802.1Q header. */ 1481 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, 1482 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1483 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1484 } 1485 1486 /* 1487 * Use the checksum offset in the VirtIO header to set the 1488 * correct CSUM_* flags. 1489 */ 1490 static int 1491 ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start, 1492 struct virtio_net_hdr *hdr) 1493 { 1494 #if defined(INET) || defined(INET6) 1495 int offset = hdr->csum_start + hdr->csum_offset; 1496 #endif 1497 1498 /* Only do a basic sanity check on the offset. */ 1499 switch (eth_type) { 1500 #if defined(INET) 1501 case ETHERTYPE_IP: 1502 if (__predict_false(offset < ip_start + sizeof(struct ip))) 1503 return (1); 1504 break; 1505 #endif 1506 #if defined(INET6) 1507 case ETHERTYPE_IPV6: 1508 if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) 1509 return (1); 1510 break; 1511 #endif 1512 default: 1513 /* Here we should increment the rx_csum_bad_ethtype counter. */ 1514 return (1); 1515 } 1516 1517 /* 1518 * Use the offset to determine the appropriate CSUM_* flags. This is 1519 * a bit dirty, but we can get by with it since the checksum offsets 1520 * happen to be different. We assume the host host does not do IPv4 1521 * header checksum offloading. 
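	 * (csum_offset is 6 for UDP, 16 for TCP and 8 for SCTP, which is
	 * what the switch below relies on.)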
1522 */ 1523 switch (hdr->csum_offset) { 1524 case offsetof(struct udphdr, uh_sum): 1525 case offsetof(struct tcphdr, th_sum): 1526 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1527 m->m_pkthdr.csum_data = 0xFFFF; 1528 break; 1529 case offsetof(struct sctphdr, checksum): 1530 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1531 break; 1532 default: 1533 /* Here we should increment the rx_csum_bad_offset counter. */ 1534 return (1); 1535 } 1536 1537 return (0); 1538 } 1539 1540 static int 1541 ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start, 1542 struct virtio_net_hdr *hdr) 1543 { 1544 int offset, proto; 1545 1546 switch (eth_type) { 1547 #if defined(INET) 1548 case ETHERTYPE_IP: { 1549 struct ip *ip; 1550 if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) 1551 return (1); 1552 ip = (struct ip *)(m->m_data + ip_start); 1553 proto = ip->ip_p; 1554 offset = ip_start + (ip->ip_hl << 2); 1555 break; 1556 } 1557 #endif 1558 #if defined(INET6) 1559 case ETHERTYPE_IPV6: 1560 if (__predict_false(m->m_len < ip_start + 1561 sizeof(struct ip6_hdr))) 1562 return (1); 1563 offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); 1564 if (__predict_false(offset < 0)) 1565 return (1); 1566 break; 1567 #endif 1568 default: 1569 /* Here we should increment the rx_csum_bad_ethtype counter. */ 1570 return (1); 1571 } 1572 1573 switch (proto) { 1574 case IPPROTO_TCP: 1575 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) 1576 return (1); 1577 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1578 m->m_pkthdr.csum_data = 0xFFFF; 1579 break; 1580 case IPPROTO_UDP: 1581 if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) 1582 return (1); 1583 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1584 m->m_pkthdr.csum_data = 0xFFFF; 1585 break; 1586 case IPPROTO_SCTP: 1587 if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) 1588 return (1); 1589 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1590 break; 1591 default: 1592 /* 1593 * For the remaining protocols, FreeBSD does not support 1594 * checksum offloading, so the checksum will be recomputed. 1595 */ 1596 #if 0 1597 if_printf(ifp, "cksum offload of unsupported " 1598 "protocol eth_type=%#x proto=%d csum_start=%d " 1599 "csum_offset=%d\n", __func__, eth_type, proto, 1600 hdr->csum_start, hdr->csum_offset); 1601 #endif 1602 break; 1603 } 1604 1605 return (0); 1606 } 1607 1608 /* 1609 * Set the appropriate CSUM_* flags. Unfortunately, the information 1610 * provided is not directly useful to us. The VirtIO header gives the 1611 * offset of the checksum, which is all Linux needs, but this is not 1612 * how FreeBSD does things. We are forced to peek inside the packet 1613 * a bit. 1614 * 1615 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD 1616 * could accept the offsets and let the stack figure it out. 1617 */ 1618 static int 1619 ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) 1620 { 1621 struct ether_header *eh; 1622 struct ether_vlan_header *evh; 1623 uint16_t eth_type; 1624 int offset, error; 1625 1626 eh = mtod(m, struct ether_header *); 1627 eth_type = ntohs(eh->ether_type); 1628 if (eth_type == ETHERTYPE_VLAN) { 1629 /* BMV: We should handle nested VLAN tags too. 
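		 * For now only the outermost 802.1Q tag is peeled off to find
		 * the real ethertype.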
		 */
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else
		offset = sizeof(struct ether_header);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr);
	else
		error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr);

	return (error);
}
/* End of offloading-related functions to be shared with vtnet. */

static inline void
ptnet_sync_tail(struct ptnet_ring *ptring, struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;

	/* Update hwcur and hwtail as known by the host. */
	ptnetmap_guest_read_kring_csb(ptring, kring);

	/* nm_sync_finalize */
	ring->tail = kring->rtail = kring->nr_hwtail;
}

static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct ptnet_ring *ptring = pq->ptring;

	/* Some packets have been pushed to the netmap ring. We have
	 * to tell the host to process the new packets, updating cur
	 * and head in the CSB. */
	ring->head = ring->cur = head;

	/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
	kring->rcur = kring->rhead = head;

	ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);

	/* Kick the host if needed. */
	if (NM_ACCESS_ONCE(ptring->host_need_kick)) {
		ptring->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}

#define PTNET_TX_NOSPACE(_h, _k, _min)	\
	((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
		(_k)->rtail - (_h)) < (_min)

/* This function may be called by the network stack, or by
 * the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
			   bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	if_t ifp = sc->ifp;
	unsigned int batch_count = 0;
	struct ptnet_ring *ptring;
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	unsigned int count = 0;
	unsigned int minspace;
	unsigned int head;
	unsigned int lim;
	struct mbuf *mhead;
	struct mbuf *mf;
	int nmbuf_bytes;
	uint8_t *nmbuf;

	if (!PTNET_Q_TRYLOCK(pq)) {
		/* We failed to acquire the lock, schedule the taskqueue. */
		RD(1, "Deferring TX work");
		if (may_resched) {
			taskqueue_enqueue(pq->taskq, &pq->task);
		}

		return 0;
	}

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		PTNET_Q_UNLOCK(pq);
		RD(1, "Interface is down");
		return ENETDOWN;
	}

	ptring = pq->ptring;
	kring = na->tx_rings + pq->kring_id;
	ring = kring->ring;
	lim = kring->nkr_num_slots - 1;
	head = ring->head;
	minspace = sc->min_tx_space;

	while (count < budget) {
		if (PTNET_TX_NOSPACE(head, kring, minspace)) {
			/* We ran out of slots, let's see if the host has
			 * freed up some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ptring, kring);

			if (PTNET_TX_NOSPACE(head, kring, minspace)) {
				/* Still no slots available.
Reactivate the 1741 * interrupts so that we can be notified 1742 * when some free slots are made available by 1743 * the host. */ 1744 ptring->guest_need_kick = 1; 1745 1746 /* Double-check. */ 1747 ptnet_sync_tail(ptring, kring); 1748 if (likely(PTNET_TX_NOSPACE(head, kring, 1749 minspace))) { 1750 break; 1751 } 1752 1753 RD(1, "Found more slots by doublecheck"); 1754 /* More slots were freed before reactivating 1755 * the interrupts. */ 1756 ptring->guest_need_kick = 0; 1757 } 1758 } 1759 1760 mhead = drbr_peek(ifp, pq->bufring); 1761 if (!mhead) { 1762 break; 1763 } 1764 1765 /* Initialize transmission state variables. */ 1766 slot = ring->slot + head; 1767 nmbuf = NMB(na, slot); 1768 nmbuf_bytes = 0; 1769 1770 /* If needed, prepare the virtio-net header at the beginning 1771 * of the first slot. */ 1772 if (have_vnet_hdr) { 1773 struct virtio_net_hdr *vh = 1774 (struct virtio_net_hdr *)nmbuf; 1775 1776 /* For performance, we could replace this memset() with 1777 * two 8-bytes-wide writes. */ 1778 memset(nmbuf, 0, PTNET_HDR_SIZE); 1779 if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { 1780 mhead = ptnet_tx_offload(ifp, mhead, false, 1781 vh); 1782 if (unlikely(!mhead)) { 1783 /* Packet dropped because errors 1784 * occurred while preparing the vnet 1785 * header. Let's go ahead with the next 1786 * packet. */ 1787 pq->stats.errors ++; 1788 drbr_advance(ifp, pq->bufring); 1789 continue; 1790 } 1791 } 1792 ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x " 1793 "csum_start %u csum_ofs %u hdr_len = %u " 1794 "gso_size %u gso_type %x", __func__, 1795 mhead->m_pkthdr.csum_flags, vh->flags, 1796 vh->csum_start, vh->csum_offset, vh->hdr_len, 1797 vh->gso_size, vh->gso_type); 1798 1799 nmbuf += PTNET_HDR_SIZE; 1800 nmbuf_bytes += PTNET_HDR_SIZE; 1801 } 1802 1803 for (mf = mhead; mf; mf = mf->m_next) { 1804 uint8_t *mdata = mf->m_data; 1805 int mlen = mf->m_len; 1806 1807 for (;;) { 1808 int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; 1809 1810 if (mlen < copy) { 1811 copy = mlen; 1812 } 1813 memcpy(nmbuf, mdata, copy); 1814 1815 mdata += copy; 1816 mlen -= copy; 1817 nmbuf += copy; 1818 nmbuf_bytes += copy; 1819 1820 if (!mlen) { 1821 break; 1822 } 1823 1824 slot->len = nmbuf_bytes; 1825 slot->flags = NS_MOREFRAG; 1826 1827 head = nm_next(head, lim); 1828 KASSERT(head != ring->tail, 1829 ("Unexpectedly run out of TX space")); 1830 slot = ring->slot + head; 1831 nmbuf = NMB(na, slot); 1832 nmbuf_bytes = 0; 1833 } 1834 } 1835 1836 /* Complete last slot and update head. */ 1837 slot->len = nmbuf_bytes; 1838 slot->flags = 0; 1839 head = nm_next(head, lim); 1840 1841 /* Consume the packet just processed. */ 1842 drbr_advance(ifp, pq->bufring); 1843 1844 /* Copy the packet to listeners. 
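		 * (BPF taps such as tcpdump) before the chain is freed and the
		 * statistics are updated.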
*/ 1845 ETHER_BPF_MTAP(ifp, mhead); 1846 1847 pq->stats.packets ++; 1848 pq->stats.bytes += mhead->m_pkthdr.len; 1849 if (mhead->m_flags & M_MCAST) { 1850 pq->stats.mcasts ++; 1851 } 1852 1853 m_freem(mhead); 1854 1855 count ++; 1856 if (++batch_count == PTNET_TX_BATCH) { 1857 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1858 batch_count = 0; 1859 } 1860 } 1861 1862 if (batch_count) { 1863 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1864 } 1865 1866 if (count >= budget && may_resched) { 1867 DBG(RD(1, "out of budget: resched, %d mbufs pending\n", 1868 drbr_inuse(ifp, pq->bufring))); 1869 taskqueue_enqueue(pq->taskq, &pq->task); 1870 } 1871 1872 PTNET_Q_UNLOCK(pq); 1873 1874 return count; 1875 } 1876 1877 static int 1878 ptnet_transmit(if_t ifp, struct mbuf *m) 1879 { 1880 struct ptnet_softc *sc = if_getsoftc(ifp); 1881 struct ptnet_queue *pq; 1882 unsigned int queue_idx; 1883 int err; 1884 1885 DBG(device_printf(sc->dev, "transmit %p\n", m)); 1886 1887 /* Insert 802.1Q header if needed. */ 1888 if (m->m_flags & M_VLANTAG) { 1889 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1890 if (m == NULL) { 1891 return ENOBUFS; 1892 } 1893 m->m_flags &= ~M_VLANTAG; 1894 } 1895 1896 /* Get the flow-id if available. */ 1897 queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? 1898 m->m_pkthdr.flowid : curcpu; 1899 1900 if (unlikely(queue_idx >= sc->num_tx_rings)) { 1901 queue_idx %= sc->num_tx_rings; 1902 } 1903 1904 pq = sc->queues + queue_idx; 1905 1906 err = drbr_enqueue(ifp, pq->bufring, m); 1907 if (err) { 1908 /* ENOBUFS when the bufring is full */ 1909 RD(1, "%s: drbr_enqueue() failed %d\n", 1910 __func__, err); 1911 pq->stats.errors ++; 1912 return err; 1913 } 1914 1915 if (ifp->if_capenable & IFCAP_POLLING) { 1916 /* If polling is on, the transmit queues will be 1917 * drained by the poller. */ 1918 return 0; 1919 } 1920 1921 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); 1922 1923 return (err < 0) ? 
err : 0; 1924 } 1925 1926 static unsigned int 1927 ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) 1928 { 1929 struct netmap_ring *ring = kring->ring; 1930 struct netmap_slot *slot = ring->slot + head; 1931 1932 for (;;) { 1933 head = nm_next(head, kring->nkr_num_slots - 1); 1934 if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { 1935 break; 1936 } 1937 slot = ring->slot + head; 1938 } 1939 1940 return head; 1941 } 1942 1943 static inline struct mbuf * 1944 ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) 1945 { 1946 uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; 1947 1948 do { 1949 unsigned int copy; 1950 1951 if (mtail->m_len == MCLBYTES) { 1952 struct mbuf *mf; 1953 1954 mf = m_getcl(M_NOWAIT, MT_DATA, 0); 1955 if (unlikely(!mf)) { 1956 return NULL; 1957 } 1958 1959 mtail->m_next = mf; 1960 mtail = mf; 1961 mdata = mtod(mtail, uint8_t *); 1962 mtail->m_len = 0; 1963 } 1964 1965 copy = MCLBYTES - mtail->m_len; 1966 if (nmbuf_len < copy) { 1967 copy = nmbuf_len; 1968 } 1969 1970 memcpy(mdata, nmbuf, copy); 1971 1972 nmbuf += copy; 1973 nmbuf_len -= copy; 1974 mdata += copy; 1975 mtail->m_len += copy; 1976 } while (nmbuf_len); 1977 1978 return mtail; 1979 } 1980 1981 static int 1982 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) 1983 { 1984 struct ptnet_softc *sc = pq->sc; 1985 bool have_vnet_hdr = sc->vnet_hdr_len; 1986 struct ptnet_ring *ptring = pq->ptring; 1987 struct netmap_adapter *na = &sc->ptna->dr.up; 1988 struct netmap_kring *kring = na->rx_rings + pq->kring_id; 1989 struct netmap_ring *ring = kring->ring; 1990 unsigned int const lim = kring->nkr_num_slots - 1; 1991 unsigned int head = ring->head; 1992 unsigned int batch_count = 0; 1993 if_t ifp = sc->ifp; 1994 unsigned int count = 0; 1995 1996 PTNET_Q_LOCK(pq); 1997 1998 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 1999 goto unlock; 2000 } 2001 2002 kring->nr_kflags &= ~NKR_PENDINTR; 2003 2004 while (count < budget) { 2005 unsigned int prev_head = head; 2006 struct mbuf *mhead, *mtail; 2007 struct virtio_net_hdr *vh; 2008 struct netmap_slot *slot; 2009 unsigned int nmbuf_len; 2010 uint8_t *nmbuf; 2011 host_sync: 2012 if (head == ring->tail) { 2013 /* We ran out of slot, let's see if the host has 2014 * added some, by reading hwcur and hwtail from 2015 * the CSB. */ 2016 ptnet_sync_tail(ptring, kring); 2017 2018 if (head == ring->tail) { 2019 /* Still no slots available. Reactivate 2020 * interrupts as they were disabled by the 2021 * host thread right before issuing the 2022 * last interrupt. */ 2023 ptring->guest_need_kick = 1; 2024 2025 /* Double-check. */ 2026 ptnet_sync_tail(ptring, kring); 2027 if (likely(head == ring->tail)) { 2028 break; 2029 } 2030 ptring->guest_need_kick = 0; 2031 } 2032 } 2033 2034 /* Initialize ring state variables, possibly grabbing the 2035 * virtio-net header. */ 2036 slot = ring->slot + head; 2037 nmbuf = NMB(na, slot); 2038 nmbuf_len = slot->len; 2039 2040 vh = (struct virtio_net_hdr *)nmbuf; 2041 if (have_vnet_hdr) { 2042 if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { 2043 /* There is no good reason why host should 2044 * put the header in multiple netmap slots. 2045 * If this is the case, discard. 
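				 * ptnet_rx_discard() then skips the remaining
				 * fragments and the packet is counted as an
				 * input drop (iqdrops).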

static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct ptnet_ring *ptring = pq->ptring;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings + pq->kring_id;
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int head = ring->head;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	while (count < budget) {
		unsigned int prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots. Let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ptring, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				ptring->guest_need_kick = 1;

				/* Double-check. */
				ptnet_sync_tail(ptring, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				ptring->guest_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host should
				 * put the header in multiple netmap slots.
				 * If this is the case, discard. */
				RD(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops ++;
				goto skip;
			}
			ND(1, "%s: vnet hdr: flags %x csum_start %u "
			      "csum_ofs %u hdr_len = %u gso_size %u "
			      "gso_type %x", __func__, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors ++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					      " mbuf frag, reset head %u --> %u\n",
					      __func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors ++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of whether
			 * NS_MOREFRAG is set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * NS_MOREFRAG set. Drop it and continue the
				 * outer cycle (to do the double-check). */
				RD(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops ++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
					| VIRTIO_NET_HDR_F_DATA_VALID))) {
			if (unlikely(ptnet_rx_csum(mhead, vh))) {
				m_freem(mhead);
				RD(1, "Csum offload error: dropping");
				pq->stats.iqdrops ++;
				goto skip;
			}
		}

		pq->stats.packets ++;
		pq->stats.bytes += mhead->m_pkthdr.len;

		PTNET_Q_UNLOCK(pq);
		(*ifp->if_input)(ifp, mhead);
		PTNET_Q_LOCK(pq);

		if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
			/* The interface has gone down while we didn't
			 * have the lock. Stop any processing and exit. */
			goto unlock;
		}
skip:
		count ++;
		if (++batch_count == PTNET_RX_BATCH) {
			/* Some packets have been pushed to the network stack.
			 * We need to update the CSB to tell the host about the
			 * new ring->cur and ring->head (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(RD(1, "out of budget: resched h %u t %u\n",
			  head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}
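
/*
 * Illustrative sketch (not part of the original driver): the
 * "re-enable and double-check" pattern used by ptnet_rx_eof() above when
 * the RX ring looks empty, isolated into a helper. Notifications from
 * the host are re-enabled through the CSB and the ring tail is read once
 * more, to close the race with a host that published new slots right
 * before guest_need_kick was set. The helper name is hypothetical; it
 * returns true only if the ring is really empty.
 */
#if 0
static inline bool
ptnet_rx_ring_empty(struct ptnet_ring *ptring, struct netmap_kring *kring,
		    unsigned int head)
{
	/* First look: refresh hwtail from the CSB. */
	ptnet_sync_tail(ptring, kring);
	if (head != kring->ring->tail)
		return (false);

	/* Ask the host for an interrupt, then double-check the tail. */
	ptring->guest_need_kick = 1;
	ptnet_sync_tail(ptring, kring);
	if (head != kring->ring->tail) {
		ptring->guest_need_kick = 0;
		return (false);
	}

	return (true);
}
#endif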

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
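
/*
 * Illustrative sketch (not part of the original driver): how deferred
 * per-queue work like ptnet_tx_task()/ptnet_rx_task() is typically wired
 * up in a FreeBSD driver. The actual taskqueue setup of this driver
 * lives in its attach/interrupt-init path earlier in the file and may
 * differ in details; the helper name below is hypothetical.
 */
#if 0
static void
ptnet_queues_taskq_init(struct ptnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		/* TX queues drain mbufs, RX queues feed the stack. */
		TASK_INIT(&pq->task, 0, (i < sc->num_tx_rings) ?
			  ptnet_tx_task : ptnet_rx_task, pq);
		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
				taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
					device_get_nameunit(sc->dev), i);
	}
}
#endif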

#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS differently from
 * POLL_ONLY, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	RD(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result; we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when the initial budget is smaller
			 * than sc->num_rings, since a budget of at least one
			 * packet is given to each queue anyway. Just pretend
			 * we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */
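
/*
 * Illustrative sketch (not part of the original driver): ptnet_poll()
 * above only runs after it has been registered with the kernel poller,
 * which is normally done from the SIOCSIFCAP ioctl path when the
 * IFCAP_POLLING capability is toggled. The driver's ioctl handler is
 * defined earlier in the file and may differ; the helper name below is
 * hypothetical and only shows the usual register/deregister calls.
 */
#if 0
#ifdef DEVICE_POLLING
static int
ptnet_polling_toggle(if_t ifp, bool enable)
{
	int err;

	if (enable) {
		err = ether_poll_register(ptnet_poll, ifp);
		if (err == 0)
			ifp->if_capenable |= IFCAP_POLLING;
	} else {
		err = ether_poll_deregister(ifp);
		ifp->if_capenable &= ~IFCAP_POLLING;
	}

	return (err);
}
#endif /* DEVICE_POLLING */
#endif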