1 /*- 2 * Copyright (c) 2016, Vincenzo Maffione 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 /* Driver for ptnet paravirtualized network device. */ 30 31 #include <sys/cdefs.h> 32 33 #include <sys/types.h> 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/socket.h> 42 #include <sys/sysctl.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/taskqueue.h> 46 #include <sys/smp.h> 47 #include <sys/time.h> 48 #include <machine/smp.h> 49 50 #include <vm/uma.h> 51 #include <vm/vm.h> 52 #include <vm/pmap.h> 53 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_var.h> 57 #include <net/if_arp.h> 58 #include <net/if_dl.h> 59 #include <net/if_types.h> 60 #include <net/if_media.h> 61 #include <net/if_vlan_var.h> 62 #include <net/bpf.h> 63 64 #include <netinet/in_systm.h> 65 #include <netinet/in.h> 66 #include <netinet/ip.h> 67 #include <netinet/ip6.h> 68 #include <netinet6/ip6_var.h> 69 #include <netinet/udp.h> 70 #include <netinet/tcp.h> 71 72 #include <machine/bus.h> 73 #include <machine/resource.h> 74 #include <sys/bus.h> 75 #include <sys/rman.h> 76 77 #include <dev/pci/pcivar.h> 78 #include <dev/pci/pcireg.h> 79 80 #include "opt_inet.h" 81 #include "opt_inet6.h" 82 83 #include <sys/selinfo.h> 84 #include <net/netmap.h> 85 #include <dev/netmap/netmap_kern.h> 86 #include <net/netmap_virt.h> 87 #include <dev/netmap/netmap_mem2.h> 88 #include <dev/virtio/network/virtio_net.h> 89 90 #ifndef INET 91 #error "INET not defined, cannot support offloadings" 92 #endif 93 94 #if __FreeBSD_version >= 1100000 95 static uint64_t ptnet_get_counter(if_t, ift_counter); 96 #else 97 typedef struct ifnet *if_t; 98 #define if_getsoftc(_ifp) (_ifp)->if_softc 99 #endif 100 101 //#define PTNETMAP_STATS 102 //#define DEBUG 103 #ifdef DEBUG 104 #define DBG(x) x 105 #else /* !DEBUG */ 106 #define DBG(x) 107 #endif /* !DEBUG */ 108 109 extern int ptnet_vnet_hdr; /* Tunable parameter */ 110 111 struct ptnet_softc; 112 113 struct ptnet_queue_stats { 114 uint64_t packets; /* if_[io]packets */ 115 uint64_t bytes; /* if_[io]bytes */ 116 uint64_t errors; /* 
if_[io]errors */ 117 uint64_t iqdrops; /* if_iqdrops */ 118 uint64_t mcasts; /* if_[io]mcasts */ 119 #ifdef PTNETMAP_STATS 120 uint64_t intrs; 121 uint64_t kicks; 122 #endif /* PTNETMAP_STATS */ 123 }; 124 125 struct ptnet_queue { 126 struct ptnet_softc *sc; 127 struct resource *irq; 128 void *cookie; 129 int kring_id; 130 struct nm_csb_atok *atok; 131 struct nm_csb_ktoa *ktoa; 132 unsigned int kick; 133 struct mtx lock; 134 struct buf_ring *bufring; /* for TX queues */ 135 struct ptnet_queue_stats stats; 136 #ifdef PTNETMAP_STATS 137 struct ptnet_queue_stats last_stats; 138 #endif /* PTNETMAP_STATS */ 139 struct taskqueue *taskq; 140 struct task task; 141 char lock_name[16]; 142 }; 143 144 #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) 145 #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) 146 #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) 147 148 struct ptnet_softc { 149 device_t dev; 150 if_t ifp; 151 struct ifmedia media; 152 struct mtx lock; 153 char lock_name[16]; 154 char hwaddr[ETHER_ADDR_LEN]; 155 156 /* Mirror of PTFEAT register. */ 157 uint32_t ptfeatures; 158 unsigned int vnet_hdr_len; 159 160 /* PCI BARs support. */ 161 struct resource *iomem; 162 struct resource *msix_mem; 163 164 unsigned int num_rings; 165 unsigned int num_tx_rings; 166 struct ptnet_queue *queues; 167 struct ptnet_queue *rxqueues; 168 struct nm_csb_atok *csb_gh; 169 struct nm_csb_ktoa *csb_hg; 170 171 unsigned int min_tx_space; 172 173 struct netmap_pt_guest_adapter *ptna; 174 175 struct callout tick; 176 #ifdef PTNETMAP_STATS 177 struct timeval last_ts; 178 #endif /* PTNETMAP_STATS */ 179 }; 180 181 #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) 182 #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 183 184 static int ptnet_probe(device_t); 185 static int ptnet_attach(device_t); 186 static int ptnet_detach(device_t); 187 static int ptnet_suspend(device_t); 188 static int ptnet_resume(device_t); 189 static int ptnet_shutdown(device_t); 190 191 static void ptnet_init(void *opaque); 192 static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); 193 static int ptnet_init_locked(struct ptnet_softc *sc); 194 static int ptnet_stop(struct ptnet_softc *sc); 195 static int ptnet_transmit(if_t ifp, struct mbuf *m); 196 static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, 197 unsigned int budget, 198 bool may_resched); 199 static void ptnet_qflush(if_t ifp); 200 static void ptnet_tx_task(void *context, int pending); 201 202 static int ptnet_media_change(if_t ifp); 203 static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); 204 #ifdef PTNETMAP_STATS 205 static void ptnet_tick(void *opaque); 206 #endif 207 208 static int ptnet_irqs_init(struct ptnet_softc *sc); 209 static void ptnet_irqs_fini(struct ptnet_softc *sc); 210 211 static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd); 212 static int ptnet_nm_config(struct netmap_adapter *na, 213 struct nm_config_info *info); 214 static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); 215 static int ptnet_nm_register(struct netmap_adapter *na, int onoff); 216 static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); 217 static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); 218 static void ptnet_nm_intr(struct netmap_adapter *na, int onoff); 219 220 static void ptnet_tx_intr(void *opaque); 221 static void ptnet_rx_intr(void *opaque); 222 223 static unsigned ptnet_rx_discard(struct netmap_kring *kring, 224 unsigned int head); 225 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, 
226 bool may_resched); 227 static void ptnet_rx_task(void *context, int pending); 228 229 #ifdef DEVICE_POLLING 230 static poll_handler_t ptnet_poll; 231 #endif 232 233 static device_method_t ptnet_methods[] = { 234 DEVMETHOD(device_probe, ptnet_probe), 235 DEVMETHOD(device_attach, ptnet_attach), 236 DEVMETHOD(device_detach, ptnet_detach), 237 DEVMETHOD(device_suspend, ptnet_suspend), 238 DEVMETHOD(device_resume, ptnet_resume), 239 DEVMETHOD(device_shutdown, ptnet_shutdown), 240 DEVMETHOD_END 241 }; 242 243 static driver_t ptnet_driver = { 244 "ptnet", 245 ptnet_methods, 246 sizeof(struct ptnet_softc) 247 }; 248 249 /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */ 250 static devclass_t ptnet_devclass; 251 DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass, 252 NULL, NULL, SI_ORDER_MIDDLE + 2); 253 254 static int 255 ptnet_probe(device_t dev) 256 { 257 if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || 258 pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { 259 return (ENXIO); 260 } 261 262 device_set_desc(dev, "ptnet network adapter"); 263 264 return (BUS_PROBE_DEFAULT); 265 } 266 267 static inline void ptnet_kick(struct ptnet_queue *pq) 268 { 269 #ifdef PTNETMAP_STATS 270 pq->stats.kicks ++; 271 #endif /* PTNETMAP_STATS */ 272 bus_write_4(pq->sc->iomem, pq->kick, 0); 273 } 274 275 #define PTNET_BUF_RING_SIZE 4096 276 #define PTNET_RX_BUDGET 512 277 #define PTNET_RX_BATCH 1 278 #define PTNET_TX_BUDGET 512 279 #define PTNET_TX_BATCH 64 280 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) 281 #define PTNET_MAX_PKT_SIZE 65536 282 283 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP) 284 #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) 285 #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ 286 PTNET_CSUM_OFFLOAD_IPV6) 287 288 static int 289 ptnet_attach(device_t dev) 290 { 291 uint32_t ptfeatures = 0; 292 unsigned int num_rx_rings, num_tx_rings; 293 struct netmap_adapter na_arg; 294 unsigned int nifp_offset; 295 struct ptnet_softc *sc; 296 if_t ifp; 297 uint32_t macreg; 298 int err, rid; 299 int i; 300 301 sc = device_get_softc(dev); 302 sc->dev = dev; 303 304 /* Setup PCI resources. */ 305 pci_enable_busmaster(dev); 306 307 rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); 308 sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 309 RF_ACTIVE); 310 if (sc->iomem == NULL) { 311 device_printf(dev, "Failed to map I/O BAR\n"); 312 return (ENXIO); 313 } 314 315 /* Negotiate features with the hypervisor. */ 316 if (ptnet_vnet_hdr) { 317 ptfeatures |= PTNETMAP_F_VNET_HDR; 318 } 319 bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ 320 ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ 321 sc->ptfeatures = ptfeatures; 322 323 num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 324 num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 325 sc->num_rings = num_tx_rings + num_rx_rings; 326 sc->num_tx_rings = num_tx_rings; 327 328 if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) { 329 device_printf(dev, "CSB cannot handle that many rings (%u)\n", 330 sc->num_rings); 331 err = ENOMEM; 332 goto err_path; 333 } 334 335 /* Allocate CSB and carry out CSB allocation protocol. 
*/ 336 sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, 337 (size_t)0, -1UL, PAGE_SIZE, 0); 338 if (sc->csb_gh == NULL) { 339 device_printf(dev, "Failed to allocate CSB\n"); 340 err = ENOMEM; 341 goto err_path; 342 } 343 sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE); 344 345 { 346 /* 347 * We use uint64_t rather than vm_paddr_t since we 348 * need 64 bit addresses even on 32 bit platforms. 349 */ 350 uint64_t paddr = vtophys(sc->csb_gh); 351 352 /* CSB allocation protocol: write to BAH first, then 353 * to BAL (for both GH and HG sections). */ 354 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 355 (paddr >> 32) & 0xffffffff); 356 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 357 paddr & 0xffffffff); 358 paddr = vtophys(sc->csb_hg); 359 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 360 (paddr >> 32) & 0xffffffff); 361 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 362 paddr & 0xffffffff); 363 } 364 365 /* Allocate and initialize per-queue data structures. */ 366 sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, 367 M_DEVBUF, M_NOWAIT | M_ZERO); 368 if (sc->queues == NULL) { 369 err = ENOMEM; 370 goto err_path; 371 } 372 sc->rxqueues = sc->queues + num_tx_rings; 373 374 for (i = 0; i < sc->num_rings; i++) { 375 struct ptnet_queue *pq = sc->queues + i; 376 377 pq->sc = sc; 378 pq->kring_id = i; 379 pq->kick = PTNET_IO_KICK_BASE + 4 * i; 380 pq->atok = sc->csb_gh + i; 381 pq->ktoa = sc->csb_hg + i; 382 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", 383 device_get_nameunit(dev), i); 384 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); 385 if (i >= num_tx_rings) { 386 /* RX queue: fix kring_id. */ 387 pq->kring_id -= num_tx_rings; 388 } else { 389 /* TX queue: allocate buf_ring. */ 390 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, 391 M_DEVBUF, M_NOWAIT, &pq->lock); 392 if (pq->bufring == NULL) { 393 err = ENOMEM; 394 goto err_path; 395 } 396 } 397 } 398 399 sc->min_tx_space = 64; /* Safe initial value. */ 400 401 err = ptnet_irqs_init(sc); 402 if (err) { 403 goto err_path; 404 } 405 406 /* Setup Ethernet interface. 
*/ 407 sc->ifp = ifp = if_alloc(IFT_ETHER); 408 if (ifp == NULL) { 409 device_printf(dev, "Failed to allocate ifnet\n"); 410 err = ENOMEM; 411 goto err_path; 412 } 413 414 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 415 ifp->if_baudrate = IF_Gbps(10); 416 ifp->if_softc = sc; 417 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; 418 ifp->if_init = ptnet_init; 419 ifp->if_ioctl = ptnet_ioctl; 420 #if __FreeBSD_version >= 1100000 421 ifp->if_get_counter = ptnet_get_counter; 422 #endif 423 ifp->if_transmit = ptnet_transmit; 424 ifp->if_qflush = ptnet_qflush; 425 426 ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, 427 ptnet_media_status); 428 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); 429 ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); 430 431 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); 432 sc->hwaddr[0] = (macreg >> 8) & 0xff; 433 sc->hwaddr[1] = macreg & 0xff; 434 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); 435 sc->hwaddr[2] = (macreg >> 24) & 0xff; 436 sc->hwaddr[3] = (macreg >> 16) & 0xff; 437 sc->hwaddr[4] = (macreg >> 8) & 0xff; 438 sc->hwaddr[5] = macreg & 0xff; 439 440 ether_ifattach(ifp, sc->hwaddr); 441 442 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 443 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 444 445 if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { 446 /* Similarly to what the vtnet driver does, we can emulate 447 * VLAN offloadings by inserting and removing the 802.1Q 448 * header during transmit and receive. We are then able 449 * to do checksum offloading of VLAN frames. */ 450 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 451 | IFCAP_VLAN_HWCSUM 452 | IFCAP_TSO | IFCAP_LRO 453 | IFCAP_VLAN_HWTSO 454 | IFCAP_VLAN_HWTAGGING; 455 } 456 457 ifp->if_capenable = ifp->if_capabilities; 458 #ifdef DEVICE_POLLING 459 /* Don't enable polling by default. */ 460 ifp->if_capabilities |= IFCAP_POLLING; 461 #endif 462 snprintf(sc->lock_name, sizeof(sc->lock_name), 463 "%s", device_get_nameunit(dev)); 464 mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); 465 callout_init_mtx(&sc->tick, &sc->lock, 0); 466 467 /* Prepare a netmap_adapter struct instance to do netmap_attach(). */ 468 nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); 469 memset(&na_arg, 0, sizeof(na_arg)); 470 na_arg.ifp = ifp; 471 na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 472 na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 473 na_arg.num_tx_rings = num_tx_rings; 474 na_arg.num_rx_rings = num_rx_rings; 475 na_arg.nm_config = ptnet_nm_config; 476 na_arg.nm_krings_create = ptnet_nm_krings_create; 477 na_arg.nm_krings_delete = ptnet_nm_krings_delete; 478 na_arg.nm_dtor = ptnet_nm_dtor; 479 na_arg.nm_intr = ptnet_nm_intr; 480 na_arg.nm_register = ptnet_nm_register; 481 na_arg.nm_txsync = ptnet_nm_txsync; 482 na_arg.nm_rxsync = ptnet_nm_rxsync; 483 484 netmap_pt_guest_attach(&na_arg, nifp_offset, 485 bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); 486 487 /* Now a netmap adapter for this ifp has been allocated, and it 488 * can be accessed through NA(ifp). We also have to initialize the CSB 489 * pointer. */ 490 sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); 491 492 /* If virtio-net header was negotiated, set the virt_hdr_len field in 493 * the netmap adapter, to inform users that this netmap adapter requires 494 * the application to deal with the headers. 
*/ 495 ptnet_update_vnet_hdr(sc); 496 497 device_printf(dev, "%s() completed\n", __func__); 498 499 return (0); 500 501 err_path: 502 ptnet_detach(dev); 503 return err; 504 } 505 506 /* Stop host sync-kloop if it was running. */ 507 static void 508 ptnet_device_shutdown(struct ptnet_softc *sc) 509 { 510 ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); 511 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); 512 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); 513 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); 514 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); 515 } 516 517 static int 518 ptnet_detach(device_t dev) 519 { 520 struct ptnet_softc *sc = device_get_softc(dev); 521 int i; 522 523 ptnet_device_shutdown(sc); 524 525 #ifdef DEVICE_POLLING 526 if (sc->ifp->if_capenable & IFCAP_POLLING) { 527 ether_poll_deregister(sc->ifp); 528 } 529 #endif 530 callout_drain(&sc->tick); 531 532 if (sc->queues) { 533 /* Drain taskqueues before calling if_detach. */ 534 for (i = 0; i < sc->num_rings; i++) { 535 struct ptnet_queue *pq = sc->queues + i; 536 537 if (pq->taskq) { 538 taskqueue_drain(pq->taskq, &pq->task); 539 } 540 } 541 } 542 543 if (sc->ifp) { 544 ether_ifdetach(sc->ifp); 545 546 /* Uninitialize netmap adapters for this device. */ 547 netmap_detach(sc->ifp); 548 549 ifmedia_removeall(&sc->media); 550 if_free(sc->ifp); 551 sc->ifp = NULL; 552 } 553 554 ptnet_irqs_fini(sc); 555 556 if (sc->csb_gh) { 557 contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF); 558 sc->csb_gh = NULL; 559 sc->csb_hg = NULL; 560 } 561 562 if (sc->queues) { 563 for (i = 0; i < sc->num_rings; i++) { 564 struct ptnet_queue *pq = sc->queues + i; 565 566 if (mtx_initialized(&pq->lock)) { 567 mtx_destroy(&pq->lock); 568 } 569 if (pq->bufring != NULL) { 570 buf_ring_free(pq->bufring, M_DEVBUF); 571 } 572 } 573 free(sc->queues, M_DEVBUF); 574 sc->queues = NULL; 575 } 576 577 if (sc->iomem) { 578 bus_release_resource(dev, SYS_RES_IOPORT, 579 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); 580 sc->iomem = NULL; 581 } 582 583 mtx_destroy(&sc->lock); 584 585 device_printf(dev, "%s() completed\n", __func__); 586 587 return (0); 588 } 589 590 static int 591 ptnet_suspend(device_t dev) 592 { 593 struct ptnet_softc *sc = device_get_softc(dev); 594 595 (void)sc; 596 597 return (0); 598 } 599 600 static int 601 ptnet_resume(device_t dev) 602 { 603 struct ptnet_softc *sc = device_get_softc(dev); 604 605 (void)sc; 606 607 return (0); 608 } 609 610 static int 611 ptnet_shutdown(device_t dev) 612 { 613 struct ptnet_softc *sc = device_get_softc(dev); 614 615 ptnet_device_shutdown(sc); 616 617 return (0); 618 } 619 620 static int 621 ptnet_irqs_init(struct ptnet_softc *sc) 622 { 623 int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); 624 int nvecs = sc->num_rings; 625 device_t dev = sc->dev; 626 int err = ENOSPC; 627 int cpu_cur; 628 int i; 629 630 if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { 631 device_printf(dev, "Could not find MSI-X capability\n"); 632 return (ENXIO); 633 } 634 635 sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 636 &rid, RF_ACTIVE); 637 if (sc->msix_mem == NULL) { 638 device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); 639 return (ENXIO); 640 } 641 642 if (pci_msix_count(dev) < nvecs) { 643 device_printf(dev, "Not enough MSI-X vectors\n"); 644 goto err_path; 645 } 646 647 err = pci_alloc_msix(dev, &nvecs); 648 if (err) { 649 device_printf(dev, "Failed to allocate MSI-X vectors\n"); 650 goto err_path; 651 } 652 653 for (i = 0; i < nvecs; i++) { 654 struct ptnet_queue *pq = sc->queues + i; 655 656 rid = i + 1; 657 pq->irq = 
bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 658 RF_ACTIVE); 659 if (pq->irq == NULL) { 660 device_printf(dev, "Failed to allocate interrupt " 661 "for queue #%d\n", i); 662 err = ENOSPC; 663 goto err_path; 664 } 665 } 666 667 cpu_cur = CPU_FIRST(); 668 for (i = 0; i < nvecs; i++) { 669 struct ptnet_queue *pq = sc->queues + i; 670 void (*handler)(void *) = ptnet_tx_intr; 671 672 if (i >= sc->num_tx_rings) { 673 handler = ptnet_rx_intr; 674 } 675 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, 676 NULL /* intr_filter */, handler, 677 pq, &pq->cookie); 678 if (err) { 679 device_printf(dev, "Failed to register intr handler " 680 "for queue #%d\n", i); 681 goto err_path; 682 } 683 684 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); 685 #if 0 686 bus_bind_intr(sc->dev, pq->irq, cpu_cur); 687 #endif 688 cpu_cur = CPU_NEXT(cpu_cur); 689 } 690 691 device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); 692 693 cpu_cur = CPU_FIRST(); 694 for (i = 0; i < nvecs; i++) { 695 struct ptnet_queue *pq = sc->queues + i; 696 static void (*handler)(void *context, int pending); 697 698 handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task; 699 700 TASK_INIT(&pq->task, 0, handler, pq); 701 pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, 702 taskqueue_thread_enqueue, &pq->taskq); 703 taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", 704 device_get_nameunit(sc->dev), cpu_cur); 705 cpu_cur = CPU_NEXT(cpu_cur); 706 } 707 708 return 0; 709 err_path: 710 ptnet_irqs_fini(sc); 711 return err; 712 } 713 714 static void 715 ptnet_irqs_fini(struct ptnet_softc *sc) 716 { 717 device_t dev = sc->dev; 718 int i; 719 720 for (i = 0; i < sc->num_rings; i++) { 721 struct ptnet_queue *pq = sc->queues + i; 722 723 if (pq->taskq) { 724 taskqueue_free(pq->taskq); 725 pq->taskq = NULL; 726 } 727 728 if (pq->cookie) { 729 bus_teardown_intr(dev, pq->irq, pq->cookie); 730 pq->cookie = NULL; 731 } 732 733 if (pq->irq) { 734 bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); 735 pq->irq = NULL; 736 } 737 } 738 739 if (sc->msix_mem) { 740 pci_release_msi(dev); 741 742 bus_release_resource(dev, SYS_RES_MEMORY, 743 PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), 744 sc->msix_mem); 745 sc->msix_mem = NULL; 746 } 747 } 748 749 static void 750 ptnet_init(void *opaque) 751 { 752 struct ptnet_softc *sc = opaque; 753 754 PTNET_CORE_LOCK(sc); 755 ptnet_init_locked(sc); 756 PTNET_CORE_UNLOCK(sc); 757 } 758 759 static int 760 ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) 761 { 762 struct ptnet_softc *sc = if_getsoftc(ifp); 763 device_t dev = sc->dev; 764 struct ifreq *ifr = (struct ifreq *)data; 765 int mask __unused, err = 0; 766 767 switch (cmd) { 768 case SIOCSIFFLAGS: 769 device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags); 770 PTNET_CORE_LOCK(sc); 771 if (ifp->if_flags & IFF_UP) { 772 /* Network stack wants the iff to be up. */ 773 err = ptnet_init_locked(sc); 774 } else { 775 /* Network stack wants the iff to be down. */ 776 err = ptnet_stop(sc); 777 } 778 /* We don't need to do nothing to support IFF_PROMISC, 779 * since that is managed by the backend port. 
*/ 780 PTNET_CORE_UNLOCK(sc); 781 break; 782 783 case SIOCSIFCAP: 784 device_printf(dev, "SIOCSIFCAP %x %x\n", 785 ifr->ifr_reqcap, ifp->if_capenable); 786 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 787 #ifdef DEVICE_POLLING 788 if (mask & IFCAP_POLLING) { 789 struct ptnet_queue *pq; 790 int i; 791 792 if (ifr->ifr_reqcap & IFCAP_POLLING) { 793 err = ether_poll_register(ptnet_poll, ifp); 794 if (err) { 795 break; 796 } 797 /* Stop queues and sync with taskqueues. */ 798 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 799 for (i = 0; i < sc->num_rings; i++) { 800 pq = sc-> queues + i; 801 /* Make sure the worker sees the 802 * IFF_DRV_RUNNING down. */ 803 PTNET_Q_LOCK(pq); 804 pq->atok->appl_need_kick = 0; 805 PTNET_Q_UNLOCK(pq); 806 /* Wait for rescheduling to finish. */ 807 if (pq->taskq) { 808 taskqueue_drain(pq->taskq, 809 &pq->task); 810 } 811 } 812 ifp->if_drv_flags |= IFF_DRV_RUNNING; 813 } else { 814 err = ether_poll_deregister(ifp); 815 for (i = 0; i < sc->num_rings; i++) { 816 pq = sc-> queues + i; 817 PTNET_Q_LOCK(pq); 818 pq->atok->appl_need_kick = 1; 819 PTNET_Q_UNLOCK(pq); 820 } 821 } 822 } 823 #endif /* DEVICE_POLLING */ 824 ifp->if_capenable = ifr->ifr_reqcap; 825 break; 826 827 case SIOCSIFMTU: 828 /* We support any reasonable MTU. */ 829 if (ifr->ifr_mtu < ETHERMIN || 830 ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { 831 err = EINVAL; 832 } else { 833 PTNET_CORE_LOCK(sc); 834 ifp->if_mtu = ifr->ifr_mtu; 835 PTNET_CORE_UNLOCK(sc); 836 } 837 break; 838 839 case SIOCSIFMEDIA: 840 case SIOCGIFMEDIA: 841 err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); 842 break; 843 844 default: 845 err = ether_ioctl(ifp, cmd, data); 846 break; 847 } 848 849 return err; 850 } 851 852 static int 853 ptnet_init_locked(struct ptnet_softc *sc) 854 { 855 if_t ifp = sc->ifp; 856 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 857 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 858 unsigned int nm_buf_size; 859 int ret; 860 861 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 862 return 0; /* nothing to do */ 863 } 864 865 device_printf(sc->dev, "%s\n", __func__); 866 867 /* Translate offload capabilities according to if_capenable. */ 868 ifp->if_hwassist = 0; 869 if (ifp->if_capenable & IFCAP_TXCSUM) 870 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; 871 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 872 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; 873 if (ifp->if_capenable & IFCAP_TSO4) 874 ifp->if_hwassist |= CSUM_IP_TSO; 875 if (ifp->if_capenable & IFCAP_TSO6) 876 ifp->if_hwassist |= CSUM_IP6_TSO; 877 878 /* 879 * Prepare the interface for netmap mode access. 
880 */ 881 netmap_update_config(na_dr); 882 883 ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); 884 if (ret) { 885 device_printf(sc->dev, "netmap_mem_finalize() failed\n"); 886 return ret; 887 } 888 889 if (sc->ptna->backend_users == 0) { 890 ret = ptnet_nm_krings_create(na_nm); 891 if (ret) { 892 device_printf(sc->dev, "ptnet_nm_krings_create() " 893 "failed\n"); 894 goto err_mem_finalize; 895 } 896 897 ret = netmap_mem_rings_create(na_dr); 898 if (ret) { 899 device_printf(sc->dev, "netmap_mem_rings_create() " 900 "failed\n"); 901 goto err_rings_create; 902 } 903 904 ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); 905 if (ret) { 906 device_printf(sc->dev, "netmap_mem_get_lut() " 907 "failed\n"); 908 goto err_get_lut; 909 } 910 } 911 912 ret = ptnet_nm_register(na_dr, 1 /* on */); 913 if (ret) { 914 goto err_register; 915 } 916 917 nm_buf_size = NETMAP_BUF_SIZE(na_dr); 918 919 KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); 920 sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; 921 device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, 922 sc->min_tx_space); 923 #ifdef PTNETMAP_STATS 924 callout_reset(&sc->tick, hz, ptnet_tick, sc); 925 #endif 926 927 ifp->if_drv_flags |= IFF_DRV_RUNNING; 928 929 return 0; 930 931 err_register: 932 memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); 933 err_get_lut: 934 netmap_mem_rings_delete(na_dr); 935 err_rings_create: 936 ptnet_nm_krings_delete(na_nm); 937 err_mem_finalize: 938 netmap_mem_deref(na_dr->nm_mem, na_dr); 939 940 return ret; 941 } 942 943 /* To be called under core lock. */ 944 static int 945 ptnet_stop(struct ptnet_softc *sc) 946 { 947 if_t ifp = sc->ifp; 948 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 949 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 950 int i; 951 952 device_printf(sc->dev, "%s\n", __func__); 953 954 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 955 return 0; /* nothing to do */ 956 } 957 958 /* Clear the driver-ready flag, and synchronize with all the queues, 959 * so that after this loop we are sure nobody is working anymore with 960 * the device. This scheme is taken from the vtnet driver. */ 961 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 962 callout_stop(&sc->tick); 963 for (i = 0; i < sc->num_rings; i++) { 964 PTNET_Q_LOCK(sc->queues + i); 965 PTNET_Q_UNLOCK(sc->queues + i); 966 } 967 968 ptnet_nm_register(na_dr, 0 /* off */); 969 970 if (sc->ptna->backend_users == 0) { 971 netmap_mem_rings_delete(na_dr); 972 ptnet_nm_krings_delete(na_nm); 973 } 974 netmap_mem_deref(na_dr->nm_mem, na_dr); 975 976 return 0; 977 } 978 979 static void 980 ptnet_qflush(if_t ifp) 981 { 982 struct ptnet_softc *sc = if_getsoftc(ifp); 983 int i; 984 985 /* Flush all the bufrings and do the interface flush. 
*/ 986 for (i = 0; i < sc->num_rings; i++) { 987 struct ptnet_queue *pq = sc->queues + i; 988 struct mbuf *m; 989 990 PTNET_Q_LOCK(pq); 991 if (pq->bufring) { 992 while ((m = buf_ring_dequeue_sc(pq->bufring))) { 993 m_freem(m); 994 } 995 } 996 PTNET_Q_UNLOCK(pq); 997 } 998 999 if_qflush(ifp); 1000 } 1001 1002 static int 1003 ptnet_media_change(if_t ifp) 1004 { 1005 struct ptnet_softc *sc = if_getsoftc(ifp); 1006 struct ifmedia *ifm = &sc->media; 1007 1008 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 1009 return EINVAL; 1010 } 1011 1012 return 0; 1013 } 1014 1015 #if __FreeBSD_version >= 1100000 1016 static uint64_t 1017 ptnet_get_counter(if_t ifp, ift_counter cnt) 1018 { 1019 struct ptnet_softc *sc = if_getsoftc(ifp); 1020 struct ptnet_queue_stats stats[2]; 1021 int i; 1022 1023 /* Accumulate statistics over the queues. */ 1024 memset(stats, 0, sizeof(stats)); 1025 for (i = 0; i < sc->num_rings; i++) { 1026 struct ptnet_queue *pq = sc->queues + i; 1027 int idx = (i < sc->num_tx_rings) ? 0 : 1; 1028 1029 stats[idx].packets += pq->stats.packets; 1030 stats[idx].bytes += pq->stats.bytes; 1031 stats[idx].errors += pq->stats.errors; 1032 stats[idx].iqdrops += pq->stats.iqdrops; 1033 stats[idx].mcasts += pq->stats.mcasts; 1034 } 1035 1036 switch (cnt) { 1037 case IFCOUNTER_IPACKETS: 1038 return (stats[1].packets); 1039 case IFCOUNTER_IQDROPS: 1040 return (stats[1].iqdrops); 1041 case IFCOUNTER_IERRORS: 1042 return (stats[1].errors); 1043 case IFCOUNTER_OPACKETS: 1044 return (stats[0].packets); 1045 case IFCOUNTER_OBYTES: 1046 return (stats[0].bytes); 1047 case IFCOUNTER_OMCASTS: 1048 return (stats[0].mcasts); 1049 default: 1050 return (if_get_counter_default(ifp, cnt)); 1051 } 1052 } 1053 #endif 1054 1055 1056 #ifdef PTNETMAP_STATS 1057 /* Called under core lock. */ 1058 static void 1059 ptnet_tick(void *opaque) 1060 { 1061 struct ptnet_softc *sc = opaque; 1062 int i; 1063 1064 for (i = 0; i < sc->num_rings; i++) { 1065 struct ptnet_queue *pq = sc->queues + i; 1066 struct ptnet_queue_stats cur = pq->stats; 1067 struct timeval now; 1068 unsigned int delta; 1069 1070 microtime(&now); 1071 delta = now.tv_usec - sc->last_ts.tv_usec + 1072 (now.tv_sec - sc->last_ts.tv_sec) * 1000000; 1073 delta /= 1000; /* in milliseconds */ 1074 1075 if (delta == 0) 1076 continue; 1077 1078 device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " 1079 "intr %lu\n", i, delta, 1080 (cur.packets - pq->last_stats.packets), 1081 (cur.kicks - pq->last_stats.kicks), 1082 (cur.intrs - pq->last_stats.intrs)); 1083 pq->last_stats = cur; 1084 } 1085 microtime(&sc->last_ts); 1086 callout_schedule(&sc->tick, hz); 1087 } 1088 #endif /* PTNETMAP_STATS */ 1089 1090 static void 1091 ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) 1092 { 1093 /* We are always active, as the backend netmap port is 1094 * always open in netmap mode. */ 1095 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 1096 ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; 1097 } 1098 1099 static uint32_t 1100 ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd) 1101 { 1102 /* 1103 * Write a command and read back error status, 1104 * with zero meaning success. 
1105 */ 1106 bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); 1107 return bus_read_4(sc->iomem, PTNET_IO_PTCTL); 1108 } 1109 1110 static int 1111 ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info) 1112 { 1113 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1114 1115 info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 1116 info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 1117 info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 1118 info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 1119 info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); 1120 1121 device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n", 1122 info->num_tx_rings, info->num_rx_rings, 1123 info->num_tx_descs, info->num_rx_descs, 1124 info->rx_buf_maxsize); 1125 1126 return 0; 1127 } 1128 1129 static void 1130 ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) 1131 { 1132 int i; 1133 1134 /* Sync krings from the host, reading from 1135 * CSB. */ 1136 for (i = 0; i < sc->num_rings; i++) { 1137 struct nm_csb_atok *atok = sc->queues[i].atok; 1138 struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa; 1139 struct netmap_kring *kring; 1140 1141 if (i < na->num_tx_rings) { 1142 kring = na->tx_rings[i]; 1143 } else { 1144 kring = na->rx_rings[i - na->num_tx_rings]; 1145 } 1146 kring->rhead = kring->ring->head = atok->head; 1147 kring->rcur = kring->ring->cur = atok->cur; 1148 kring->nr_hwcur = ktoa->hwcur; 1149 kring->nr_hwtail = kring->rtail = 1150 kring->ring->tail = ktoa->hwtail; 1151 1152 nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, 1153 ktoa->hwcur, atok->head, atok->cur, 1154 ktoa->hwtail); 1155 nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", 1156 t, i, kring->nr_hwcur, kring->rhead, kring->rcur, 1157 kring->ring->head, kring->ring->cur, kring->nr_hwtail, 1158 kring->rtail, kring->ring->tail); 1159 } 1160 } 1161 1162 static void 1163 ptnet_update_vnet_hdr(struct ptnet_softc *sc) 1164 { 1165 unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; 1166 1167 bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); 1168 sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); 1169 sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; 1170 } 1171 1172 static int 1173 ptnet_nm_register(struct netmap_adapter *na, int onoff) 1174 { 1175 /* device-specific */ 1176 if_t ifp = na->ifp; 1177 struct ptnet_softc *sc = if_getsoftc(ifp); 1178 int native = (na == &sc->ptna->hwup.up); 1179 struct ptnet_queue *pq; 1180 int ret = 0; 1181 int i; 1182 1183 if (!onoff) { 1184 sc->ptna->backend_users--; 1185 } 1186 1187 /* If this is the last netmap client, guest interrupt enable flags may 1188 * be in arbitrary state. Since these flags are going to be used also 1189 * by the netdevice driver, we have to make sure to start with 1190 * notifications enabled. Also, schedule NAPI to flush pending packets 1191 * in the RX rings, since we will not receive further interrupts 1192 * until these will be processed. */ 1193 if (native && !onoff && na->active_fds == 0) { 1194 nm_prinf("Exit netmap mode, re-enable interrupts"); 1195 for (i = 0; i < sc->num_rings; i++) { 1196 pq = sc->queues + i; 1197 pq->atok->appl_need_kick = 1; 1198 } 1199 } 1200 1201 if (onoff) { 1202 if (sc->ptna->backend_users == 0) { 1203 /* Initialize notification enable fields in the CSB. 
*/ 1204 for (i = 0; i < sc->num_rings; i++) { 1205 pq = sc->queues + i; 1206 pq->ktoa->kern_need_kick = 1; 1207 pq->atok->appl_need_kick = 1208 (!(ifp->if_capenable & IFCAP_POLLING) 1209 && i >= sc->num_tx_rings); 1210 } 1211 1212 /* Set the virtio-net header length. */ 1213 ptnet_update_vnet_hdr(sc); 1214 1215 /* Make sure the host adapter passed through is ready 1216 * for txsync/rxsync. */ 1217 ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE); 1218 if (ret) { 1219 return ret; 1220 } 1221 1222 /* Align the guest krings and rings to the state stored 1223 * in the CSB. */ 1224 ptnet_sync_from_csb(sc, na); 1225 } 1226 1227 /* If not native, don't call nm_set_native_flags, since we don't want 1228 * to replace if_transmit method, nor set NAF_NETMAP_ON */ 1229 if (native) { 1230 netmap_krings_mode_commit(na, onoff); 1231 nm_set_native_flags(na); 1232 } 1233 1234 } else { 1235 if (native) { 1236 nm_clear_native_flags(na); 1237 netmap_krings_mode_commit(na, onoff); 1238 } 1239 1240 if (sc->ptna->backend_users == 0) { 1241 ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); 1242 } 1243 } 1244 1245 if (onoff) { 1246 sc->ptna->backend_users++; 1247 } 1248 1249 return ret; 1250 } 1251 1252 static int 1253 ptnet_nm_txsync(struct netmap_kring *kring, int flags) 1254 { 1255 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); 1256 struct ptnet_queue *pq = sc->queues + kring->ring_id; 1257 bool notify; 1258 1259 notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags); 1260 if (notify) { 1261 ptnet_kick(pq); 1262 } 1263 1264 return 0; 1265 } 1266 1267 static int 1268 ptnet_nm_rxsync(struct netmap_kring *kring, int flags) 1269 { 1270 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); 1271 struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; 1272 bool notify; 1273 1274 notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags); 1275 if (notify) { 1276 ptnet_kick(pq); 1277 } 1278 1279 return 0; 1280 } 1281 1282 static void 1283 ptnet_nm_intr(struct netmap_adapter *na, int onoff) 1284 { 1285 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1286 int i; 1287 1288 for (i = 0; i < sc->num_rings; i++) { 1289 struct ptnet_queue *pq = sc->queues + i; 1290 pq->atok->appl_need_kick = onoff; 1291 } 1292 } 1293 1294 static void 1295 ptnet_tx_intr(void *opaque) 1296 { 1297 struct ptnet_queue *pq = opaque; 1298 struct ptnet_softc *sc = pq->sc; 1299 1300 DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); 1301 #ifdef PTNETMAP_STATS 1302 pq->stats.intrs ++; 1303 #endif /* PTNETMAP_STATS */ 1304 1305 if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { 1306 return; 1307 } 1308 1309 /* Schedule the tasqueue to flush process transmissions requests. 1310 * However, vtnet, if_em and if_igb just call ptnet_transmit() here, 1311 * at least when using MSI-X interrupts. The if_em driver, instead 1312 * schedule taskqueue when using legacy interrupts. */ 1313 taskqueue_enqueue(pq->taskq, &pq->task); 1314 } 1315 1316 static void 1317 ptnet_rx_intr(void *opaque) 1318 { 1319 struct ptnet_queue *pq = opaque; 1320 struct ptnet_softc *sc = pq->sc; 1321 unsigned int unused; 1322 1323 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); 1324 #ifdef PTNETMAP_STATS 1325 pq->stats.intrs ++; 1326 #endif /* PTNETMAP_STATS */ 1327 1328 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { 1329 return; 1330 } 1331 1332 /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, 1333 * receive-side processing is executed directly in the interrupt 1334 * service routine. 
Alternatively, we may schedule the taskqueue. */ 1335 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); 1336 } 1337 1338 /* The following offloadings-related functions are taken from the vtnet 1339 * driver, but the same functionality is required for the ptnet driver. 1340 * As a temporary solution, I copied this code from vtnet and I started 1341 * to generalize it (taking away driver-specific statistic accounting), 1342 * making as little modifications as possible. 1343 * In the future we need to share these functions between vtnet and ptnet. 1344 */ 1345 static int 1346 ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) 1347 { 1348 struct ether_vlan_header *evh; 1349 int offset; 1350 1351 evh = mtod(m, struct ether_vlan_header *); 1352 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1353 /* BMV: We should handle nested VLAN tags too. */ 1354 *etype = ntohs(evh->evl_proto); 1355 offset = sizeof(struct ether_vlan_header); 1356 } else { 1357 *etype = ntohs(evh->evl_encap_proto); 1358 offset = sizeof(struct ether_header); 1359 } 1360 1361 switch (*etype) { 1362 #if defined(INET) 1363 case ETHERTYPE_IP: { 1364 struct ip *ip, iphdr; 1365 if (__predict_false(m->m_len < offset + sizeof(struct ip))) { 1366 m_copydata(m, offset, sizeof(struct ip), 1367 (caddr_t) &iphdr); 1368 ip = &iphdr; 1369 } else 1370 ip = (struct ip *)(m->m_data + offset); 1371 *proto = ip->ip_p; 1372 *start = offset + (ip->ip_hl << 2); 1373 break; 1374 } 1375 #endif 1376 #if defined(INET6) 1377 case ETHERTYPE_IPV6: 1378 *proto = -1; 1379 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); 1380 /* Assert the network stack sent us a valid packet. */ 1381 KASSERT(*start > offset, 1382 ("%s: mbuf %p start %d offset %d proto %d", __func__, m, 1383 *start, offset, *proto)); 1384 break; 1385 #endif 1386 default: 1387 /* Here we should increment the tx_csum_bad_ethtype counter. */ 1388 return (EINVAL); 1389 } 1390 1391 return (0); 1392 } 1393 1394 static int 1395 ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, 1396 int offset, bool allow_ecn, struct virtio_net_hdr *hdr) 1397 { 1398 static struct timeval lastecn; 1399 static int curecn; 1400 struct tcphdr *tcp, tcphdr; 1401 1402 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { 1403 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); 1404 tcp = &tcphdr; 1405 } else 1406 tcp = (struct tcphdr *)(m->m_data + offset); 1407 1408 hdr->hdr_len = offset + (tcp->th_off << 2); 1409 hdr->gso_size = m->m_pkthdr.tso_segsz; 1410 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : 1411 VIRTIO_NET_HDR_GSO_TCPV6; 1412 1413 if (tcp->th_flags & TH_CWR) { 1414 /* 1415 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, 1416 * ECN support is not on a per-interface basis, but globally via 1417 * the net.inet.tcp.ecn.enable sysctl knob. The default is off. 1418 */ 1419 if (!allow_ecn) { 1420 if (ppsratecheck(&lastecn, &curecn, 1)) 1421 if_printf(ifp, 1422 "TSO with ECN not negotiated with host\n"); 1423 return (ENOTSUP); 1424 } 1425 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 1426 } 1427 1428 /* Here we should increment tx_tso counter. 
*/ 1429 1430 return (0); 1431 } 1432 1433 static struct mbuf * 1434 ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn, 1435 struct virtio_net_hdr *hdr) 1436 { 1437 int flags, etype, csum_start, proto, error; 1438 1439 flags = m->m_pkthdr.csum_flags; 1440 1441 error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start); 1442 if (error) 1443 goto drop; 1444 1445 if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) || 1446 (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) { 1447 /* 1448 * We could compare the IP protocol vs the CSUM_ flag too, 1449 * but that really should not be necessary. 1450 */ 1451 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; 1452 hdr->csum_start = csum_start; 1453 hdr->csum_offset = m->m_pkthdr.csum_data; 1454 /* Here we should increment the tx_csum counter. */ 1455 } 1456 1457 if (flags & CSUM_TSO) { 1458 if (__predict_false(proto != IPPROTO_TCP)) { 1459 /* Likely failed to correctly parse the mbuf. 1460 * Here we should increment the tx_tso_not_tcp 1461 * counter. */ 1462 goto drop; 1463 } 1464 1465 KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, 1466 ("%s: mbuf %p TSO without checksum offload %#x", 1467 __func__, m, flags)); 1468 1469 error = ptnet_tx_offload_tso(ifp, m, etype, csum_start, 1470 allow_ecn, hdr); 1471 if (error) 1472 goto drop; 1473 } 1474 1475 return (m); 1476 1477 drop: 1478 m_freem(m); 1479 return (NULL); 1480 } 1481 1482 static void 1483 ptnet_vlan_tag_remove(struct mbuf *m) 1484 { 1485 struct ether_vlan_header *evh; 1486 1487 evh = mtod(m, struct ether_vlan_header *); 1488 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); 1489 m->m_flags |= M_VLANTAG; 1490 1491 /* Strip the 802.1Q header. */ 1492 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, 1493 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1494 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1495 } 1496 1497 /* 1498 * Use the checksum offset in the VirtIO header to set the 1499 * correct CSUM_* flags. 1500 */ 1501 static int 1502 ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start, 1503 struct virtio_net_hdr *hdr) 1504 { 1505 #if defined(INET) || defined(INET6) 1506 int offset = hdr->csum_start + hdr->csum_offset; 1507 #endif 1508 1509 /* Only do a basic sanity check on the offset. */ 1510 switch (eth_type) { 1511 #if defined(INET) 1512 case ETHERTYPE_IP: 1513 if (__predict_false(offset < ip_start + sizeof(struct ip))) 1514 return (1); 1515 break; 1516 #endif 1517 #if defined(INET6) 1518 case ETHERTYPE_IPV6: 1519 if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) 1520 return (1); 1521 break; 1522 #endif 1523 default: 1524 /* Here we should increment the rx_csum_bad_ethtype counter. */ 1525 return (1); 1526 } 1527 1528 /* 1529 * Use the offset to determine the appropriate CSUM_* flags. This is 1530 * a bit dirty, but we can get by with it since the checksum offsets 1531 * happen to be different. We assume the host host does not do IPv4 1532 * header checksum offloading. 1533 */ 1534 switch (hdr->csum_offset) { 1535 case offsetof(struct udphdr, uh_sum): 1536 case offsetof(struct tcphdr, th_sum): 1537 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1538 m->m_pkthdr.csum_data = 0xFFFF; 1539 break; 1540 default: 1541 /* Here we should increment the rx_csum_bad_offset counter. 
*/ 1542 return (1); 1543 } 1544 1545 return (0); 1546 } 1547 1548 static int 1549 ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start, 1550 struct virtio_net_hdr *hdr) 1551 { 1552 int offset, proto; 1553 1554 switch (eth_type) { 1555 #if defined(INET) 1556 case ETHERTYPE_IP: { 1557 struct ip *ip; 1558 if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) 1559 return (1); 1560 ip = (struct ip *)(m->m_data + ip_start); 1561 proto = ip->ip_p; 1562 offset = ip_start + (ip->ip_hl << 2); 1563 break; 1564 } 1565 #endif 1566 #if defined(INET6) 1567 case ETHERTYPE_IPV6: 1568 if (__predict_false(m->m_len < ip_start + 1569 sizeof(struct ip6_hdr))) 1570 return (1); 1571 offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); 1572 if (__predict_false(offset < 0)) 1573 return (1); 1574 break; 1575 #endif 1576 default: 1577 /* Here we should increment the rx_csum_bad_ethtype counter. */ 1578 return (1); 1579 } 1580 1581 switch (proto) { 1582 case IPPROTO_TCP: 1583 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) 1584 return (1); 1585 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1586 m->m_pkthdr.csum_data = 0xFFFF; 1587 break; 1588 case IPPROTO_UDP: 1589 if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) 1590 return (1); 1591 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1592 m->m_pkthdr.csum_data = 0xFFFF; 1593 break; 1594 default: 1595 /* 1596 * For the remaining protocols, FreeBSD does not support 1597 * checksum offloading, so the checksum will be recomputed. 1598 */ 1599 #if 0 1600 if_printf(ifp, "cksum offload of unsupported " 1601 "protocol eth_type=%#x proto=%d csum_start=%d " 1602 "csum_offset=%d\n", __func__, eth_type, proto, 1603 hdr->csum_start, hdr->csum_offset); 1604 #endif 1605 break; 1606 } 1607 1608 return (0); 1609 } 1610 1611 /* 1612 * Set the appropriate CSUM_* flags. Unfortunately, the information 1613 * provided is not directly useful to us. The VirtIO header gives the 1614 * offset of the checksum, which is all Linux needs, but this is not 1615 * how FreeBSD does things. We are forced to peek inside the packet 1616 * a bit. 1617 * 1618 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD 1619 * could accept the offsets and let the stack figure it out. 1620 */ 1621 static int 1622 ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) 1623 { 1624 struct ether_header *eh; 1625 struct ether_vlan_header *evh; 1626 uint16_t eth_type; 1627 int offset, error; 1628 1629 eh = mtod(m, struct ether_header *); 1630 eth_type = ntohs(eh->ether_type); 1631 if (eth_type == ETHERTYPE_VLAN) { 1632 /* BMV: We should handle nested VLAN tags too. */ 1633 evh = mtod(m, struct ether_vlan_header *); 1634 eth_type = ntohs(evh->evl_proto); 1635 offset = sizeof(struct ether_vlan_header); 1636 } else 1637 offset = sizeof(struct ether_header); 1638 1639 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1640 error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr); 1641 else 1642 error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr); 1643 1644 return (error); 1645 } 1646 /* End of offloading-related functions to be shared with vtnet. */ 1647 1648 static void 1649 ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, 1650 unsigned int head, unsigned int sync_flags) 1651 { 1652 struct netmap_ring *ring = kring->ring; 1653 struct nm_csb_atok *atok = pq->atok; 1654 struct nm_csb_ktoa *ktoa = pq->ktoa; 1655 1656 /* Some packets have been pushed to the netmap ring. 
We have 1657 * to tell the host to process the new packets, updating cur 1658 * and head in the CSB. */ 1659 ring->head = ring->cur = head; 1660 1661 /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ 1662 kring->rcur = kring->rhead = head; 1663 1664 nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead); 1665 1666 /* Kick the host if needed. */ 1667 if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) { 1668 atok->sync_flags = sync_flags; 1669 ptnet_kick(pq); 1670 } 1671 } 1672 1673 #define PTNET_TX_NOSPACE(_h, _k, _min) \ 1674 ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ 1675 (_k)->rtail - (_h)) < (_min) 1676 1677 /* This function may be called by the network stack, or by 1678 * by the taskqueue thread. */ 1679 static int 1680 ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, 1681 bool may_resched) 1682 { 1683 struct ptnet_softc *sc = pq->sc; 1684 bool have_vnet_hdr = sc->vnet_hdr_len; 1685 struct netmap_adapter *na = &sc->ptna->dr.up; 1686 if_t ifp = sc->ifp; 1687 unsigned int batch_count = 0; 1688 struct nm_csb_atok *atok; 1689 struct nm_csb_ktoa *ktoa; 1690 struct netmap_kring *kring; 1691 struct netmap_ring *ring; 1692 struct netmap_slot *slot; 1693 unsigned int count = 0; 1694 unsigned int minspace; 1695 unsigned int head; 1696 unsigned int lim; 1697 struct mbuf *mhead; 1698 struct mbuf *mf; 1699 int nmbuf_bytes; 1700 uint8_t *nmbuf; 1701 1702 if (!PTNET_Q_TRYLOCK(pq)) { 1703 /* We failed to acquire the lock, schedule the taskqueue. */ 1704 nm_prlim(1, "Deferring TX work"); 1705 if (may_resched) { 1706 taskqueue_enqueue(pq->taskq, &pq->task); 1707 } 1708 1709 return 0; 1710 } 1711 1712 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 1713 PTNET_Q_UNLOCK(pq); 1714 nm_prlim(1, "Interface is down"); 1715 return ENETDOWN; 1716 } 1717 1718 atok = pq->atok; 1719 ktoa = pq->ktoa; 1720 kring = na->tx_rings[pq->kring_id]; 1721 ring = kring->ring; 1722 lim = kring->nkr_num_slots - 1; 1723 head = ring->head; 1724 minspace = sc->min_tx_space; 1725 1726 while (count < budget) { 1727 if (PTNET_TX_NOSPACE(head, kring, minspace)) { 1728 /* We ran out of slot, let's see if the host has 1729 * freed up some, by reading hwcur and hwtail from 1730 * the CSB. */ 1731 ptnet_sync_tail(ktoa, kring); 1732 1733 if (PTNET_TX_NOSPACE(head, kring, minspace)) { 1734 /* Still no slots available. Reactivate the 1735 * interrupts so that we can be notified 1736 * when some free slots are made available by 1737 * the host. */ 1738 atok->appl_need_kick = 1; 1739 1740 /* Double check. We need a full barrier to 1741 * prevent the store to atok->appl_need_kick 1742 * to be reordered with the load from 1743 * ktoa->hwcur and ktoa->hwtail (store-load 1744 * barrier). */ 1745 nm_stld_barrier(); 1746 ptnet_sync_tail(ktoa, kring); 1747 if (likely(PTNET_TX_NOSPACE(head, kring, 1748 minspace))) { 1749 break; 1750 } 1751 1752 nm_prlim(1, "Found more slots by doublecheck"); 1753 /* More slots were freed before reactivating 1754 * the interrupts. */ 1755 atok->appl_need_kick = 0; 1756 } 1757 } 1758 1759 mhead = drbr_peek(ifp, pq->bufring); 1760 if (!mhead) { 1761 break; 1762 } 1763 1764 /* Initialize transmission state variables. */ 1765 slot = ring->slot + head; 1766 nmbuf = NMB(na, slot); 1767 nmbuf_bytes = 0; 1768 1769 /* If needed, prepare the virtio-net header at the beginning 1770 * of the first slot. */ 1771 if (have_vnet_hdr) { 1772 struct virtio_net_hdr *vh = 1773 (struct virtio_net_hdr *)nmbuf; 1774 1775 /* For performance, we could replace this memset() with 1776 * two 8-bytes-wide writes. 
*/ 1777 memset(nmbuf, 0, PTNET_HDR_SIZE); 1778 if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { 1779 mhead = ptnet_tx_offload(ifp, mhead, false, 1780 vh); 1781 if (unlikely(!mhead)) { 1782 /* Packet dropped because errors 1783 * occurred while preparing the vnet 1784 * header. Let's go ahead with the next 1785 * packet. */ 1786 pq->stats.errors ++; 1787 drbr_advance(ifp, pq->bufring); 1788 continue; 1789 } 1790 } 1791 nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x " 1792 "csum_start %u csum_ofs %u hdr_len = %u " 1793 "gso_size %u gso_type %x", __func__, 1794 mhead->m_pkthdr.csum_flags, vh->flags, 1795 vh->csum_start, vh->csum_offset, vh->hdr_len, 1796 vh->gso_size, vh->gso_type); 1797 1798 nmbuf += PTNET_HDR_SIZE; 1799 nmbuf_bytes += PTNET_HDR_SIZE; 1800 } 1801 1802 for (mf = mhead; mf; mf = mf->m_next) { 1803 uint8_t *mdata = mf->m_data; 1804 int mlen = mf->m_len; 1805 1806 for (;;) { 1807 int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; 1808 1809 if (mlen < copy) { 1810 copy = mlen; 1811 } 1812 memcpy(nmbuf, mdata, copy); 1813 1814 mdata += copy; 1815 mlen -= copy; 1816 nmbuf += copy; 1817 nmbuf_bytes += copy; 1818 1819 if (!mlen) { 1820 break; 1821 } 1822 1823 slot->len = nmbuf_bytes; 1824 slot->flags = NS_MOREFRAG; 1825 1826 head = nm_next(head, lim); 1827 KASSERT(head != ring->tail, 1828 ("Unexpectedly run out of TX space")); 1829 slot = ring->slot + head; 1830 nmbuf = NMB(na, slot); 1831 nmbuf_bytes = 0; 1832 } 1833 } 1834 1835 /* Complete last slot and update head. */ 1836 slot->len = nmbuf_bytes; 1837 slot->flags = 0; 1838 head = nm_next(head, lim); 1839 1840 /* Consume the packet just processed. */ 1841 drbr_advance(ifp, pq->bufring); 1842 1843 /* Copy the packet to listeners. */ 1844 ETHER_BPF_MTAP(ifp, mhead); 1845 1846 pq->stats.packets ++; 1847 pq->stats.bytes += mhead->m_pkthdr.len; 1848 if (mhead->m_flags & M_MCAST) { 1849 pq->stats.mcasts ++; 1850 } 1851 1852 m_freem(mhead); 1853 1854 count ++; 1855 if (++batch_count == PTNET_TX_BATCH) { 1856 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1857 batch_count = 0; 1858 } 1859 } 1860 1861 if (batch_count) { 1862 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1863 } 1864 1865 if (count >= budget && may_resched) { 1866 DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n", 1867 drbr_inuse(ifp, pq->bufring))); 1868 taskqueue_enqueue(pq->taskq, &pq->task); 1869 } 1870 1871 PTNET_Q_UNLOCK(pq); 1872 1873 return count; 1874 } 1875 1876 static int 1877 ptnet_transmit(if_t ifp, struct mbuf *m) 1878 { 1879 struct ptnet_softc *sc = if_getsoftc(ifp); 1880 struct ptnet_queue *pq; 1881 unsigned int queue_idx; 1882 int err; 1883 1884 DBG(device_printf(sc->dev, "transmit %p\n", m)); 1885 1886 /* Insert 802.1Q header if needed. */ 1887 if (m->m_flags & M_VLANTAG) { 1888 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1889 if (m == NULL) { 1890 return ENOBUFS; 1891 } 1892 m->m_flags &= ~M_VLANTAG; 1893 } 1894 1895 /* Get the flow-id if available. */ 1896 queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? 
1897 m->m_pkthdr.flowid : curcpu; 1898 1899 if (unlikely(queue_idx >= sc->num_tx_rings)) { 1900 queue_idx %= sc->num_tx_rings; 1901 } 1902 1903 pq = sc->queues + queue_idx; 1904 1905 err = drbr_enqueue(ifp, pq->bufring, m); 1906 if (err) { 1907 /* ENOBUFS when the bufring is full */ 1908 nm_prlim(1, "%s: drbr_enqueue() failed %d\n", 1909 __func__, err); 1910 pq->stats.errors ++; 1911 return err; 1912 } 1913 1914 if (ifp->if_capenable & IFCAP_POLLING) { 1915 /* If polling is on, the transmit queues will be 1916 * drained by the poller. */ 1917 return 0; 1918 } 1919 1920 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); 1921 1922 return (err < 0) ? err : 0; 1923 } 1924 1925 static unsigned int 1926 ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) 1927 { 1928 struct netmap_ring *ring = kring->ring; 1929 struct netmap_slot *slot = ring->slot + head; 1930 1931 for (;;) { 1932 head = nm_next(head, kring->nkr_num_slots - 1); 1933 if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { 1934 break; 1935 } 1936 slot = ring->slot + head; 1937 } 1938 1939 return head; 1940 } 1941 1942 static inline struct mbuf * 1943 ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) 1944 { 1945 uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; 1946 1947 do { 1948 unsigned int copy; 1949 1950 if (mtail->m_len == MCLBYTES) { 1951 struct mbuf *mf; 1952 1953 mf = m_getcl(M_NOWAIT, MT_DATA, 0); 1954 if (unlikely(!mf)) { 1955 return NULL; 1956 } 1957 1958 mtail->m_next = mf; 1959 mtail = mf; 1960 mdata = mtod(mtail, uint8_t *); 1961 mtail->m_len = 0; 1962 } 1963 1964 copy = MCLBYTES - mtail->m_len; 1965 if (nmbuf_len < copy) { 1966 copy = nmbuf_len; 1967 } 1968 1969 memcpy(mdata, nmbuf, copy); 1970 1971 nmbuf += copy; 1972 nmbuf_len -= copy; 1973 mdata += copy; 1974 mtail->m_len += copy; 1975 } while (nmbuf_len); 1976 1977 return mtail; 1978 } 1979 1980 static int 1981 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) 1982 { 1983 struct ptnet_softc *sc = pq->sc; 1984 bool have_vnet_hdr = sc->vnet_hdr_len; 1985 struct nm_csb_atok *atok = pq->atok; 1986 struct nm_csb_ktoa *ktoa = pq->ktoa; 1987 struct netmap_adapter *na = &sc->ptna->dr.up; 1988 struct netmap_kring *kring = na->rx_rings[pq->kring_id]; 1989 struct netmap_ring *ring = kring->ring; 1990 unsigned int const lim = kring->nkr_num_slots - 1; 1991 unsigned int batch_count = 0; 1992 if_t ifp = sc->ifp; 1993 unsigned int count = 0; 1994 uint32_t head; 1995 1996 PTNET_Q_LOCK(pq); 1997 1998 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 1999 goto unlock; 2000 } 2001 2002 kring->nr_kflags &= ~NKR_PENDINTR; 2003 2004 head = ring->head; 2005 while (count < budget) { 2006 uint32_t prev_head = head; 2007 struct mbuf *mhead, *mtail; 2008 struct virtio_net_hdr *vh; 2009 struct netmap_slot *slot; 2010 unsigned int nmbuf_len; 2011 uint8_t *nmbuf; 2012 int deliver = 1; /* the mbuf to the network stack. */ 2013 host_sync: 2014 if (head == ring->tail) { 2015 /* We ran out of slot, let's see if the host has 2016 * added some, by reading hwcur and hwtail from 2017 * the CSB. */ 2018 ptnet_sync_tail(ktoa, kring); 2019 2020 if (head == ring->tail) { 2021 /* Still no slots available. Reactivate 2022 * interrupts as they were disabled by the 2023 * host thread right before issuing the 2024 * last interrupt. */ 2025 atok->appl_need_kick = 1; 2026 2027 /* Double check for more completed RX slots. 
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings[pq->kring_id];
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* Whether to deliver the mbuf to the
				  * network stack. */
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots, let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ktoa, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				atok->appl_need_kick = 1;

				/* Double check for more completed RX slots.
				 * We need a full barrier to prevent the store
				 * to atok->appl_need_kick from being reordered
				 * with the load from ktoa->hwcur and
				 * ktoa->hwtail (store-load barrier). */
				nm_stld_barrier();
				ptnet_sync_tail(ktoa, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				atok->appl_need_kick = 0;
			}
		}

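		/*
		 * Note: the re-enable/double-check sequence above pairs with
		 * the host side of the CSB notification protocol, which is
		 * expected to publish the new hwtail before reading
		 * appl_need_kick and deciding whether to raise an interrupt.
		 * The store-load barrier keeps the guest-side store and the
		 * following reload of hwtail in order, so a slot made
		 * available concurrently cannot be missed.
		 */
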
		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host should
				 * put the header in multiple netmap slots.
				 * If this is the case, discard. */
				nm_prlim(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops ++;
				deliver = 0;
				goto skip;
			}
			nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
				 "csum_ofs %u hdr_len = %u gso_size %u "
				 "gso_type %x", __func__, vh->flags,
				 vh->csum_start, vh->csum_offset, vh->hdr_len,
				 vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors ++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					      " mbuf frag, reset head %u --> %u\n",
					      __func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors ++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * NS_MOREFRAG set. Drop it and continue the
				 * outer cycle (to do the double-check). */
				nm_prlim(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops ++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
					| VIRTIO_NET_HDR_F_DATA_VALID))) {
			if (unlikely(ptnet_rx_csum(mhead, vh))) {
				m_freem(mhead);
				nm_prlim(1, "Csum offload error: dropping");
				pq->stats.iqdrops ++;
				deliver = 0;
			}
		}

skip:
		count ++;
		if (++batch_count >= PTNET_RX_BATCH) {
			/* Some packets have been (or will be) pushed to the
			 * network stack. We need to update the CSB to tell
			 * the host about the new ring->cur and ring->head
			 * (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}

		if (likely(deliver)) {
			pq->stats.packets ++;
			pq->stats.bytes += mhead->m_pkthdr.len;

			PTNET_Q_UNLOCK(pq);
			(*ifp->if_input)(ifp, mhead);
			PTNET_Q_LOCK(pq);
			/* The ring->head index (and related indices) are
			 * updated under pq lock by ptnet_ring_update().
			 * Since we dropped the lock to call if_input(), we
			 * must reload ring->head and restart processing the
			 * ring from there. */
			head = ring->head;

			if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
				/* The interface has gone down while we were
				 * not holding the lock. Stop any processing
				 * and exit. */
				goto unlock;
			}
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
			     head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}

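/*
 * DEVICE_POLLING support. The budget handed to ptnet_poll() below is
 * split evenly across all queues, but never below one packet per queue
 * (e.g., a budget of 100 with 4 queues gives each queue 25 packets per
 * pass). Budget left unused by idle queues remains available on the
 * following passes, which keep scanning the queues until the budget is
 * exhausted or a full scan makes no progress.
 */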
#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY
 * differently, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	nm_prlim(1, "Per-queue budget is %u", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result; we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */