1 /*- 2 * Copyright (c) 2016, Vincenzo Maffione 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 /* Driver for ptnet paravirtualized network device. */ 30 31 #include <sys/cdefs.h> 32 33 #include <sys/types.h> 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/socket.h> 42 #include <sys/sysctl.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/taskqueue.h> 46 #include <sys/smp.h> 47 #include <sys/time.h> 48 #include <machine/smp.h> 49 50 #include <vm/uma.h> 51 #include <vm/vm.h> 52 #include <vm/pmap.h> 53 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_var.h> 57 #include <net/if_arp.h> 58 #include <net/if_dl.h> 59 #include <net/if_types.h> 60 #include <net/if_media.h> 61 #include <net/if_vlan_var.h> 62 #include <net/bpf.h> 63 64 #include <netinet/in_systm.h> 65 #include <netinet/in.h> 66 #include <netinet/ip.h> 67 #include <netinet/ip6.h> 68 #include <netinet6/ip6_var.h> 69 #include <netinet/udp.h> 70 #include <netinet/tcp.h> 71 #include <netinet/sctp.h> 72 73 #include <machine/bus.h> 74 #include <machine/resource.h> 75 #include <sys/bus.h> 76 #include <sys/rman.h> 77 78 #include <dev/pci/pcivar.h> 79 #include <dev/pci/pcireg.h> 80 81 #include "opt_inet.h" 82 #include "opt_inet6.h" 83 84 #include <sys/selinfo.h> 85 #include <net/netmap.h> 86 #include <dev/netmap/netmap_kern.h> 87 #include <net/netmap_virt.h> 88 #include <dev/netmap/netmap_mem2.h> 89 #include <dev/virtio/network/virtio_net.h> 90 91 #ifndef INET 92 #error "INET not defined, cannot support offloadings" 93 #endif 94 95 #if __FreeBSD_version >= 1100000 96 static uint64_t ptnet_get_counter(if_t, ift_counter); 97 #else 98 typedef struct ifnet *if_t; 99 #define if_getsoftc(_ifp) (_ifp)->if_softc 100 #endif 101 102 //#define PTNETMAP_STATS 103 //#define DEBUG 104 #ifdef DEBUG 105 #define DBG(x) x 106 #else /* !DEBUG */ 107 #define DBG(x) 108 #endif /* !DEBUG */ 109 110 extern int ptnet_vnet_hdr; /* Tunable parameter */ 111 112 struct ptnet_softc; 113 114 struct ptnet_queue_stats { 115 uint64_t packets; /* if_[io]packets */ 116 uint64_t bytes; /* if_[io]bytes */ 117 uint64_t errors; /* if_[io]errors */ 118 uint64_t iqdrops; /* if_iqdrops */ 119 uint64_t mcasts; /* if_[io]mcasts */ 120 #ifdef PTNETMAP_STATS 121 uint64_t intrs; 122 uint64_t kicks; 123 #endif /* PTNETMAP_STATS */ 124 }; 125 126 struct ptnet_queue { 127 struct ptnet_softc *sc; 128 struct resource *irq; 129 void *cookie; 130 int kring_id; 131 struct ptnet_csb_gh *ptgh; 132 struct ptnet_csb_hg *pthg; 133 unsigned int kick; 134 struct mtx lock; 135 struct buf_ring *bufring; /* for TX queues */ 136 struct ptnet_queue_stats stats; 137 #ifdef PTNETMAP_STATS 138 struct ptnet_queue_stats last_stats; 139 #endif /* PTNETMAP_STATS */ 140 struct taskqueue *taskq; 141 struct task task; 142 char lock_name[16]; 143 }; 144 145 #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) 146 #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) 147 #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) 148 149 struct ptnet_softc { 150 device_t dev; 151 if_t ifp; 152 struct ifmedia media; 153 struct mtx lock; 154 char lock_name[16]; 155 char hwaddr[ETHER_ADDR_LEN]; 156 157 /* Mirror of PTFEAT register. */ 158 uint32_t ptfeatures; 159 unsigned int vnet_hdr_len; 160 161 /* PCI BARs support. */ 162 struct resource *iomem; 163 struct resource *msix_mem; 164 165 unsigned int num_rings; 166 unsigned int num_tx_rings; 167 struct ptnet_queue *queues; 168 struct ptnet_queue *rxqueues; 169 struct ptnet_csb_gh *csb_gh; 170 struct ptnet_csb_hg *csb_hg; 171 172 unsigned int min_tx_space; 173 174 struct netmap_pt_guest_adapter *ptna; 175 176 struct callout tick; 177 #ifdef PTNETMAP_STATS 178 struct timeval last_ts; 179 #endif /* PTNETMAP_STATS */ 180 }; 181 182 #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) 183 #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 184 185 static int ptnet_probe(device_t); 186 static int ptnet_attach(device_t); 187 static int ptnet_detach(device_t); 188 static int ptnet_suspend(device_t); 189 static int ptnet_resume(device_t); 190 static int ptnet_shutdown(device_t); 191 192 static void ptnet_init(void *opaque); 193 static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); 194 static int ptnet_init_locked(struct ptnet_softc *sc); 195 static int ptnet_stop(struct ptnet_softc *sc); 196 static int ptnet_transmit(if_t ifp, struct mbuf *m); 197 static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, 198 unsigned int budget, 199 bool may_resched); 200 static void ptnet_qflush(if_t ifp); 201 static void ptnet_tx_task(void *context, int pending); 202 203 static int ptnet_media_change(if_t ifp); 204 static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); 205 #ifdef PTNETMAP_STATS 206 static void ptnet_tick(void *opaque); 207 #endif 208 209 static int ptnet_irqs_init(struct ptnet_softc *sc); 210 static void ptnet_irqs_fini(struct ptnet_softc *sc); 211 212 static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd); 213 static int ptnet_nm_config(struct netmap_adapter *na, 214 struct nm_config_info *info); 215 static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); 216 static int ptnet_nm_register(struct netmap_adapter *na, int onoff); 217 static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); 218 static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); 219 static void ptnet_nm_intr(struct netmap_adapter *na, int onoff); 220 221 static void ptnet_tx_intr(void *opaque); 222 static void ptnet_rx_intr(void *opaque); 223 224 static unsigned ptnet_rx_discard(struct netmap_kring *kring, 225 unsigned int head); 226 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, 227 bool may_resched); 228 static void ptnet_rx_task(void *context, int pending); 229 230 #ifdef DEVICE_POLLING 231 static poll_handler_t ptnet_poll; 232 #endif 233 234 static device_method_t ptnet_methods[] = { 235 DEVMETHOD(device_probe, ptnet_probe), 236 DEVMETHOD(device_attach, ptnet_attach), 237 DEVMETHOD(device_detach, ptnet_detach), 238 DEVMETHOD(device_suspend, ptnet_suspend), 239 DEVMETHOD(device_resume, ptnet_resume), 240 DEVMETHOD(device_shutdown, ptnet_shutdown), 241 DEVMETHOD_END 242 }; 243 244 static driver_t ptnet_driver = { 245 "ptnet", 246 ptnet_methods, 247 sizeof(struct ptnet_softc) 248 }; 249 250 /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */ 251 static devclass_t ptnet_devclass; 252 DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass, 253 NULL, NULL, SI_ORDER_MIDDLE + 2); 254 255 static int 256 ptnet_probe(device_t dev) 257 { 258 if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || 259 pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { 260 return (ENXIO); 261 } 262 263 device_set_desc(dev, "ptnet network adapter"); 264 265 return (BUS_PROBE_DEFAULT); 266 } 267 268 static inline void ptnet_kick(struct ptnet_queue *pq) 269 { 270 #ifdef PTNETMAP_STATS 271 pq->stats.kicks ++; 272 #endif /* PTNETMAP_STATS */ 273 bus_write_4(pq->sc->iomem, pq->kick, 0); 274 } 275 276 #define PTNET_BUF_RING_SIZE 4096 277 #define PTNET_RX_BUDGET 512 278 #define PTNET_RX_BATCH 1 279 #define PTNET_TX_BUDGET 512 280 #define PTNET_TX_BATCH 64 281 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) 282 #define PTNET_MAX_PKT_SIZE 65536 283 284 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP) 285 #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\ 286 CSUM_SCTP_IPV6) 287 #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ 288 PTNET_CSUM_OFFLOAD_IPV6) 289 290 static int 291 ptnet_attach(device_t dev) 292 { 293 uint32_t ptfeatures = 0; 294 unsigned int num_rx_rings, num_tx_rings; 295 struct netmap_adapter na_arg; 296 unsigned int nifp_offset; 297 struct ptnet_softc *sc; 298 if_t ifp; 299 uint32_t macreg; 300 int err, rid; 301 int i; 302 303 sc = device_get_softc(dev); 304 sc->dev = dev; 305 306 /* Setup PCI resources. */ 307 pci_enable_busmaster(dev); 308 309 rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); 310 sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 311 RF_ACTIVE); 312 if (sc->iomem == NULL) { 313 device_printf(dev, "Failed to map I/O BAR\n"); 314 return (ENXIO); 315 } 316 317 /* Negotiate features with the hypervisor. */ 318 if (ptnet_vnet_hdr) { 319 ptfeatures |= PTNETMAP_F_VNET_HDR; 320 } 321 bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ 322 ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ 323 sc->ptfeatures = ptfeatures; 324 325 num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 326 num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 327 sc->num_rings = num_tx_rings + num_rx_rings; 328 sc->num_tx_rings = num_tx_rings; 329 330 if (sc->num_rings * sizeof(struct ptnet_csb_gh) > PAGE_SIZE) { 331 device_printf(dev, "CSB cannot handle that many rings (%u)\n", 332 sc->num_rings); 333 err = ENOMEM; 334 goto err_path; 335 } 336 337 /* Allocate CSB and carry out CSB allocation protocol. */ 338 sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, 339 (size_t)0, -1UL, PAGE_SIZE, 0); 340 if (sc->csb_gh == NULL) { 341 device_printf(dev, "Failed to allocate CSB\n"); 342 err = ENOMEM; 343 goto err_path; 344 } 345 sc->csb_hg = (struct ptnet_csb_hg *)(((char *)sc->csb_gh) + PAGE_SIZE); 346 347 { 348 /* 349 * We use uint64_t rather than vm_paddr_t since we 350 * need 64 bit addresses even on 32 bit platforms. 351 */ 352 uint64_t paddr = vtophys(sc->csb_gh); 353 354 /* CSB allocation protocol: write to BAH first, then 355 * to BAL (for both GH and HG sections). */ 356 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 357 (paddr >> 32) & 0xffffffff); 358 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 359 paddr & 0xffffffff); 360 paddr = vtophys(sc->csb_hg); 361 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 362 (paddr >> 32) & 0xffffffff); 363 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 364 paddr & 0xffffffff); 365 } 366 367 /* Allocate and initialize per-queue data structures. */ 368 sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, 369 M_DEVBUF, M_NOWAIT | M_ZERO); 370 if (sc->queues == NULL) { 371 err = ENOMEM; 372 goto err_path; 373 } 374 sc->rxqueues = sc->queues + num_tx_rings; 375 376 for (i = 0; i < sc->num_rings; i++) { 377 struct ptnet_queue *pq = sc->queues + i; 378 379 pq->sc = sc; 380 pq->kring_id = i; 381 pq->kick = PTNET_IO_KICK_BASE + 4 * i; 382 pq->ptgh = sc->csb_gh + i; 383 pq->pthg = sc->csb_hg + i; 384 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", 385 device_get_nameunit(dev), i); 386 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); 387 if (i >= num_tx_rings) { 388 /* RX queue: fix kring_id. */ 389 pq->kring_id -= num_tx_rings; 390 } else { 391 /* TX queue: allocate buf_ring. */ 392 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, 393 M_DEVBUF, M_NOWAIT, &pq->lock); 394 if (pq->bufring == NULL) { 395 err = ENOMEM; 396 goto err_path; 397 } 398 } 399 } 400 401 sc->min_tx_space = 64; /* Safe initial value. */ 402 403 err = ptnet_irqs_init(sc); 404 if (err) { 405 goto err_path; 406 } 407 408 /* Setup Ethernet interface. */ 409 sc->ifp = ifp = if_alloc(IFT_ETHER); 410 if (ifp == NULL) { 411 device_printf(dev, "Failed to allocate ifnet\n"); 412 err = ENOMEM; 413 goto err_path; 414 } 415 416 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 417 ifp->if_baudrate = IF_Gbps(10); 418 ifp->if_softc = sc; 419 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; 420 ifp->if_init = ptnet_init; 421 ifp->if_ioctl = ptnet_ioctl; 422 #if __FreeBSD_version >= 1100000 423 ifp->if_get_counter = ptnet_get_counter; 424 #endif 425 ifp->if_transmit = ptnet_transmit; 426 ifp->if_qflush = ptnet_qflush; 427 428 ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, 429 ptnet_media_status); 430 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); 431 ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); 432 433 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); 434 sc->hwaddr[0] = (macreg >> 8) & 0xff; 435 sc->hwaddr[1] = macreg & 0xff; 436 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); 437 sc->hwaddr[2] = (macreg >> 24) & 0xff; 438 sc->hwaddr[3] = (macreg >> 16) & 0xff; 439 sc->hwaddr[4] = (macreg >> 8) & 0xff; 440 sc->hwaddr[5] = macreg & 0xff; 441 442 ether_ifattach(ifp, sc->hwaddr); 443 444 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 445 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 446 447 if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { 448 /* Similarly to what the vtnet driver does, we can emulate 449 * VLAN offloadings by inserting and removing the 802.1Q 450 * header during transmit and receive. We are then able 451 * to do checksum offloading of VLAN frames. */ 452 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 453 | IFCAP_VLAN_HWCSUM 454 | IFCAP_TSO | IFCAP_LRO 455 | IFCAP_VLAN_HWTSO 456 | IFCAP_VLAN_HWTAGGING; 457 } 458 459 ifp->if_capenable = ifp->if_capabilities; 460 #ifdef DEVICE_POLLING 461 /* Don't enable polling by default. */ 462 ifp->if_capabilities |= IFCAP_POLLING; 463 #endif 464 snprintf(sc->lock_name, sizeof(sc->lock_name), 465 "%s", device_get_nameunit(dev)); 466 mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); 467 callout_init_mtx(&sc->tick, &sc->lock, 0); 468 469 /* Prepare a netmap_adapter struct instance to do netmap_attach(). */ 470 nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); 471 memset(&na_arg, 0, sizeof(na_arg)); 472 na_arg.ifp = ifp; 473 na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 474 na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 475 na_arg.num_tx_rings = num_tx_rings; 476 na_arg.num_rx_rings = num_rx_rings; 477 na_arg.nm_config = ptnet_nm_config; 478 na_arg.nm_krings_create = ptnet_nm_krings_create; 479 na_arg.nm_krings_delete = ptnet_nm_krings_delete; 480 na_arg.nm_dtor = ptnet_nm_dtor; 481 na_arg.nm_intr = ptnet_nm_intr; 482 na_arg.nm_register = ptnet_nm_register; 483 na_arg.nm_txsync = ptnet_nm_txsync; 484 na_arg.nm_rxsync = ptnet_nm_rxsync; 485 486 netmap_pt_guest_attach(&na_arg, nifp_offset, 487 bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); 488 489 /* Now a netmap adapter for this ifp has been allocated, and it 490 * can be accessed through NA(ifp). We also have to initialize the CSB 491 * pointer. */ 492 sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); 493 494 /* If virtio-net header was negotiated, set the virt_hdr_len field in 495 * the netmap adapter, to inform users that this netmap adapter requires 496 * the application to deal with the headers. */ 497 ptnet_update_vnet_hdr(sc); 498 499 device_printf(dev, "%s() completed\n", __func__); 500 501 return (0); 502 503 err_path: 504 ptnet_detach(dev); 505 return err; 506 } 507 508 static int 509 ptnet_detach(device_t dev) 510 { 511 struct ptnet_softc *sc = device_get_softc(dev); 512 int i; 513 514 #ifdef DEVICE_POLLING 515 if (sc->ifp->if_capenable & IFCAP_POLLING) { 516 ether_poll_deregister(sc->ifp); 517 } 518 #endif 519 callout_drain(&sc->tick); 520 521 if (sc->queues) { 522 /* Drain taskqueues before calling if_detach. */ 523 for (i = 0; i < sc->num_rings; i++) { 524 struct ptnet_queue *pq = sc->queues + i; 525 526 if (pq->taskq) { 527 taskqueue_drain(pq->taskq, &pq->task); 528 } 529 } 530 } 531 532 if (sc->ifp) { 533 ether_ifdetach(sc->ifp); 534 535 /* Uninitialize netmap adapters for this device. */ 536 netmap_detach(sc->ifp); 537 538 ifmedia_removeall(&sc->media); 539 if_free(sc->ifp); 540 sc->ifp = NULL; 541 } 542 543 ptnet_irqs_fini(sc); 544 545 if (sc->csb_gh) { 546 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); 547 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); 548 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); 549 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); 550 contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF); 551 sc->csb_gh = NULL; 552 sc->csb_hg = NULL; 553 } 554 555 if (sc->queues) { 556 for (i = 0; i < sc->num_rings; i++) { 557 struct ptnet_queue *pq = sc->queues + i; 558 559 if (mtx_initialized(&pq->lock)) { 560 mtx_destroy(&pq->lock); 561 } 562 if (pq->bufring != NULL) { 563 buf_ring_free(pq->bufring, M_DEVBUF); 564 } 565 } 566 free(sc->queues, M_DEVBUF); 567 sc->queues = NULL; 568 } 569 570 if (sc->iomem) { 571 bus_release_resource(dev, SYS_RES_IOPORT, 572 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); 573 sc->iomem = NULL; 574 } 575 576 mtx_destroy(&sc->lock); 577 578 device_printf(dev, "%s() completed\n", __func__); 579 580 return (0); 581 } 582 583 static int 584 ptnet_suspend(device_t dev) 585 { 586 struct ptnet_softc *sc; 587 588 sc = device_get_softc(dev); 589 (void)sc; 590 591 return (0); 592 } 593 594 static int 595 ptnet_resume(device_t dev) 596 { 597 struct ptnet_softc *sc; 598 599 sc = device_get_softc(dev); 600 (void)sc; 601 602 return (0); 603 } 604 605 static int 606 ptnet_shutdown(device_t dev) 607 { 608 /* 609 * Suspend already does all of what we need to 610 * do here; we just never expect to be resumed. 611 */ 612 return (ptnet_suspend(dev)); 613 } 614 615 static int 616 ptnet_irqs_init(struct ptnet_softc *sc) 617 { 618 int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); 619 int nvecs = sc->num_rings; 620 device_t dev = sc->dev; 621 int err = ENOSPC; 622 int cpu_cur; 623 int i; 624 625 if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { 626 device_printf(dev, "Could not find MSI-X capability\n"); 627 return (ENXIO); 628 } 629 630 sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 631 &rid, RF_ACTIVE); 632 if (sc->msix_mem == NULL) { 633 device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); 634 return (ENXIO); 635 } 636 637 if (pci_msix_count(dev) < nvecs) { 638 device_printf(dev, "Not enough MSI-X vectors\n"); 639 goto err_path; 640 } 641 642 err = pci_alloc_msix(dev, &nvecs); 643 if (err) { 644 device_printf(dev, "Failed to allocate MSI-X vectors\n"); 645 goto err_path; 646 } 647 648 for (i = 0; i < nvecs; i++) { 649 struct ptnet_queue *pq = sc->queues + i; 650 651 rid = i + 1; 652 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 653 RF_ACTIVE); 654 if (pq->irq == NULL) { 655 device_printf(dev, "Failed to allocate interrupt " 656 "for queue #%d\n", i); 657 err = ENOSPC; 658 goto err_path; 659 } 660 } 661 662 cpu_cur = CPU_FIRST(); 663 for (i = 0; i < nvecs; i++) { 664 struct ptnet_queue *pq = sc->queues + i; 665 void (*handler)(void *) = ptnet_tx_intr; 666 667 if (i >= sc->num_tx_rings) { 668 handler = ptnet_rx_intr; 669 } 670 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, 671 NULL /* intr_filter */, handler, 672 pq, &pq->cookie); 673 if (err) { 674 device_printf(dev, "Failed to register intr handler " 675 "for queue #%d\n", i); 676 goto err_path; 677 } 678 679 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); 680 #if 0 681 bus_bind_intr(sc->dev, pq->irq, cpu_cur); 682 #endif 683 cpu_cur = CPU_NEXT(cpu_cur); 684 } 685 686 device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); 687 688 cpu_cur = CPU_FIRST(); 689 for (i = 0; i < nvecs; i++) { 690 struct ptnet_queue *pq = sc->queues + i; 691 static void (*handler)(void *context, int pending); 692 693 handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task; 694 695 TASK_INIT(&pq->task, 0, handler, pq); 696 pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, 697 taskqueue_thread_enqueue, &pq->taskq); 698 taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", 699 device_get_nameunit(sc->dev), cpu_cur); 700 cpu_cur = CPU_NEXT(cpu_cur); 701 } 702 703 return 0; 704 err_path: 705 ptnet_irqs_fini(sc); 706 return err; 707 } 708 709 static void 710 ptnet_irqs_fini(struct ptnet_softc *sc) 711 { 712 device_t dev = sc->dev; 713 int i; 714 715 for (i = 0; i < sc->num_rings; i++) { 716 struct ptnet_queue *pq = sc->queues + i; 717 718 if (pq->taskq) { 719 taskqueue_free(pq->taskq); 720 pq->taskq = NULL; 721 } 722 723 if (pq->cookie) { 724 bus_teardown_intr(dev, pq->irq, pq->cookie); 725 pq->cookie = NULL; 726 } 727 728 if (pq->irq) { 729 bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); 730 pq->irq = NULL; 731 } 732 } 733 734 if (sc->msix_mem) { 735 pci_release_msi(dev); 736 737 bus_release_resource(dev, SYS_RES_MEMORY, 738 PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), 739 sc->msix_mem); 740 sc->msix_mem = NULL; 741 } 742 } 743 744 static void 745 ptnet_init(void *opaque) 746 { 747 struct ptnet_softc *sc = opaque; 748 749 PTNET_CORE_LOCK(sc); 750 ptnet_init_locked(sc); 751 PTNET_CORE_UNLOCK(sc); 752 } 753 754 static int 755 ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) 756 { 757 struct ptnet_softc *sc = if_getsoftc(ifp); 758 device_t dev = sc->dev; 759 struct ifreq *ifr = (struct ifreq *)data; 760 int mask __unused, err = 0; 761 762 switch (cmd) { 763 case SIOCSIFFLAGS: 764 device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags); 765 PTNET_CORE_LOCK(sc); 766 if (ifp->if_flags & IFF_UP) { 767 /* Network stack wants the iff to be up. */ 768 err = ptnet_init_locked(sc); 769 } else { 770 /* Network stack wants the iff to be down. */ 771 err = ptnet_stop(sc); 772 } 773 /* We don't need to do nothing to support IFF_PROMISC, 774 * since that is managed by the backend port. */ 775 PTNET_CORE_UNLOCK(sc); 776 break; 777 778 case SIOCSIFCAP: 779 device_printf(dev, "SIOCSIFCAP %x %x\n", 780 ifr->ifr_reqcap, ifp->if_capenable); 781 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 782 #ifdef DEVICE_POLLING 783 if (mask & IFCAP_POLLING) { 784 struct ptnet_queue *pq; 785 int i; 786 787 if (ifr->ifr_reqcap & IFCAP_POLLING) { 788 err = ether_poll_register(ptnet_poll, ifp); 789 if (err) { 790 break; 791 } 792 /* Stop queues and sync with taskqueues. */ 793 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 794 for (i = 0; i < sc->num_rings; i++) { 795 pq = sc-> queues + i; 796 /* Make sure the worker sees the 797 * IFF_DRV_RUNNING down. */ 798 PTNET_Q_LOCK(pq); 799 pq->ptgh->guest_need_kick = 0; 800 PTNET_Q_UNLOCK(pq); 801 /* Wait for rescheduling to finish. */ 802 if (pq->taskq) { 803 taskqueue_drain(pq->taskq, 804 &pq->task); 805 } 806 } 807 ifp->if_drv_flags |= IFF_DRV_RUNNING; 808 } else { 809 err = ether_poll_deregister(ifp); 810 for (i = 0; i < sc->num_rings; i++) { 811 pq = sc-> queues + i; 812 PTNET_Q_LOCK(pq); 813 pq->ptgh->guest_need_kick = 1; 814 PTNET_Q_UNLOCK(pq); 815 } 816 } 817 } 818 #endif /* DEVICE_POLLING */ 819 ifp->if_capenable = ifr->ifr_reqcap; 820 break; 821 822 case SIOCSIFMTU: 823 /* We support any reasonable MTU. */ 824 if (ifr->ifr_mtu < ETHERMIN || 825 ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { 826 err = EINVAL; 827 } else { 828 PTNET_CORE_LOCK(sc); 829 ifp->if_mtu = ifr->ifr_mtu; 830 PTNET_CORE_UNLOCK(sc); 831 } 832 break; 833 834 case SIOCSIFMEDIA: 835 case SIOCGIFMEDIA: 836 err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); 837 break; 838 839 default: 840 err = ether_ioctl(ifp, cmd, data); 841 break; 842 } 843 844 return err; 845 } 846 847 static int 848 ptnet_init_locked(struct ptnet_softc *sc) 849 { 850 if_t ifp = sc->ifp; 851 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 852 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 853 unsigned int nm_buf_size; 854 int ret; 855 856 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 857 return 0; /* nothing to do */ 858 } 859 860 device_printf(sc->dev, "%s\n", __func__); 861 862 /* Translate offload capabilities according to if_capenable. */ 863 ifp->if_hwassist = 0; 864 if (ifp->if_capenable & IFCAP_TXCSUM) 865 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; 866 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 867 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; 868 if (ifp->if_capenable & IFCAP_TSO4) 869 ifp->if_hwassist |= CSUM_IP_TSO; 870 if (ifp->if_capenable & IFCAP_TSO6) 871 ifp->if_hwassist |= CSUM_IP6_TSO; 872 873 /* 874 * Prepare the interface for netmap mode access. 875 */ 876 netmap_update_config(na_dr); 877 878 ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); 879 if (ret) { 880 device_printf(sc->dev, "netmap_mem_finalize() failed\n"); 881 return ret; 882 } 883 884 if (sc->ptna->backend_regifs == 0) { 885 ret = ptnet_nm_krings_create(na_nm); 886 if (ret) { 887 device_printf(sc->dev, "ptnet_nm_krings_create() " 888 "failed\n"); 889 goto err_mem_finalize; 890 } 891 892 ret = netmap_mem_rings_create(na_dr); 893 if (ret) { 894 device_printf(sc->dev, "netmap_mem_rings_create() " 895 "failed\n"); 896 goto err_rings_create; 897 } 898 899 ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); 900 if (ret) { 901 device_printf(sc->dev, "netmap_mem_get_lut() " 902 "failed\n"); 903 goto err_get_lut; 904 } 905 } 906 907 ret = ptnet_nm_register(na_dr, 1 /* on */); 908 if (ret) { 909 goto err_register; 910 } 911 912 nm_buf_size = NETMAP_BUF_SIZE(na_dr); 913 914 KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); 915 sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; 916 device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, 917 sc->min_tx_space); 918 #ifdef PTNETMAP_STATS 919 callout_reset(&sc->tick, hz, ptnet_tick, sc); 920 #endif 921 922 ifp->if_drv_flags |= IFF_DRV_RUNNING; 923 924 return 0; 925 926 err_register: 927 memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); 928 err_get_lut: 929 netmap_mem_rings_delete(na_dr); 930 err_rings_create: 931 ptnet_nm_krings_delete(na_nm); 932 err_mem_finalize: 933 netmap_mem_deref(na_dr->nm_mem, na_dr); 934 935 return ret; 936 } 937 938 /* To be called under core lock. */ 939 static int 940 ptnet_stop(struct ptnet_softc *sc) 941 { 942 if_t ifp = sc->ifp; 943 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 944 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 945 int i; 946 947 device_printf(sc->dev, "%s\n", __func__); 948 949 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 950 return 0; /* nothing to do */ 951 } 952 953 /* Clear the driver-ready flag, and synchronize with all the queues, 954 * so that after this loop we are sure nobody is working anymore with 955 * the device. This scheme is taken from the vtnet driver. */ 956 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 957 callout_stop(&sc->tick); 958 for (i = 0; i < sc->num_rings; i++) { 959 PTNET_Q_LOCK(sc->queues + i); 960 PTNET_Q_UNLOCK(sc->queues + i); 961 } 962 963 ptnet_nm_register(na_dr, 0 /* off */); 964 965 if (sc->ptna->backend_regifs == 0) { 966 netmap_mem_rings_delete(na_dr); 967 ptnet_nm_krings_delete(na_nm); 968 } 969 netmap_mem_deref(na_dr->nm_mem, na_dr); 970 971 return 0; 972 } 973 974 static void 975 ptnet_qflush(if_t ifp) 976 { 977 struct ptnet_softc *sc = if_getsoftc(ifp); 978 int i; 979 980 /* Flush all the bufrings and do the interface flush. */ 981 for (i = 0; i < sc->num_rings; i++) { 982 struct ptnet_queue *pq = sc->queues + i; 983 struct mbuf *m; 984 985 PTNET_Q_LOCK(pq); 986 if (pq->bufring) { 987 while ((m = buf_ring_dequeue_sc(pq->bufring))) { 988 m_freem(m); 989 } 990 } 991 PTNET_Q_UNLOCK(pq); 992 } 993 994 if_qflush(ifp); 995 } 996 997 static int 998 ptnet_media_change(if_t ifp) 999 { 1000 struct ptnet_softc *sc = if_getsoftc(ifp); 1001 struct ifmedia *ifm = &sc->media; 1002 1003 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 1004 return EINVAL; 1005 } 1006 1007 return 0; 1008 } 1009 1010 #if __FreeBSD_version >= 1100000 1011 static uint64_t 1012 ptnet_get_counter(if_t ifp, ift_counter cnt) 1013 { 1014 struct ptnet_softc *sc = if_getsoftc(ifp); 1015 struct ptnet_queue_stats stats[2]; 1016 int i; 1017 1018 /* Accumulate statistics over the queues. */ 1019 memset(stats, 0, sizeof(stats)); 1020 for (i = 0; i < sc->num_rings; i++) { 1021 struct ptnet_queue *pq = sc->queues + i; 1022 int idx = (i < sc->num_tx_rings) ? 0 : 1; 1023 1024 stats[idx].packets += pq->stats.packets; 1025 stats[idx].bytes += pq->stats.bytes; 1026 stats[idx].errors += pq->stats.errors; 1027 stats[idx].iqdrops += pq->stats.iqdrops; 1028 stats[idx].mcasts += pq->stats.mcasts; 1029 } 1030 1031 switch (cnt) { 1032 case IFCOUNTER_IPACKETS: 1033 return (stats[1].packets); 1034 case IFCOUNTER_IQDROPS: 1035 return (stats[1].iqdrops); 1036 case IFCOUNTER_IERRORS: 1037 return (stats[1].errors); 1038 case IFCOUNTER_OPACKETS: 1039 return (stats[0].packets); 1040 case IFCOUNTER_OBYTES: 1041 return (stats[0].bytes); 1042 case IFCOUNTER_OMCASTS: 1043 return (stats[0].mcasts); 1044 default: 1045 return (if_get_counter_default(ifp, cnt)); 1046 } 1047 } 1048 #endif 1049 1050 1051 #ifdef PTNETMAP_STATS 1052 /* Called under core lock. */ 1053 static void 1054 ptnet_tick(void *opaque) 1055 { 1056 struct ptnet_softc *sc = opaque; 1057 int i; 1058 1059 for (i = 0; i < sc->num_rings; i++) { 1060 struct ptnet_queue *pq = sc->queues + i; 1061 struct ptnet_queue_stats cur = pq->stats; 1062 struct timeval now; 1063 unsigned int delta; 1064 1065 microtime(&now); 1066 delta = now.tv_usec - sc->last_ts.tv_usec + 1067 (now.tv_sec - sc->last_ts.tv_sec) * 1000000; 1068 delta /= 1000; /* in milliseconds */ 1069 1070 if (delta == 0) 1071 continue; 1072 1073 device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " 1074 "intr %lu\n", i, delta, 1075 (cur.packets - pq->last_stats.packets), 1076 (cur.kicks - pq->last_stats.kicks), 1077 (cur.intrs - pq->last_stats.intrs)); 1078 pq->last_stats = cur; 1079 } 1080 microtime(&sc->last_ts); 1081 callout_schedule(&sc->tick, hz); 1082 } 1083 #endif /* PTNETMAP_STATS */ 1084 1085 static void 1086 ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) 1087 { 1088 /* We are always active, as the backend netmap port is 1089 * always open in netmap mode. */ 1090 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 1091 ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; 1092 } 1093 1094 static uint32_t 1095 ptnet_nm_ptctl(if_t ifp, uint32_t cmd) 1096 { 1097 struct ptnet_softc *sc = if_getsoftc(ifp); 1098 /* 1099 * Write a command and read back error status, 1100 * with zero meaning success. 1101 */ 1102 bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); 1103 return bus_read_4(sc->iomem, PTNET_IO_PTCTL); 1104 } 1105 1106 static int 1107 ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info) 1108 { 1109 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1110 1111 info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 1112 info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 1113 info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 1114 info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 1115 info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); 1116 1117 device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n", 1118 info->num_tx_rings, info->num_rx_rings, 1119 info->num_tx_descs, info->num_rx_descs, 1120 info->rx_buf_maxsize); 1121 1122 return 0; 1123 } 1124 1125 static void 1126 ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) 1127 { 1128 int i; 1129 1130 /* Sync krings from the host, reading from 1131 * CSB. */ 1132 for (i = 0; i < sc->num_rings; i++) { 1133 struct ptnet_csb_gh *ptgh = sc->queues[i].ptgh; 1134 struct ptnet_csb_hg *pthg = sc->queues[i].pthg; 1135 struct netmap_kring *kring; 1136 1137 if (i < na->num_tx_rings) { 1138 kring = na->tx_rings[i]; 1139 } else { 1140 kring = na->rx_rings[i - na->num_tx_rings]; 1141 } 1142 kring->rhead = kring->ring->head = ptgh->head; 1143 kring->rcur = kring->ring->cur = ptgh->cur; 1144 kring->nr_hwcur = pthg->hwcur; 1145 kring->nr_hwtail = kring->rtail = 1146 kring->ring->tail = pthg->hwtail; 1147 1148 ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, 1149 pthg->hwcur, ptgh->head, ptgh->cur, 1150 pthg->hwtail); 1151 ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", 1152 t, i, kring->nr_hwcur, kring->rhead, kring->rcur, 1153 kring->ring->head, kring->ring->cur, kring->nr_hwtail, 1154 kring->rtail, kring->ring->tail); 1155 } 1156 } 1157 1158 static void 1159 ptnet_update_vnet_hdr(struct ptnet_softc *sc) 1160 { 1161 unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; 1162 1163 bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); 1164 sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); 1165 sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; 1166 } 1167 1168 static int 1169 ptnet_nm_register(struct netmap_adapter *na, int onoff) 1170 { 1171 /* device-specific */ 1172 if_t ifp = na->ifp; 1173 struct ptnet_softc *sc = if_getsoftc(ifp); 1174 int native = (na == &sc->ptna->hwup.up); 1175 struct ptnet_queue *pq; 1176 enum txrx t; 1177 int ret = 0; 1178 int i; 1179 1180 if (!onoff) { 1181 sc->ptna->backend_regifs--; 1182 } 1183 1184 /* If this is the last netmap client, guest interrupt enable flags may 1185 * be in arbitrary state. Since these flags are going to be used also 1186 * by the netdevice driver, we have to make sure to start with 1187 * notifications enabled. Also, schedule NAPI to flush pending packets 1188 * in the RX rings, since we will not receive further interrupts 1189 * until these will be processed. */ 1190 if (native && !onoff && na->active_fds == 0) { 1191 D("Exit netmap mode, re-enable interrupts"); 1192 for (i = 0; i < sc->num_rings; i++) { 1193 pq = sc->queues + i; 1194 pq->ptgh->guest_need_kick = 1; 1195 } 1196 } 1197 1198 if (onoff) { 1199 if (sc->ptna->backend_regifs == 0) { 1200 /* Initialize notification enable fields in the CSB. */ 1201 for (i = 0; i < sc->num_rings; i++) { 1202 pq = sc->queues + i; 1203 pq->pthg->host_need_kick = 1; 1204 pq->ptgh->guest_need_kick = 1205 (!(ifp->if_capenable & IFCAP_POLLING) 1206 && i >= sc->num_tx_rings); 1207 } 1208 1209 /* Set the virtio-net header length. */ 1210 ptnet_update_vnet_hdr(sc); 1211 1212 /* Make sure the host adapter passed through is ready 1213 * for txsync/rxsync. */ 1214 ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE); 1215 if (ret) { 1216 return ret; 1217 } 1218 } 1219 1220 /* Sync from CSB must be done after REGIF PTCTL. Skip this 1221 * step only if this is a netmap client and it is not the 1222 * first one. */ 1223 if ((!native && sc->ptna->backend_regifs == 0) || 1224 (native && na->active_fds == 0)) { 1225 ptnet_sync_from_csb(sc, na); 1226 } 1227 1228 /* If not native, don't call nm_set_native_flags, since we don't want 1229 * to replace if_transmit method, nor set NAF_NETMAP_ON */ 1230 if (native) { 1231 for_rx_tx(t) { 1232 for (i = 0; i <= nma_get_nrings(na, t); i++) { 1233 struct netmap_kring *kring = NMR(na, t)[i]; 1234 1235 if (nm_kring_pending_on(kring)) { 1236 kring->nr_mode = NKR_NETMAP_ON; 1237 } 1238 } 1239 } 1240 nm_set_native_flags(na); 1241 } 1242 1243 } else { 1244 if (native) { 1245 nm_clear_native_flags(na); 1246 for_rx_tx(t) { 1247 for (i = 0; i <= nma_get_nrings(na, t); i++) { 1248 struct netmap_kring *kring = NMR(na, t)[i]; 1249 1250 if (nm_kring_pending_off(kring)) { 1251 kring->nr_mode = NKR_NETMAP_OFF; 1252 } 1253 } 1254 } 1255 } 1256 1257 /* Sync from CSB must be done before UNREGIF PTCTL, on the last 1258 * netmap client. */ 1259 if (native && na->active_fds == 0) { 1260 ptnet_sync_from_csb(sc, na); 1261 } 1262 1263 if (sc->ptna->backend_regifs == 0) { 1264 ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE); 1265 } 1266 } 1267 1268 if (onoff) { 1269 sc->ptna->backend_regifs++; 1270 } 1271 1272 return ret; 1273 } 1274 1275 static int 1276 ptnet_nm_txsync(struct netmap_kring *kring, int flags) 1277 { 1278 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); 1279 struct ptnet_queue *pq = sc->queues + kring->ring_id; 1280 bool notify; 1281 1282 notify = netmap_pt_guest_txsync(pq->ptgh, pq->pthg, kring, flags); 1283 if (notify) { 1284 ptnet_kick(pq); 1285 } 1286 1287 return 0; 1288 } 1289 1290 static int 1291 ptnet_nm_rxsync(struct netmap_kring *kring, int flags) 1292 { 1293 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); 1294 struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; 1295 bool notify; 1296 1297 notify = netmap_pt_guest_rxsync(pq->ptgh, pq->pthg, kring, flags); 1298 if (notify) { 1299 ptnet_kick(pq); 1300 } 1301 1302 return 0; 1303 } 1304 1305 static void 1306 ptnet_nm_intr(struct netmap_adapter *na, int onoff) 1307 { 1308 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1309 int i; 1310 1311 for (i = 0; i < sc->num_rings; i++) { 1312 struct ptnet_queue *pq = sc->queues + i; 1313 pq->ptgh->guest_need_kick = onoff; 1314 } 1315 } 1316 1317 static void 1318 ptnet_tx_intr(void *opaque) 1319 { 1320 struct ptnet_queue *pq = opaque; 1321 struct ptnet_softc *sc = pq->sc; 1322 1323 DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); 1324 #ifdef PTNETMAP_STATS 1325 pq->stats.intrs ++; 1326 #endif /* PTNETMAP_STATS */ 1327 1328 if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { 1329 return; 1330 } 1331 1332 /* Schedule the tasqueue to flush process transmissions requests. 1333 * However, vtnet, if_em and if_igb just call ptnet_transmit() here, 1334 * at least when using MSI-X interrupts. The if_em driver, instead 1335 * schedule taskqueue when using legacy interrupts. */ 1336 taskqueue_enqueue(pq->taskq, &pq->task); 1337 } 1338 1339 static void 1340 ptnet_rx_intr(void *opaque) 1341 { 1342 struct ptnet_queue *pq = opaque; 1343 struct ptnet_softc *sc = pq->sc; 1344 unsigned int unused; 1345 1346 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); 1347 #ifdef PTNETMAP_STATS 1348 pq->stats.intrs ++; 1349 #endif /* PTNETMAP_STATS */ 1350 1351 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { 1352 return; 1353 } 1354 1355 /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, 1356 * receive-side processing is executed directly in the interrupt 1357 * service routine. Alternatively, we may schedule the taskqueue. */ 1358 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); 1359 } 1360 1361 /* The following offloadings-related functions are taken from the vtnet 1362 * driver, but the same functionality is required for the ptnet driver. 1363 * As a temporary solution, I copied this code from vtnet and I started 1364 * to generalize it (taking away driver-specific statistic accounting), 1365 * making as little modifications as possible. 1366 * In the future we need to share these functions between vtnet and ptnet. 1367 */ 1368 static int 1369 ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) 1370 { 1371 struct ether_vlan_header *evh; 1372 int offset; 1373 1374 evh = mtod(m, struct ether_vlan_header *); 1375 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1376 /* BMV: We should handle nested VLAN tags too. */ 1377 *etype = ntohs(evh->evl_proto); 1378 offset = sizeof(struct ether_vlan_header); 1379 } else { 1380 *etype = ntohs(evh->evl_encap_proto); 1381 offset = sizeof(struct ether_header); 1382 } 1383 1384 switch (*etype) { 1385 #if defined(INET) 1386 case ETHERTYPE_IP: { 1387 struct ip *ip, iphdr; 1388 if (__predict_false(m->m_len < offset + sizeof(struct ip))) { 1389 m_copydata(m, offset, sizeof(struct ip), 1390 (caddr_t) &iphdr); 1391 ip = &iphdr; 1392 } else 1393 ip = (struct ip *)(m->m_data + offset); 1394 *proto = ip->ip_p; 1395 *start = offset + (ip->ip_hl << 2); 1396 break; 1397 } 1398 #endif 1399 #if defined(INET6) 1400 case ETHERTYPE_IPV6: 1401 *proto = -1; 1402 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); 1403 /* Assert the network stack sent us a valid packet. */ 1404 KASSERT(*start > offset, 1405 ("%s: mbuf %p start %d offset %d proto %d", __func__, m, 1406 *start, offset, *proto)); 1407 break; 1408 #endif 1409 default: 1410 /* Here we should increment the tx_csum_bad_ethtype counter. */ 1411 return (EINVAL); 1412 } 1413 1414 return (0); 1415 } 1416 1417 static int 1418 ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, 1419 int offset, bool allow_ecn, struct virtio_net_hdr *hdr) 1420 { 1421 static struct timeval lastecn; 1422 static int curecn; 1423 struct tcphdr *tcp, tcphdr; 1424 1425 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { 1426 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); 1427 tcp = &tcphdr; 1428 } else 1429 tcp = (struct tcphdr *)(m->m_data + offset); 1430 1431 hdr->hdr_len = offset + (tcp->th_off << 2); 1432 hdr->gso_size = m->m_pkthdr.tso_segsz; 1433 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : 1434 VIRTIO_NET_HDR_GSO_TCPV6; 1435 1436 if (tcp->th_flags & TH_CWR) { 1437 /* 1438 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, 1439 * ECN support is not on a per-interface basis, but globally via 1440 * the net.inet.tcp.ecn.enable sysctl knob. The default is off. 1441 */ 1442 if (!allow_ecn) { 1443 if (ppsratecheck(&lastecn, &curecn, 1)) 1444 if_printf(ifp, 1445 "TSO with ECN not negotiated with host\n"); 1446 return (ENOTSUP); 1447 } 1448 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 1449 } 1450 1451 /* Here we should increment tx_tso counter. */ 1452 1453 return (0); 1454 } 1455 1456 static struct mbuf * 1457 ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn, 1458 struct virtio_net_hdr *hdr) 1459 { 1460 int flags, etype, csum_start, proto, error; 1461 1462 flags = m->m_pkthdr.csum_flags; 1463 1464 error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start); 1465 if (error) 1466 goto drop; 1467 1468 if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) || 1469 (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) { 1470 /* 1471 * We could compare the IP protocol vs the CSUM_ flag too, 1472 * but that really should not be necessary. 1473 */ 1474 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; 1475 hdr->csum_start = csum_start; 1476 hdr->csum_offset = m->m_pkthdr.csum_data; 1477 /* Here we should increment the tx_csum counter. */ 1478 } 1479 1480 if (flags & CSUM_TSO) { 1481 if (__predict_false(proto != IPPROTO_TCP)) { 1482 /* Likely failed to correctly parse the mbuf. 1483 * Here we should increment the tx_tso_not_tcp 1484 * counter. */ 1485 goto drop; 1486 } 1487 1488 KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, 1489 ("%s: mbuf %p TSO without checksum offload %#x", 1490 __func__, m, flags)); 1491 1492 error = ptnet_tx_offload_tso(ifp, m, etype, csum_start, 1493 allow_ecn, hdr); 1494 if (error) 1495 goto drop; 1496 } 1497 1498 return (m); 1499 1500 drop: 1501 m_freem(m); 1502 return (NULL); 1503 } 1504 1505 static void 1506 ptnet_vlan_tag_remove(struct mbuf *m) 1507 { 1508 struct ether_vlan_header *evh; 1509 1510 evh = mtod(m, struct ether_vlan_header *); 1511 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); 1512 m->m_flags |= M_VLANTAG; 1513 1514 /* Strip the 802.1Q header. */ 1515 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, 1516 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1517 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1518 } 1519 1520 /* 1521 * Use the checksum offset in the VirtIO header to set the 1522 * correct CSUM_* flags. 1523 */ 1524 static int 1525 ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start, 1526 struct virtio_net_hdr *hdr) 1527 { 1528 #if defined(INET) || defined(INET6) 1529 int offset = hdr->csum_start + hdr->csum_offset; 1530 #endif 1531 1532 /* Only do a basic sanity check on the offset. */ 1533 switch (eth_type) { 1534 #if defined(INET) 1535 case ETHERTYPE_IP: 1536 if (__predict_false(offset < ip_start + sizeof(struct ip))) 1537 return (1); 1538 break; 1539 #endif 1540 #if defined(INET6) 1541 case ETHERTYPE_IPV6: 1542 if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) 1543 return (1); 1544 break; 1545 #endif 1546 default: 1547 /* Here we should increment the rx_csum_bad_ethtype counter. */ 1548 return (1); 1549 } 1550 1551 /* 1552 * Use the offset to determine the appropriate CSUM_* flags. This is 1553 * a bit dirty, but we can get by with it since the checksum offsets 1554 * happen to be different. We assume the host host does not do IPv4 1555 * header checksum offloading. 1556 */ 1557 switch (hdr->csum_offset) { 1558 case offsetof(struct udphdr, uh_sum): 1559 case offsetof(struct tcphdr, th_sum): 1560 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1561 m->m_pkthdr.csum_data = 0xFFFF; 1562 break; 1563 case offsetof(struct sctphdr, checksum): 1564 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1565 break; 1566 default: 1567 /* Here we should increment the rx_csum_bad_offset counter. */ 1568 return (1); 1569 } 1570 1571 return (0); 1572 } 1573 1574 static int 1575 ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start, 1576 struct virtio_net_hdr *hdr) 1577 { 1578 int offset, proto; 1579 1580 switch (eth_type) { 1581 #if defined(INET) 1582 case ETHERTYPE_IP: { 1583 struct ip *ip; 1584 if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) 1585 return (1); 1586 ip = (struct ip *)(m->m_data + ip_start); 1587 proto = ip->ip_p; 1588 offset = ip_start + (ip->ip_hl << 2); 1589 break; 1590 } 1591 #endif 1592 #if defined(INET6) 1593 case ETHERTYPE_IPV6: 1594 if (__predict_false(m->m_len < ip_start + 1595 sizeof(struct ip6_hdr))) 1596 return (1); 1597 offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); 1598 if (__predict_false(offset < 0)) 1599 return (1); 1600 break; 1601 #endif 1602 default: 1603 /* Here we should increment the rx_csum_bad_ethtype counter. */ 1604 return (1); 1605 } 1606 1607 switch (proto) { 1608 case IPPROTO_TCP: 1609 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) 1610 return (1); 1611 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1612 m->m_pkthdr.csum_data = 0xFFFF; 1613 break; 1614 case IPPROTO_UDP: 1615 if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) 1616 return (1); 1617 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1618 m->m_pkthdr.csum_data = 0xFFFF; 1619 break; 1620 case IPPROTO_SCTP: 1621 if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) 1622 return (1); 1623 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1624 break; 1625 default: 1626 /* 1627 * For the remaining protocols, FreeBSD does not support 1628 * checksum offloading, so the checksum will be recomputed. 1629 */ 1630 #if 0 1631 if_printf(ifp, "cksum offload of unsupported " 1632 "protocol eth_type=%#x proto=%d csum_start=%d " 1633 "csum_offset=%d\n", __func__, eth_type, proto, 1634 hdr->csum_start, hdr->csum_offset); 1635 #endif 1636 break; 1637 } 1638 1639 return (0); 1640 } 1641 1642 /* 1643 * Set the appropriate CSUM_* flags. Unfortunately, the information 1644 * provided is not directly useful to us. The VirtIO header gives the 1645 * offset of the checksum, which is all Linux needs, but this is not 1646 * how FreeBSD does things. We are forced to peek inside the packet 1647 * a bit. 1648 * 1649 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD 1650 * could accept the offsets and let the stack figure it out. 1651 */ 1652 static int 1653 ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) 1654 { 1655 struct ether_header *eh; 1656 struct ether_vlan_header *evh; 1657 uint16_t eth_type; 1658 int offset, error; 1659 1660 eh = mtod(m, struct ether_header *); 1661 eth_type = ntohs(eh->ether_type); 1662 if (eth_type == ETHERTYPE_VLAN) { 1663 /* BMV: We should handle nested VLAN tags too. */ 1664 evh = mtod(m, struct ether_vlan_header *); 1665 eth_type = ntohs(evh->evl_proto); 1666 offset = sizeof(struct ether_vlan_header); 1667 } else 1668 offset = sizeof(struct ether_header); 1669 1670 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1671 error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr); 1672 else 1673 error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr); 1674 1675 return (error); 1676 } 1677 /* End of offloading-related functions to be shared with vtnet. */ 1678 1679 static inline void 1680 ptnet_sync_tail(struct ptnet_csb_hg *pthg, struct netmap_kring *kring) 1681 { 1682 struct netmap_ring *ring = kring->ring; 1683 1684 /* Update hwcur and hwtail as known by the host. */ 1685 ptnetmap_guest_read_kring_csb(pthg, kring); 1686 1687 /* nm_sync_finalize */ 1688 ring->tail = kring->rtail = kring->nr_hwtail; 1689 } 1690 1691 static void 1692 ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, 1693 unsigned int head, unsigned int sync_flags) 1694 { 1695 struct netmap_ring *ring = kring->ring; 1696 struct ptnet_csb_gh *ptgh = pq->ptgh; 1697 struct ptnet_csb_hg *pthg = pq->pthg; 1698 1699 /* Some packets have been pushed to the netmap ring. We have 1700 * to tell the host to process the new packets, updating cur 1701 * and head in the CSB. */ 1702 ring->head = ring->cur = head; 1703 1704 /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ 1705 kring->rcur = kring->rhead = head; 1706 1707 ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead); 1708 1709 /* Kick the host if needed. */ 1710 if (NM_ACCESS_ONCE(pthg->host_need_kick)) { 1711 ptgh->sync_flags = sync_flags; 1712 ptnet_kick(pq); 1713 } 1714 } 1715 1716 #define PTNET_TX_NOSPACE(_h, _k, _min) \ 1717 ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ 1718 (_k)->rtail - (_h)) < (_min) 1719 1720 /* This function may be called by the network stack, or by 1721 * by the taskqueue thread. */ 1722 static int 1723 ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, 1724 bool may_resched) 1725 { 1726 struct ptnet_softc *sc = pq->sc; 1727 bool have_vnet_hdr = sc->vnet_hdr_len; 1728 struct netmap_adapter *na = &sc->ptna->dr.up; 1729 if_t ifp = sc->ifp; 1730 unsigned int batch_count = 0; 1731 struct ptnet_csb_gh *ptgh; 1732 struct ptnet_csb_hg *pthg; 1733 struct netmap_kring *kring; 1734 struct netmap_ring *ring; 1735 struct netmap_slot *slot; 1736 unsigned int count = 0; 1737 unsigned int minspace; 1738 unsigned int head; 1739 unsigned int lim; 1740 struct mbuf *mhead; 1741 struct mbuf *mf; 1742 int nmbuf_bytes; 1743 uint8_t *nmbuf; 1744 1745 if (!PTNET_Q_TRYLOCK(pq)) { 1746 /* We failed to acquire the lock, schedule the taskqueue. */ 1747 RD(1, "Deferring TX work"); 1748 if (may_resched) { 1749 taskqueue_enqueue(pq->taskq, &pq->task); 1750 } 1751 1752 return 0; 1753 } 1754 1755 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 1756 PTNET_Q_UNLOCK(pq); 1757 RD(1, "Interface is down"); 1758 return ENETDOWN; 1759 } 1760 1761 ptgh = pq->ptgh; 1762 pthg = pq->pthg; 1763 kring = na->tx_rings[pq->kring_id]; 1764 ring = kring->ring; 1765 lim = kring->nkr_num_slots - 1; 1766 head = ring->head; 1767 minspace = sc->min_tx_space; 1768 1769 while (count < budget) { 1770 if (PTNET_TX_NOSPACE(head, kring, minspace)) { 1771 /* We ran out of slot, let's see if the host has 1772 * freed up some, by reading hwcur and hwtail from 1773 * the CSB. */ 1774 ptnet_sync_tail(pthg, kring); 1775 1776 if (PTNET_TX_NOSPACE(head, kring, minspace)) { 1777 /* Still no slots available. Reactivate the 1778 * interrupts so that we can be notified 1779 * when some free slots are made available by 1780 * the host. */ 1781 ptgh->guest_need_kick = 1; 1782 1783 /* Double-check. */ 1784 ptnet_sync_tail(pthg, kring); 1785 if (likely(PTNET_TX_NOSPACE(head, kring, 1786 minspace))) { 1787 break; 1788 } 1789 1790 RD(1, "Found more slots by doublecheck"); 1791 /* More slots were freed before reactivating 1792 * the interrupts. */ 1793 ptgh->guest_need_kick = 0; 1794 } 1795 } 1796 1797 mhead = drbr_peek(ifp, pq->bufring); 1798 if (!mhead) { 1799 break; 1800 } 1801 1802 /* Initialize transmission state variables. */ 1803 slot = ring->slot + head; 1804 nmbuf = NMB(na, slot); 1805 nmbuf_bytes = 0; 1806 1807 /* If needed, prepare the virtio-net header at the beginning 1808 * of the first slot. */ 1809 if (have_vnet_hdr) { 1810 struct virtio_net_hdr *vh = 1811 (struct virtio_net_hdr *)nmbuf; 1812 1813 /* For performance, we could replace this memset() with 1814 * two 8-bytes-wide writes. */ 1815 memset(nmbuf, 0, PTNET_HDR_SIZE); 1816 if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { 1817 mhead = ptnet_tx_offload(ifp, mhead, false, 1818 vh); 1819 if (unlikely(!mhead)) { 1820 /* Packet dropped because errors 1821 * occurred while preparing the vnet 1822 * header. Let's go ahead with the next 1823 * packet. */ 1824 pq->stats.errors ++; 1825 drbr_advance(ifp, pq->bufring); 1826 continue; 1827 } 1828 } 1829 ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x " 1830 "csum_start %u csum_ofs %u hdr_len = %u " 1831 "gso_size %u gso_type %x", __func__, 1832 mhead->m_pkthdr.csum_flags, vh->flags, 1833 vh->csum_start, vh->csum_offset, vh->hdr_len, 1834 vh->gso_size, vh->gso_type); 1835 1836 nmbuf += PTNET_HDR_SIZE; 1837 nmbuf_bytes += PTNET_HDR_SIZE; 1838 } 1839 1840 for (mf = mhead; mf; mf = mf->m_next) { 1841 uint8_t *mdata = mf->m_data; 1842 int mlen = mf->m_len; 1843 1844 for (;;) { 1845 int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; 1846 1847 if (mlen < copy) { 1848 copy = mlen; 1849 } 1850 memcpy(nmbuf, mdata, copy); 1851 1852 mdata += copy; 1853 mlen -= copy; 1854 nmbuf += copy; 1855 nmbuf_bytes += copy; 1856 1857 if (!mlen) { 1858 break; 1859 } 1860 1861 slot->len = nmbuf_bytes; 1862 slot->flags = NS_MOREFRAG; 1863 1864 head = nm_next(head, lim); 1865 KASSERT(head != ring->tail, 1866 ("Unexpectedly run out of TX space")); 1867 slot = ring->slot + head; 1868 nmbuf = NMB(na, slot); 1869 nmbuf_bytes = 0; 1870 } 1871 } 1872 1873 /* Complete last slot and update head. */ 1874 slot->len = nmbuf_bytes; 1875 slot->flags = 0; 1876 head = nm_next(head, lim); 1877 1878 /* Consume the packet just processed. */ 1879 drbr_advance(ifp, pq->bufring); 1880 1881 /* Copy the packet to listeners. */ 1882 ETHER_BPF_MTAP(ifp, mhead); 1883 1884 pq->stats.packets ++; 1885 pq->stats.bytes += mhead->m_pkthdr.len; 1886 if (mhead->m_flags & M_MCAST) { 1887 pq->stats.mcasts ++; 1888 } 1889 1890 m_freem(mhead); 1891 1892 count ++; 1893 if (++batch_count == PTNET_TX_BATCH) { 1894 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1895 batch_count = 0; 1896 } 1897 } 1898 1899 if (batch_count) { 1900 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1901 } 1902 1903 if (count >= budget && may_resched) { 1904 DBG(RD(1, "out of budget: resched, %d mbufs pending\n", 1905 drbr_inuse(ifp, pq->bufring))); 1906 taskqueue_enqueue(pq->taskq, &pq->task); 1907 } 1908 1909 PTNET_Q_UNLOCK(pq); 1910 1911 return count; 1912 } 1913 1914 static int 1915 ptnet_transmit(if_t ifp, struct mbuf *m) 1916 { 1917 struct ptnet_softc *sc = if_getsoftc(ifp); 1918 struct ptnet_queue *pq; 1919 unsigned int queue_idx; 1920 int err; 1921 1922 DBG(device_printf(sc->dev, "transmit %p\n", m)); 1923 1924 /* Insert 802.1Q header if needed. */ 1925 if (m->m_flags & M_VLANTAG) { 1926 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1927 if (m == NULL) { 1928 return ENOBUFS; 1929 } 1930 m->m_flags &= ~M_VLANTAG; 1931 } 1932 1933 /* Get the flow-id if available. */ 1934 queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? 1935 m->m_pkthdr.flowid : curcpu; 1936 1937 if (unlikely(queue_idx >= sc->num_tx_rings)) { 1938 queue_idx %= sc->num_tx_rings; 1939 } 1940 1941 pq = sc->queues + queue_idx; 1942 1943 err = drbr_enqueue(ifp, pq->bufring, m); 1944 if (err) { 1945 /* ENOBUFS when the bufring is full */ 1946 RD(1, "%s: drbr_enqueue() failed %d\n", 1947 __func__, err); 1948 pq->stats.errors ++; 1949 return err; 1950 } 1951 1952 if (ifp->if_capenable & IFCAP_POLLING) { 1953 /* If polling is on, the transmit queues will be 1954 * drained by the poller. */ 1955 return 0; 1956 } 1957 1958 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); 1959 1960 return (err < 0) ? err : 0; 1961 } 1962 1963 static unsigned int 1964 ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) 1965 { 1966 struct netmap_ring *ring = kring->ring; 1967 struct netmap_slot *slot = ring->slot + head; 1968 1969 for (;;) { 1970 head = nm_next(head, kring->nkr_num_slots - 1); 1971 if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { 1972 break; 1973 } 1974 slot = ring->slot + head; 1975 } 1976 1977 return head; 1978 } 1979 1980 static inline struct mbuf * 1981 ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) 1982 { 1983 uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; 1984 1985 do { 1986 unsigned int copy; 1987 1988 if (mtail->m_len == MCLBYTES) { 1989 struct mbuf *mf; 1990 1991 mf = m_getcl(M_NOWAIT, MT_DATA, 0); 1992 if (unlikely(!mf)) { 1993 return NULL; 1994 } 1995 1996 mtail->m_next = mf; 1997 mtail = mf; 1998 mdata = mtod(mtail, uint8_t *); 1999 mtail->m_len = 0; 2000 } 2001 2002 copy = MCLBYTES - mtail->m_len; 2003 if (nmbuf_len < copy) { 2004 copy = nmbuf_len; 2005 } 2006 2007 memcpy(mdata, nmbuf, copy); 2008 2009 nmbuf += copy; 2010 nmbuf_len -= copy; 2011 mdata += copy; 2012 mtail->m_len += copy; 2013 } while (nmbuf_len); 2014 2015 return mtail; 2016 } 2017 2018 static int 2019 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) 2020 { 2021 struct ptnet_softc *sc = pq->sc; 2022 bool have_vnet_hdr = sc->vnet_hdr_len; 2023 struct ptnet_csb_gh *ptgh = pq->ptgh; 2024 struct ptnet_csb_hg *pthg = pq->pthg; 2025 struct netmap_adapter *na = &sc->ptna->dr.up; 2026 struct netmap_kring *kring = na->rx_rings[pq->kring_id]; 2027 struct netmap_ring *ring = kring->ring; 2028 unsigned int const lim = kring->nkr_num_slots - 1; 2029 unsigned int batch_count = 0; 2030 if_t ifp = sc->ifp; 2031 unsigned int count = 0; 2032 uint32_t head; 2033 2034 PTNET_Q_LOCK(pq); 2035 2036 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 2037 goto unlock; 2038 } 2039 2040 kring->nr_kflags &= ~NKR_PENDINTR; 2041 2042 head = ring->head; 2043 while (count < budget) { 2044 uint32_t prev_head = head; 2045 struct mbuf *mhead, *mtail; 2046 struct virtio_net_hdr *vh; 2047 struct netmap_slot *slot; 2048 unsigned int nmbuf_len; 2049 uint8_t *nmbuf; 2050 int deliver = 1; /* the mbuf to the network stack. */ 2051 host_sync: 2052 if (head == ring->tail) { 2053 /* We ran out of slot, let's see if the host has 2054 * added some, by reading hwcur and hwtail from 2055 * the CSB. */ 2056 ptnet_sync_tail(pthg, kring); 2057 2058 if (head == ring->tail) { 2059 /* Still no slots available. Reactivate 2060 * interrupts as they were disabled by the 2061 * host thread right before issuing the 2062 * last interrupt. */ 2063 ptgh->guest_need_kick = 1; 2064 2065 /* Double-check. */ 2066 ptnet_sync_tail(pthg, kring); 2067 if (likely(head == ring->tail)) { 2068 break; 2069 } 2070 ptgh->guest_need_kick = 0; 2071 } 2072 } 2073 2074 /* Initialize ring state variables, possibly grabbing the 2075 * virtio-net header. */ 2076 slot = ring->slot + head; 2077 nmbuf = NMB(na, slot); 2078 nmbuf_len = slot->len; 2079 2080 vh = (struct virtio_net_hdr *)nmbuf; 2081 if (have_vnet_hdr) { 2082 if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { 2083 /* There is no good reason why host should 2084 * put the header in multiple netmap slots. 2085 * If this is the case, discard. */ 2086 RD(1, "Fragmented vnet-hdr: dropping"); 2087 head = ptnet_rx_discard(kring, head); 2088 pq->stats.iqdrops ++; 2089 deliver = 0; 2090 goto skip; 2091 } 2092 ND(1, "%s: vnet hdr: flags %x csum_start %u " 2093 "csum_ofs %u hdr_len = %u gso_size %u " 2094 "gso_type %x", __func__, vh->flags, 2095 vh->csum_start, vh->csum_offset, vh->hdr_len, 2096 vh->gso_size, vh->gso_type); 2097 nmbuf += PTNET_HDR_SIZE; 2098 nmbuf_len -= PTNET_HDR_SIZE; 2099 } 2100 2101 /* Allocate the head of a new mbuf chain. 2102 * We use m_getcl() to allocate an mbuf with standard cluster 2103 * size (MCLBYTES). In the future we could use m_getjcl() 2104 * to choose different sizes. */ 2105 mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 2106 if (unlikely(mhead == NULL)) { 2107 device_printf(sc->dev, "%s: failed to allocate mbuf " 2108 "head\n", __func__); 2109 pq->stats.errors ++; 2110 break; 2111 } 2112 2113 /* Initialize the mbuf state variables. */ 2114 mhead->m_pkthdr.len = nmbuf_len; 2115 mtail->m_len = 0; 2116 2117 /* Scan all the netmap slots containing the current packet. */ 2118 for (;;) { 2119 DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " 2120 "len %u, flags %u\n", __func__, 2121 head, ring->tail, slot->len, 2122 slot->flags)); 2123 2124 mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len); 2125 if (unlikely(!mtail)) { 2126 /* Ouch. We ran out of memory while processing 2127 * a packet. We have to restore the previous 2128 * head position, free the mbuf chain, and 2129 * schedule the taskqueue to give the packet 2130 * another chance. */ 2131 device_printf(sc->dev, "%s: failed to allocate" 2132 " mbuf frag, reset head %u --> %u\n", 2133 __func__, head, prev_head); 2134 head = prev_head; 2135 m_freem(mhead); 2136 pq->stats.errors ++; 2137 if (may_resched) { 2138 taskqueue_enqueue(pq->taskq, 2139 &pq->task); 2140 } 2141 goto escape; 2142 } 2143 2144 /* We have to increment head irrespective of the 2145 * NS_MOREFRAG being set or not. */ 2146 head = nm_next(head, lim); 2147 2148 if (!(slot->flags & NS_MOREFRAG)) { 2149 break; 2150 } 2151 2152 if (unlikely(head == ring->tail)) { 2153 /* The very last slot prepared by the host has 2154 * the NS_MOREFRAG set. Drop it and continue 2155 * the outer cycle (to do the double-check). */ 2156 RD(1, "Incomplete packet: dropping"); 2157 m_freem(mhead); 2158 pq->stats.iqdrops ++; 2159 goto host_sync; 2160 } 2161 2162 slot = ring->slot + head; 2163 nmbuf = NMB(na, slot); 2164 nmbuf_len = slot->len; 2165 mhead->m_pkthdr.len += nmbuf_len; 2166 } 2167 2168 mhead->m_pkthdr.rcvif = ifp; 2169 mhead->m_pkthdr.csum_flags = 0; 2170 2171 /* Store the queue idx in the packet header. */ 2172 mhead->m_pkthdr.flowid = pq->kring_id; 2173 M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE); 2174 2175 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 2176 struct ether_header *eh; 2177 2178 eh = mtod(mhead, struct ether_header *); 2179 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2180 ptnet_vlan_tag_remove(mhead); 2181 /* 2182 * With the 802.1Q header removed, update the 2183 * checksum starting location accordingly. 2184 */ 2185 if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 2186 vh->csum_start -= ETHER_VLAN_ENCAP_LEN; 2187 } 2188 } 2189 2190 if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM 2191 | VIRTIO_NET_HDR_F_DATA_VALID))) { 2192 if (unlikely(ptnet_rx_csum(mhead, vh))) { 2193 m_freem(mhead); 2194 RD(1, "Csum offload error: dropping"); 2195 pq->stats.iqdrops ++; 2196 deliver = 0; 2197 } 2198 } 2199 2200 skip: 2201 count ++; 2202 if (++batch_count >= PTNET_RX_BATCH) { 2203 /* Some packets have been (or will be) pushed to the network 2204 * stack. We need to update the CSB to tell the host about 2205 * the new ring->cur and ring->head (RX buffer refill). */ 2206 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); 2207 batch_count = 0; 2208 } 2209 2210 if (likely(deliver)) { 2211 pq->stats.packets ++; 2212 pq->stats.bytes += mhead->m_pkthdr.len; 2213 2214 PTNET_Q_UNLOCK(pq); 2215 (*ifp->if_input)(ifp, mhead); 2216 PTNET_Q_LOCK(pq); 2217 /* The ring->head index (and related indices) are 2218 * updated under pq lock by ptnet_ring_update(). 2219 * Since we dropped the lock to call if_input(), we 2220 * must reload ring->head and restart processing the 2221 * ring from there. */ 2222 head = ring->head; 2223 2224 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 2225 /* The interface has gone down while we didn't 2226 * have the lock. Stop any processing and exit. */ 2227 goto unlock; 2228 } 2229 } 2230 } 2231 escape: 2232 if (batch_count) { 2233 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); 2234 2235 } 2236 2237 if (count >= budget && may_resched) { 2238 /* If we ran out of budget or the double-check found new 2239 * slots to process, schedule the taskqueue. */ 2240 DBG(RD(1, "out of budget: resched h %u t %u\n", 2241 head, ring->tail)); 2242 taskqueue_enqueue(pq->taskq, &pq->task); 2243 } 2244 unlock: 2245 PTNET_Q_UNLOCK(pq); 2246 2247 return count; 2248 } 2249 2250 static void 2251 ptnet_rx_task(void *context, int pending) 2252 { 2253 struct ptnet_queue *pq = context; 2254 2255 DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); 2256 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); 2257 } 2258 2259 static void 2260 ptnet_tx_task(void *context, int pending) 2261 { 2262 struct ptnet_queue *pq = context; 2263 2264 DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); 2265 ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); 2266 } 2267 2268 #ifdef DEVICE_POLLING 2269 /* We don't need to handle differently POLL_AND_CHECK_STATUS and 2270 * POLL_ONLY, since we don't have an Interrupt Status Register. */ 2271 static int 2272 ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget) 2273 { 2274 struct ptnet_softc *sc = if_getsoftc(ifp); 2275 unsigned int queue_budget; 2276 unsigned int count = 0; 2277 bool borrow = false; 2278 int i; 2279 2280 KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); 2281 queue_budget = MAX(budget / sc->num_rings, 1); 2282 RD(1, "Per-queue budget is %d", queue_budget); 2283 2284 while (budget) { 2285 unsigned int rcnt = 0; 2286 2287 for (i = 0; i < sc->num_rings; i++) { 2288 struct ptnet_queue *pq = sc->queues + i; 2289 2290 if (borrow) { 2291 queue_budget = MIN(queue_budget, budget); 2292 if (queue_budget == 0) { 2293 break; 2294 } 2295 } 2296 2297 if (i < sc->num_tx_rings) { 2298 rcnt += ptnet_drain_transmit_queue(pq, 2299 queue_budget, false); 2300 } else { 2301 rcnt += ptnet_rx_eof(pq, queue_budget, 2302 false); 2303 } 2304 } 2305 2306 if (!rcnt) { 2307 /* A scan of the queues gave no result, we can 2308 * stop here. */ 2309 break; 2310 } 2311 2312 if (rcnt > budget) { 2313 /* This may happen when initial budget < sc->num_rings, 2314 * since one packet budget is given to each queue 2315 * anyway. Just pretend we didn't eat "so much". */ 2316 rcnt = budget; 2317 } 2318 count += rcnt; 2319 budget -= rcnt; 2320 borrow = true; 2321 } 2322 2323 2324 return count; 2325 } 2326 #endif /* DEVICE_POLLING */ 2327