/*-
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/* Driver for ptnet paravirtualized network device. */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <machine/smp.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>

#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif

#if __FreeBSD_version >= 1100000
static uint64_t	ptnet_get_counter(if_t, ift_counter);
#else
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp)	(_ifp)->if_softc
#endif

//#define PTNETMAP_STATS
//#define DEBUG
#ifdef DEBUG
#define DBG(x) x
#else   /* !DEBUG */
#define DBG(x)
#endif  /* !DEBUG */

extern int ptnet_vnet_hdr; /* Tunable parameter */

struct ptnet_softc;

struct ptnet_queue_stats {
	uint64_t	packets; /* if_[io]packets */
	uint64_t	bytes;	 /* if_[io]bytes */
	uint64_t	errors;	 /* if_[io]errors */
	uint64_t	iqdrops; /* if_iqdrops */
	uint64_t	mcasts;	 /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
	uint64_t	intrs;
	uint64_t	kicks;
#endif /* PTNETMAP_STATS */
};

struct ptnet_queue {
	struct ptnet_softc		*sc;
	struct resource			*irq;
	void				*cookie;
	int				kring_id;
	struct ptnet_csb_gh		*ptgh;
	struct ptnet_csb_hg		*pthg;
	unsigned int			kick;
	struct mtx			lock;
	struct buf_ring			*bufring; /* for TX queues */
	struct ptnet_queue_stats	stats;
#ifdef PTNETMAP_STATS
	struct ptnet_queue_stats	last_stats;
#endif /* PTNETMAP_STATS */
	struct taskqueue		*taskq;
	struct task			task;
	char				lock_name[16];
};

#define PTNET_Q_LOCK(_pq)	mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)	mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)	mtx_unlock(&(_pq)->lock)

struct ptnet_softc {
	device_t		dev;
	if_t			ifp;
	struct ifmedia		media;
	struct mtx		lock;
	char			lock_name[16];
	char			hwaddr[ETHER_ADDR_LEN];

	/* Mirror of PTFEAT register. */
	uint32_t		ptfeatures;
	unsigned int		vnet_hdr_len;

	/* PCI BARs support. */
	struct resource		*iomem;
	struct resource		*msix_mem;

	unsigned int		num_rings;
	unsigned int		num_tx_rings;
	struct ptnet_queue	*queues;
	struct ptnet_queue	*rxqueues;
	struct ptnet_csb_gh	*csb_gh;
	struct ptnet_csb_hg	*csb_hg;

	unsigned int		min_tx_space;

	struct netmap_pt_guest_adapter *ptna;

	struct callout		tick;
#ifdef PTNETMAP_STATS
	struct timeval		last_ts;
#endif /* PTNETMAP_STATS */
};

#define PTNET_CORE_LOCK(_sc)	mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)

static int	ptnet_probe(device_t);
static int	ptnet_attach(device_t);
static int	ptnet_detach(device_t);
static int	ptnet_suspend(device_t);
static int	ptnet_resume(device_t);
static int	ptnet_shutdown(device_t);

static void	ptnet_init(void *opaque);
static int	ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int	ptnet_init_locked(struct ptnet_softc *sc);
static int	ptnet_stop(struct ptnet_softc *sc);
static int	ptnet_transmit(if_t ifp, struct mbuf *m);
static int	ptnet_drain_transmit_queue(struct ptnet_queue *pq,
					   unsigned int budget,
					   bool may_resched);
static void	ptnet_qflush(if_t ifp);
static void	ptnet_tx_task(void *context, int pending);

static int	ptnet_media_change(if_t ifp);
static void	ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void	ptnet_tick(void *opaque);
#endif

static int	ptnet_irqs_init(struct ptnet_softc *sc);
static void	ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd);
static int	ptnet_nm_config(struct netmap_adapter *na, unsigned *txr,
				unsigned *txd, unsigned *rxr, unsigned *rxd);
static void	ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int	ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int	ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int	ptnet_nm_rxsync(struct netmap_kring *kring, int flags);

static void	ptnet_tx_intr(void *opaque);
static void	ptnet_rx_intr(void *opaque);

static unsigned	ptnet_rx_discard(struct netmap_kring *kring,
				 unsigned int head);
static int	ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
			     bool may_resched);
static void	ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif

static device_method_t ptnet_methods[] = {
	DEVMETHOD(device_probe,		ptnet_probe),
	DEVMETHOD(device_attach,	ptnet_attach),
	DEVMETHOD(device_detach,	ptnet_detach),
	DEVMETHOD(device_suspend,	ptnet_suspend),
	DEVMETHOD(device_resume,	ptnet_resume),
	DEVMETHOD(device_shutdown,	ptnet_shutdown),
	DEVMETHOD_END
};

static driver_t ptnet_driver = {
	"ptnet",
	ptnet_methods,
	sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
		      NULL, NULL, SI_ORDER_MIDDLE + 2);

static int
ptnet_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
	    pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
		return (ENXIO);
	}

	device_set_desc(dev, "ptnet network adapter");

	return (BUS_PROBE_DEFAULT);
}

static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
	pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
	bus_write_4(pq->sc->iomem, pq->kick, 0);
}

#define PTNET_BUF_RING_SIZE	4096
#define PTNET_RX_BUDGET		512
#define PTNET_RX_BATCH		1
#define PTNET_TX_BUDGET		512
#define PTNET_TX_BATCH		64
#define PTNET_HDR_SIZE		sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE	65536

#define PTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define PTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\
				 CSUM_SCTP_IPV6)
#define PTNET_ALL_OFFLOAD	(CSUM_TSO | PTNET_CSUM_OFFLOAD |\
				 PTNET_CSUM_OFFLOAD_IPV6)

static int
ptnet_attach(device_t dev)
{
	uint32_t ptfeatures = 0;
	unsigned int num_rx_rings, num_tx_rings;
	struct netmap_adapter na_arg;
	unsigned int nifp_offset;
	struct ptnet_softc *sc;
	if_t ifp;
	uint32_t macreg;
	int err, rid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Setup PCI resources. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					   RF_ACTIVE);
	if (sc->iomem == NULL) {
		device_printf(dev, "Failed to map I/O BAR\n");
		return (ENXIO);
	}

	/* Negotiate features with the hypervisor. */
	if (ptnet_vnet_hdr) {
		ptfeatures |= PTNETMAP_F_VNET_HDR;
	}
	bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
	ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
	sc->ptfeatures = ptfeatures;

	num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	sc->num_rings = num_tx_rings + num_rx_rings;
	sc->num_tx_rings = num_tx_rings;

	if (sc->num_rings * sizeof(struct ptnet_csb_gh) > PAGE_SIZE) {
		device_printf(dev, "CSB cannot handle that many rings (%u)\n",
			      sc->num_rings);
		err = ENOMEM;
		goto err_path;
	}

	/* Allocate CSB and carry out CSB allocation protocol. */
	sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
				  (size_t)0, -1UL, PAGE_SIZE, 0);
	if (sc->csb_gh == NULL) {
		device_printf(dev, "Failed to allocate CSB\n");
		err = ENOMEM;
		goto err_path;
	}
	sc->csb_hg = (struct ptnet_csb_hg *)(((char *)sc->csb_gh) + PAGE_SIZE);

	{
		/*
		 * We use uint64_t rather than vm_paddr_t since we
		 * need 64 bit addresses even on 32 bit platforms.
		 */
		uint64_t paddr = vtophys(sc->csb_gh);

		/* CSB allocation protocol: write to BAH first, then
		 * to BAL (for both GH and HG sections). */
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
			    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
			    paddr & 0xffffffff);
		paddr = vtophys(sc->csb_hg);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
			    (paddr >> 32) & 0xffffffff);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
			    paddr & 0xffffffff);
	}
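
	/*
	 * Example (illustration only, with a made-up address): the 64-bit
	 * physical address of each CSB page is split across the BAH/BAL
	 * register pair written above, high word first.
	 */
#if 0
	uint64_t example_paddr = 0x123456000ULL;		/* hypothetical */
	uint32_t bah = (example_paddr >> 32) & 0xffffffff;	/* 0x00000001 */
	uint32_t bal = example_paddr & 0xffffffff;		/* 0x23456000 */
#endif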

	/* Allocate and initialize per-queue data structures. */
	sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
			    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->queues == NULL) {
		err = ENOMEM;
		goto err_path;
	}
	sc->rxqueues = sc->queues + num_tx_rings;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		pq->sc = sc;
		pq->kring_id = i;
		pq->kick = PTNET_IO_KICK_BASE + 4 * i;
		pq->ptgh = sc->csb_gh + i;
		pq->pthg = sc->csb_hg + i;
		snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
			 device_get_nameunit(dev), i);
		mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
		if (i >= num_tx_rings) {
			/* RX queue: fix kring_id. */
			pq->kring_id -= num_tx_rings;
		} else {
			/* TX queue: allocate buf_ring. */
			pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
						     M_DEVBUF, M_NOWAIT,
						     &pq->lock);
			if (pq->bufring == NULL) {
				err = ENOMEM;
				goto err_path;
			}
		}
	}

	sc->min_tx_space = 64; /* Safe initial value. */

	err = ptnet_irqs_init(sc);
	if (err) {
		goto err_path;
	}

	/* Setup Ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Failed to allocate ifnet\n");
		err = ENOMEM;
		goto err_path;
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_init = ptnet_init;
	ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
	ifp->if_get_counter = ptnet_get_counter;
#endif
	ifp->if_transmit = ptnet_transmit;
	ifp->if_qflush = ptnet_qflush;

	ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
		     ptnet_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);

	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
	sc->hwaddr[0] = (macreg >> 8) & 0xff;
	sc->hwaddr[1] = macreg & 0xff;
	macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
	sc->hwaddr[2] = (macreg >> 24) & 0xff;
	sc->hwaddr[3] = (macreg >> 16) & 0xff;
	sc->hwaddr[4] = (macreg >> 8) & 0xff;
	sc->hwaddr[5] = macreg & 0xff;

	ether_ifattach(ifp, sc->hwaddr);

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
		/* Similarly to what the vtnet driver does, we can emulate
		 * VLAN offloadings by inserting and removing the 802.1Q
		 * header during transmit and receive. We are then able
		 * to do checksum offloading of VLAN frames. */
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
					| IFCAP_VLAN_HWCSUM
					| IFCAP_TSO | IFCAP_LRO
					| IFCAP_VLAN_HWTSO
					| IFCAP_VLAN_HWTAGGING;
	}

	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	/* Don't enable polling by default. */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	snprintf(sc->lock_name, sizeof(sc->lock_name),
		 "%s", device_get_nameunit(dev));
	mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
	callout_init_mtx(&sc->tick, &sc->lock, 0);

	/* Prepare a netmap_adapter struct instance to do netmap_attach(). */
	nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
	memset(&na_arg, 0, sizeof(na_arg));
	na_arg.ifp = ifp;
	na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
	na_arg.num_tx_rings = num_tx_rings;
	na_arg.num_rx_rings = num_rx_rings;
	na_arg.nm_config = ptnet_nm_config;
	na_arg.nm_krings_create = ptnet_nm_krings_create;
	na_arg.nm_krings_delete = ptnet_nm_krings_delete;
	na_arg.nm_dtor = ptnet_nm_dtor;
	na_arg.nm_register = ptnet_nm_register;
	na_arg.nm_txsync = ptnet_nm_txsync;
	na_arg.nm_rxsync = ptnet_nm_rxsync;

	netmap_pt_guest_attach(&na_arg, nifp_offset,
			       bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));

	/* Now a netmap adapter for this ifp has been allocated, and it
	 * can be accessed through NA(ifp). We also have to initialize the CSB
	 * pointer. */
	sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

	/* If virtio-net header was negotiated, set the virt_hdr_len field in
	 * the netmap adapter, to inform users that this netmap adapter requires
	 * the application to deal with the headers. */
	ptnet_update_vnet_hdr(sc);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);

err_path:
	ptnet_detach(dev);
	return err;
}

static int
ptnet_detach(device_t dev)
{
	struct ptnet_softc *sc = device_get_softc(dev);
	int i;

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		ether_poll_deregister(sc->ifp);
	}
#endif
	callout_drain(&sc->tick);

	if (sc->queues) {
		/* Drain taskqueues before calling if_detach. */
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (pq->taskq) {
				taskqueue_drain(pq->taskq, &pq->task);
			}
		}
	}

	if (sc->ifp) {
		ether_ifdetach(sc->ifp);

		/* Uninitialize netmap adapters for this device. */
		netmap_detach(sc->ifp);

		ifmedia_removeall(&sc->media);
		if_free(sc->ifp);
		sc->ifp = NULL;
	}

	ptnet_irqs_fini(sc);

	if (sc->csb_gh) {
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
		bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
		bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
		contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
		sc->csb_gh = NULL;
		sc->csb_hg = NULL;
	}

	if (sc->queues) {
		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (mtx_initialized(&pq->lock)) {
				mtx_destroy(&pq->lock);
			}
			if (pq->bufring != NULL) {
				buf_ring_free(pq->bufring, M_DEVBUF);
			}
		}
		free(sc->queues, M_DEVBUF);
		sc->queues = NULL;
	}

	if (sc->iomem) {
		bus_release_resource(dev, SYS_RES_IOPORT,
				     PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);
		sc->iomem = NULL;
	}

	mtx_destroy(&sc->lock);

	device_printf(dev, "%s() completed\n", __func__);

	return (0);
}

static int
ptnet_suspend(device_t dev)
{
	struct ptnet_softc *sc;

	sc = device_get_softc(dev);
	(void)sc;

	return (0);
}

static int
ptnet_resume(device_t dev)
{
	struct ptnet_softc *sc;

	sc = device_get_softc(dev);
	(void)sc;

	return (0);
}

static int
ptnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (ptnet_suspend(dev));
}

static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
	int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
	int nvecs = sc->num_rings;
	device_t dev = sc->dev;
	int err = ENOSPC;
	int cpu_cur;
	int i;

	if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
		device_printf(dev, "Could not find MSI-X capability\n");
		return (ENXIO);
	}

	sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					      &rid, RF_ACTIVE);
	if (sc->msix_mem == NULL) {
		device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
		return (ENXIO);
	}

	if (pci_msix_count(dev) < nvecs) {
		device_printf(dev, "Not enough MSI-X vectors\n");
		goto err_path;
	}

	err = pci_alloc_msix(dev, &nvecs);
	if (err) {
		device_printf(dev, "Failed to allocate MSI-X vectors\n");
		goto err_path;
	}

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		rid = i + 1;
		pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						 RF_ACTIVE);
		if (pq->irq == NULL) {
			device_printf(dev, "Failed to allocate interrupt "
					   "for queue #%d\n", i);
			err = ENOSPC;
			goto err_path;
		}
	}

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		void (*handler)(void *) = ptnet_tx_intr;

		if (i >= sc->num_tx_rings) {
			handler = ptnet_rx_intr;
		}
		err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
				     NULL /* intr_filter */, handler,
				     pq, &pq->cookie);
		if (err) {
			device_printf(dev, "Failed to register intr handler "
					   "for queue #%d\n", i);
			goto err_path;
		}

		bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
#if 0
		bus_bind_intr(sc->dev, pq->irq, cpu_cur);
#endif
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		static void (*handler)(void *context, int pending);

		handler = (i < sc->num_tx_rings) ?
			  ptnet_tx_task : ptnet_rx_task;
		TASK_INIT(&pq->task, 0, handler, pq);
		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
					taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
					device_get_nameunit(sc->dev), cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	return 0;
err_path:
	ptnet_irqs_fini(sc);
	return err;
}

static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
	device_t dev = sc->dev;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (pq->taskq) {
			taskqueue_free(pq->taskq);
			pq->taskq = NULL;
		}

		if (pq->cookie) {
			bus_teardown_intr(dev, pq->irq, pq->cookie);
			pq->cookie = NULL;
		}

		if (pq->irq) {
			bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
			pq->irq = NULL;
		}
	}

	if (sc->msix_mem) {
		pci_release_msi(dev);

		bus_release_resource(dev, SYS_RES_MEMORY,
				     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
				     sc->msix_mem);
		sc->msix_mem = NULL;
	}
}

static void
ptnet_init(void *opaque)
{
	struct ptnet_softc *sc = opaque;

	PTNET_CORE_LOCK(sc);
	ptnet_init_locked(sc);
	PTNET_CORE_UNLOCK(sc);
}

static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	device_t dev = sc->dev;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
		PTNET_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/* Network stack wants the iff to be up. */
			err = ptnet_init_locked(sc);
		} else {
			/* Network stack wants the iff to be down. */
			err = ptnet_stop(sc);
		}
		/* We don't need to do anything to support IFF_PROMISC,
		 * since that is managed by the backend port. */
		PTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		device_printf(dev, "SIOCSIFCAP %x %x\n",
			      ifr->ifr_reqcap, ifp->if_capenable);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			struct ptnet_queue *pq;
			int i;

			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				err = ether_poll_register(ptnet_poll, ifp);
				if (err) {
					break;
				}
				/* Stop queues and sync with taskqueues. */
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					/* Make sure the worker sees the
					 * IFF_DRV_RUNNING down. */
					PTNET_Q_LOCK(pq);
					pq->ptgh->guest_need_kick = 0;
					PTNET_Q_UNLOCK(pq);
					/* Wait for rescheduling to finish. */
					if (pq->taskq) {
						taskqueue_drain(pq->taskq,
								&pq->task);
					}
				}
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
			} else {
				err = ether_poll_deregister(ifp);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					PTNET_Q_LOCK(pq);
					pq->ptgh->guest_need_kick = 1;
					PTNET_Q_UNLOCK(pq);
				}
			}
		}
#endif /* DEVICE_POLLING */
		ifp->if_capenable = ifr->ifr_reqcap;
		break;

	case SIOCSIFMTU:
		/* We support any reasonable MTU. */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
			err = EINVAL;
		} else {
			PTNET_CORE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			PTNET_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return err;
}

static int
ptnet_init_locked(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	unsigned int nm_buf_size;
	int ret;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return 0; /* nothing to do */
	}

	device_printf(sc->dev, "%s\n", __func__);

	/* Translate offload capabilities according to if_capenable. */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		ifp->if_hwassist |= CSUM_IP6_TSO;

	/*
	 * Prepare the interface for netmap mode access.
	 */
	netmap_update_config(na_dr);

	ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
	if (ret) {
		device_printf(sc->dev, "netmap_mem_finalize() failed\n");
		return ret;
	}

	if (sc->ptna->backend_regifs == 0) {
		ret = ptnet_nm_krings_create(na_nm);
		if (ret) {
			device_printf(sc->dev, "ptnet_nm_krings_create() "
					       "failed\n");
			goto err_mem_finalize;
		}

		ret = netmap_mem_rings_create(na_dr);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_rings_create() "
					       "failed\n");
			goto err_rings_create;
		}

		ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
		if (ret) {
			device_printf(sc->dev, "netmap_mem_get_lut() "
					       "failed\n");
			goto err_get_lut;
		}
	}

	ret = ptnet_nm_register(na_dr, 1 /* on */);
	if (ret) {
		goto err_register;
	}

	nm_buf_size = NETMAP_BUF_SIZE(na_dr);

	KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
	sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
	device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
		      sc->min_tx_space);
#ifdef PTNETMAP_STATS
	callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return 0;

err_register:
	memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
	netmap_mem_rings_delete(na_dr);
err_rings_create:
	ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return ret;
}

/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
	if_t ifp = sc->ifp;
	struct netmap_adapter *na_dr = &sc->ptna->dr.up;
	struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
	int i;

	device_printf(sc->dev, "%s\n", __func__);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		return 0; /* nothing to do */
	}

	/* Clear the driver-ready flag, and synchronize with all the queues,
	 * so that after this loop we are sure nobody is working anymore with
	 * the device. This scheme is taken from the vtnet driver. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&sc->tick);
	for (i = 0; i < sc->num_rings; i++) {
		PTNET_Q_LOCK(sc->queues + i);
		PTNET_Q_UNLOCK(sc->queues + i);
	}

	ptnet_nm_register(na_dr, 0 /* off */);

	if (sc->ptna->backend_regifs == 0) {
		netmap_mem_rings_delete(na_dr);
		ptnet_nm_krings_delete(na_nm);
	}
	netmap_mem_deref(na_dr->nm_mem, na_dr);

	return 0;
}

static void
ptnet_qflush(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int i;

	/* Flush all the bufrings and do the interface flush. */
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct mbuf *m;

		PTNET_Q_LOCK(pq);
		if (pq->bufring) {
			while ((m = buf_ring_dequeue_sc(pq->bufring))) {
				m_freem(m);
			}
		}
		PTNET_Q_UNLOCK(pq);
	}

	if_qflush(ifp);
}

static int
ptnet_media_change(if_t ifp)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ifmedia *ifm = &sc->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		return EINVAL;
	}

	return 0;
}

#if __FreeBSD_version >= 1100000
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue_stats stats[2];
	int i;

	/* Accumulate statistics over the queues. */
	memset(stats, 0, sizeof(stats));
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		int idx = (i < sc->num_tx_rings) ? 0 : 1;

		stats[idx].packets	+= pq->stats.packets;
		stats[idx].bytes	+= pq->stats.bytes;
		stats[idx].errors	+= pq->stats.errors;
		stats[idx].iqdrops	+= pq->stats.iqdrops;
		stats[idx].mcasts	+= pq->stats.mcasts;
	}

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (stats[1].packets);
	case IFCOUNTER_IQDROPS:
		return (stats[1].iqdrops);
	case IFCOUNTER_IERRORS:
		return (stats[1].errors);
	case IFCOUNTER_OPACKETS:
		return (stats[0].packets);
	case IFCOUNTER_OBYTES:
		return (stats[0].bytes);
	case IFCOUNTER_OMCASTS:
		return (stats[0].mcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
#endif


#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
	struct ptnet_softc *sc = opaque;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		struct ptnet_queue_stats cur = pq->stats;
		struct timeval now;
		unsigned int delta;

		microtime(&now);
		delta = now.tv_usec - sc->last_ts.tv_usec +
			(now.tv_sec - sc->last_ts.tv_sec) * 1000000;
		delta /= 1000; /* in milliseconds */

		if (delta == 0)
			continue;

		device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
			      "intr %lu\n", i, delta,
			      (cur.packets - pq->last_stats.packets),
			      (cur.kicks - pq->last_stats.kicks),
			      (cur.intrs - pq->last_stats.intrs));
		pq->last_stats = cur;
	}
	microtime(&sc->last_ts);
	callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */

static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	/* We are always active, as the backend netmap port is
	 * always open in netmap mode. */
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}

static uint32_t
ptnet_nm_ptctl(if_t ifp, uint32_t cmd)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	/*
	 * Write a command and read back error status,
	 * with zero meaning success.
	 */
	bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
	return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}

static int
ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd,
		unsigned *rxr, unsigned *rxd)
{
	struct ptnet_softc *sc = if_getsoftc(na->ifp);

	*txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
	*rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
	*txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
	*rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);

	device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n",
		      *txr, *rxr, *txd, *rxd);

	return 0;
}

static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
	int i;

	/* Sync krings from the host, reading from
	 * CSB. */
	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_csb_gh *ptgh = sc->queues[i].ptgh;
		struct ptnet_csb_hg *pthg = sc->queues[i].pthg;
		struct netmap_kring *kring;

		if (i < na->num_tx_rings) {
			kring = na->tx_rings + i;
		} else {
			kring = na->rx_rings + i - na->num_tx_rings;
		}
		kring->rhead = kring->ring->head = ptgh->head;
		kring->rcur = kring->ring->cur = ptgh->cur;
		kring->nr_hwcur = pthg->hwcur;
		kring->nr_hwtail = kring->rtail =
			kring->ring->tail = pthg->hwtail;

		ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
		   pthg->hwcur, ptgh->head, ptgh->cur,
		   pthg->hwtail);
		ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
		   t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
		   kring->ring->head, kring->ring->cur, kring->nr_hwtail,
		   kring->rtail, kring->ring->tail);
	}
}

static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
	unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

	bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
	sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
	sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}
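
/* Note on the negotiated header length (illustrative summary of the code
 * above, not an additional requirement): PTNET_HDR_SIZE is
 * sizeof(struct virtio_net_hdr_mrg_rxbuf), i.e. the 10-byte virtio-net
 * header plus the 2-byte num_buffers field, 12 bytes in total. After the
 * negotiation above, sc->vnet_hdr_len is therefore expected to be either
 * 0 or 12, and the same value is mirrored into the adapter's virt_hdr_len
 * so that netmap applications know how many bytes to skip at the start of
 * each slot. */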

static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
	/* device-specific */
	if_t ifp = na->ifp;
	struct ptnet_softc *sc = if_getsoftc(ifp);
	int native = (na == &sc->ptna->hwup.up);
	struct ptnet_queue *pq;
	enum txrx t;
	int ret = 0;
	int i;

	if (!onoff) {
		sc->ptna->backend_regifs--;
	}

	/* If this is the last netmap client, guest interrupt enable flags may
	 * be in arbitrary state. Since these flags are going to be used also
	 * by the netdevice driver, we have to make sure to start with
	 * notifications enabled. Also, schedule NAPI to flush pending packets
	 * in the RX rings, since we will not receive further interrupts
	 * until they are processed. */
	if (native && !onoff && na->active_fds == 0) {
		D("Exit netmap mode, re-enable interrupts");
		for (i = 0; i < sc->num_rings; i++) {
			pq = sc->queues + i;
			pq->ptgh->guest_need_kick = 1;
		}
	}

	if (onoff) {
		if (sc->ptna->backend_regifs == 0) {
			/* Initialize notification enable fields in the CSB. */
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->pthg->host_need_kick = 1;
				pq->ptgh->guest_need_kick =
					(!(ifp->if_capenable & IFCAP_POLLING)
						&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE);
			if (ret) {
				return ret;
			}
		}

		/* Sync from CSB must be done after REGIF PTCTL. Skip this
		 * step only if this is a netmap client and it is not the
		 * first one. */
		if ((!native && sc->ptna->backend_regifs == 0) ||
				(native && na->active_fds == 0)) {
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we don't want
		 * to replace if_transmit method, nor set NAF_NETMAP_ON */
		if (native) {
			for_rx_tx(t) {
				for (i = 0; i <= nma_get_nrings(na, t); i++) {
					struct netmap_kring *kring = &NMR(na, t)[i];

					if (nm_kring_pending_on(kring)) {
						kring->nr_mode = NKR_NETMAP_ON;
					}
				}
			}
			nm_set_native_flags(na);
		}

	} else {
		if (native) {
			nm_clear_native_flags(na);
			for_rx_tx(t) {
				for (i = 0; i <= nma_get_nrings(na, t); i++) {
					struct netmap_kring *kring = &NMR(na, t)[i];

					if (nm_kring_pending_off(kring)) {
						kring->nr_mode = NKR_NETMAP_OFF;
					}
				}
			}
		}

		/* Sync from CSB must be done before UNREGIF PTCTL, on the last
		 * netmap client. */
		if (native && na->active_fds == 0) {
			ptnet_sync_from_csb(sc, na);
		}

		if (sc->ptna->backend_regifs == 0) {
			ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE);
		}
	}

	if (onoff) {
		sc->ptna->backend_regifs++;
	}

	return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->ptgh, pq->pthg, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_rxsync(pq->ptgh, pq->pthg, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to process the pending transmission
	 * requests. However, vtnet, if_em and if_igb just call
	 * ptnet_transmit() here, at least when using MSI-X interrupts.
	 * The if_em driver, instead, schedules the taskqueue when using
	 * legacy interrupts. */
	taskqueue_enqueue(pq->taskq, &pq->task);
}

static void
ptnet_rx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;
	unsigned int unused;

	DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
		return;
	}

	/* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
	 * receive-side processing is executed directly in the interrupt
	 * service routine. Alternatively, we may schedule the taskqueue. */
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

/* The following offloadings-related functions are taken from the vtnet
 * driver, but the same functionality is required for the ptnet driver.
 * As a temporary solution, I copied this code from vtnet and I started
 * to generalize it (taking away driver-specific statistic accounting),
 * making as little modifications as possible.
 * In the future we need to share these functions between vtnet and ptnet.
 */
static int
ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
{
	struct ether_vlan_header *evh;
	int offset;

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else {
		*etype = ntohs(evh->evl_encap_proto);
		offset = sizeof(struct ether_header);
	}

	switch (*etype) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip, iphdr;
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m_copydata(m, offset, sizeof(struct ip),
			    (caddr_t) &iphdr);
			ip = &iphdr;
		} else
			ip = (struct ip *)(m->m_data + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		/* Here we should increment the tx_csum_bad_ethtype counter. */
		return (EINVAL);
	}

	return (0);
}

static int
ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type,
		     int offset, bool allow_ecn, struct virtio_net_hdr *hdr)
{
	static struct timeval lastecn;
	static int curecn;
	struct tcphdr *tcp, tcphdr;

	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
		tcp = &tcphdr;
	} else
		tcp = (struct tcphdr *)(m->m_data + offset);

	hdr->hdr_len = offset + (tcp->th_off << 2);
	hdr->gso_size = m->m_pkthdr.tso_segsz;
	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
		    VIRTIO_NET_HDR_GSO_TCPV6;

	if (tcp->th_flags & TH_CWR) {
		/*
		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
		 * ECN support is not on a per-interface basis, but globally via
		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
		 */
		if (!allow_ecn) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	/* Here we should increment tx_tso counter. */

	return (0);
}
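
/* Worked example for the TSO fields set above (made-up values, not from a
 * real trace): for an IPv4/TCP segment with no IP or TCP options, offset
 * (i.e. csum_start) is 14 + 20 = 34, so hdr_len becomes 34 + 20 = 54,
 * while gso_size is the segment size chosen by the stack, e.g. 1460 for a
 * 1500-byte MTU. The host side uses these fields to resegment the frame. */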

static struct mbuf *
ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn,
		 struct virtio_net_hdr *hdr)
{
	int flags, etype, csum_start, proto, error;

	flags = m->m_pkthdr.csum_flags;

	error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) ||
	    (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) {
		/*
		 * We could compare the IP protocol vs the CSUM_ flag too,
		 * but that really should not be necessary.
		 */
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;
		/* Here we should increment the tx_csum counter. */
	}

	if (flags & CSUM_TSO) {
		if (__predict_false(proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf.
			 * Here we should increment the tx_tso_not_tcp
			 * counter. */
			goto drop;
		}

		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
		    ("%s: mbuf %p TSO without checksum offload %#x",
		    __func__, m, flags));

		error = ptnet_tx_offload_tso(ifp, m, etype, csum_start,
					     allow_ecn, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}

static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
			struct virtio_net_hdr *hdr)
{
#if defined(INET) || defined(INET6)
	int offset = hdr->csum_start + hdr->csum_offset;
#endif

	/* Only do a basic sanity check on the offset. */
	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
			return (1);
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
			return (1);
		break;
#endif
	default:
		/* Here we should increment the rx_csum_bad_ethtype counter. */
		return (1);
	}

	/*
	 * Use the offset to determine the appropriate CSUM_* flags. This is
	 * a bit dirty, but we can get by with it since the checksum offsets
	 * happen to be different. We assume the host does not do IPv4
	 * header checksum offloading.
	 */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case offsetof(struct sctphdr, checksum):
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;
	default:
		/* Here we should increment the rx_csum_bad_offset counter. */
		return (1);
	}

	return (0);
}

static int
ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start,
		       struct virtio_net_hdr *hdr)
{
	int offset, proto;

	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP: {
		struct ip *ip;
		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
			return (1);
		ip = (struct ip *)(m->m_data + ip_start);
		proto = ip->ip_p;
		offset = ip_start + (ip->ip_hl << 2);
		break;
	}
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(m->m_len < ip_start +
		    sizeof(struct ip6_hdr)))
			return (1);
		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
		if (__predict_false(offset < 0))
			return (1);
		break;
#endif
	default:
		/* Here we should increment the rx_csum_bad_ethtype counter. */
		return (1);
	}

	switch (proto) {
	case IPPROTO_TCP:
		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_UDP:
		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;
	case IPPROTO_SCTP:
		if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
			return (1);
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;
	default:
		/*
		 * For the remaining protocols, FreeBSD does not support
		 * checksum offloading, so the checksum will be recomputed.
		 */
#if 0
		if_printf(ifp, "cksum offload of unsupported "
		    "protocol eth_type=%#x proto=%d csum_start=%d "
		    "csum_offset=%d\n", __func__, eth_type, proto,
		    hdr->csum_start, hdr->csum_offset);
#endif
		break;
	}

	return (0);
}

/*
 * Set the appropriate CSUM_* flags. Unfortunately, the information
 * provided is not directly useful to us. The VirtIO header gives the
 * offset of the checksum, which is all Linux needs, but this is not
 * how FreeBSD does things. We are forced to peek inside the packet
 * a bit.
 *
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
 * could accept the offsets and let the stack figure it out.
 */
static int
ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	uint16_t eth_type;
	int offset, error;

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		/* BMV: We should handle nested VLAN tags too. */
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else
		offset = sizeof(struct ether_header);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr);
	else
		error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr);

	return (error);
}
/* End of offloading-related functions to be shared with vtnet. */

static inline void
ptnet_sync_tail(struct ptnet_csb_hg *pthg, struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;

	/* Update hwcur and hwtail as known by the host. */
	ptnetmap_guest_read_kring_csb(pthg, kring);

	/* nm_sync_finalize */
	ring->tail = kring->rtail = kring->nr_hwtail;
}

static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct ptnet_csb_gh *ptgh = pq->ptgh;
	struct ptnet_csb_hg *pthg = pq->pthg;

	/* Some packets have been pushed to the netmap ring. We have
	 * to tell the host to process the new packets, updating cur
	 * and head in the CSB. */
	ring->head = ring->cur = head;

	/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
	kring->rcur = kring->rhead = head;

	ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead);

	/* Kick the host if needed. */
	if (NM_ACCESS_ONCE(pthg->host_need_kick)) {
		ptgh->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}

#define PTNET_TX_NOSPACE(_h, _k, _min)	\
	((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
		(_k)->rtail - (_h)) < (_min)
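
/* Worked example for the space check above (made-up numbers): with
 * nkr_num_slots = 256, rtail = 10 and head = 250, head is not below rtail,
 * so the available room is 256 + 10 - 250 = 16 slots. The macro reports
 * "no space" whenever that figure drops below _min, which
 * ptnet_init_locked() sets to PTNET_MAX_PKT_SIZE divided by the netmap
 * buffer size, plus two. */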

/* This function may be called by the network stack, or by
 * the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
			   bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	if_t ifp = sc->ifp;
	unsigned int batch_count = 0;
	struct ptnet_csb_gh *ptgh;
	struct ptnet_csb_hg *pthg;
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	unsigned int count = 0;
	unsigned int minspace;
	unsigned int head;
	unsigned int lim;
	struct mbuf *mhead;
	struct mbuf *mf;
	int nmbuf_bytes;
	uint8_t *nmbuf;

	if (!PTNET_Q_TRYLOCK(pq)) {
		/* We failed to acquire the lock, schedule the taskqueue. */
		RD(1, "Deferring TX work");
		if (may_resched) {
			taskqueue_enqueue(pq->taskq, &pq->task);
		}

		return 0;
	}

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		PTNET_Q_UNLOCK(pq);
		RD(1, "Interface is down");
		return ENETDOWN;
	}

	ptgh = pq->ptgh;
	pthg = pq->pthg;
	kring = na->tx_rings + pq->kring_id;
	ring = kring->ring;
	lim = kring->nkr_num_slots - 1;
	head = ring->head;
	minspace = sc->min_tx_space;

	while (count < budget) {
		if (PTNET_TX_NOSPACE(head, kring, minspace)) {
			/* We ran out of slots, let's see if the host has
			 * freed up some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(pthg, kring);

			if (PTNET_TX_NOSPACE(head, kring, minspace)) {
				/* Still no slots available. Reactivate the
				 * interrupts so that we can be notified
				 * when some free slots are made available by
				 * the host. */
				ptgh->guest_need_kick = 1;

				/* Double-check. */
				ptnet_sync_tail(pthg, kring);
				if (likely(PTNET_TX_NOSPACE(head, kring,
							    minspace))) {
					break;
				}

				RD(1, "Found more slots by doublecheck");
				/* More slots were freed before reactivating
				 * the interrupts. */
				ptgh->guest_need_kick = 0;
			}
		}

		mhead = drbr_peek(ifp, pq->bufring);
		if (!mhead) {
			break;
		}

		/* Initialize transmission state variables. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_bytes = 0;

		/* If needed, prepare the virtio-net header at the beginning
		 * of the first slot. */
		if (have_vnet_hdr) {
			struct virtio_net_hdr *vh =
					(struct virtio_net_hdr *)nmbuf;

			/* For performance, we could replace this memset() with
			 * two 8-bytes-wide writes. */
			memset(nmbuf, 0, PTNET_HDR_SIZE);
			if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
				mhead = ptnet_tx_offload(ifp, mhead, false,
							 vh);
				if (unlikely(!mhead)) {
					/* Packet dropped because errors
					 * occurred while preparing the vnet
					 * header. Let's go ahead with the next
					 * packet. */
					pq->stats.errors ++;
					drbr_advance(ifp, pq->bufring);
					continue;
				}
			}
			ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
			      "csum_start %u csum_ofs %u hdr_len = %u "
			      "gso_size %u gso_type %x", __func__,
			      mhead->m_pkthdr.csum_flags, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);

			nmbuf += PTNET_HDR_SIZE;
			nmbuf_bytes += PTNET_HDR_SIZE;
		}

		for (mf = mhead; mf; mf = mf->m_next) {
			uint8_t *mdata = mf->m_data;
			int mlen = mf->m_len;

			for (;;) {
				int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

				if (mlen < copy) {
					copy = mlen;
				}
				memcpy(nmbuf, mdata, copy);

				mdata += copy;
				mlen -= copy;
				nmbuf += copy;
				nmbuf_bytes += copy;

				if (!mlen) {
					break;
				}

				slot->len = nmbuf_bytes;
				slot->flags = NS_MOREFRAG;

				head = nm_next(head, lim);
				KASSERT(head != ring->tail,
					("Unexpectedly run out of TX space"));
				slot = ring->slot + head;
				nmbuf = NMB(na, slot);
				nmbuf_bytes = 0;
			}
		}

		/* Complete last slot and update head. */
		slot->len = nmbuf_bytes;
		slot->flags = 0;
		head = nm_next(head, lim);

		/* Consume the packet just processed. */
		drbr_advance(ifp, pq->bufring);

		/* Copy the packet to listeners. */
		ETHER_BPF_MTAP(ifp, mhead);

		pq->stats.packets ++;
		pq->stats.bytes += mhead->m_pkthdr.len;
		if (mhead->m_flags & M_MCAST) {
			pq->stats.mcasts ++;
		}

		m_freem(mhead);

		count ++;
		if (++batch_count == PTNET_TX_BATCH) {
			ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
			batch_count = 0;
		}
	}

	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
	}

	if (count >= budget && may_resched) {
		DBG(RD(1, "out of budget: resched, %d mbufs pending\n",
			drbr_inuse(ifp, pq->bufring)));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}

	PTNET_Q_UNLOCK(pq);

	return count;
}

static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue *pq;
	unsigned int queue_idx;
	int err;

	DBG(device_printf(sc->dev, "transmit %p\n", m));

	/* Insert 802.1Q header if needed. */
	if (m->m_flags & M_VLANTAG) {
		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		if (m == NULL) {
			return ENOBUFS;
		}
		m->m_flags &= ~M_VLANTAG;
	}

	/* Get the flow-id if available. */
	queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
		    m->m_pkthdr.flowid : curcpu;

	if (unlikely(queue_idx >= sc->num_tx_rings)) {
		queue_idx %= sc->num_tx_rings;
	}

	pq = sc->queues + queue_idx;

	err = drbr_enqueue(ifp, pq->bufring, m);
	if (err) {
		/* ENOBUFS when the bufring is full */
		RD(1, "%s: drbr_enqueue() failed %d\n",
			__func__, err);
		pq->stats.errors ++;
		return err;
	}

	if (ifp->if_capenable & IFCAP_POLLING) {
		/* If polling is on, the transmit queues will be
		 * drained by the poller. */
		return 0;
	}

	err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

	return (err < 0) ? err : 0;
}
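
/* Example of the queue selection in ptnet_transmit() above (hypothetical
 * numbers): with num_tx_rings = 4, an mbuf carrying flowid 13 is enqueued
 * on TX queue 13 % 4 = 1, while an mbuf without a valid hash type falls
 * back to the current CPU index, again reduced modulo 4 if needed. */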

static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot = ring->slot + head;

	for (;;) {
		head = nm_next(head, kring->nkr_num_slots - 1);
		if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
			break;
		}
		slot = ring->slot + head;
	}

	return head;
}

static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
	uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

	do {
		unsigned int copy;

		if (mtail->m_len == MCLBYTES) {
			struct mbuf *mf;

			mf = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (unlikely(!mf)) {
				return NULL;
			}

			mtail->m_next = mf;
			mtail = mf;
			mdata = mtod(mtail, uint8_t *);
			mtail->m_len = 0;
		}

		copy = MCLBYTES - mtail->m_len;
		if (nmbuf_len < copy) {
			copy = nmbuf_len;
		}

		memcpy(mdata, nmbuf, copy);

		nmbuf += copy;
		nmbuf_len -= copy;
		mdata += copy;
		mtail->m_len += copy;
	} while (nmbuf_len);

	return mtail;
}

static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct ptnet_csb_gh *ptgh = pq->ptgh;
	struct ptnet_csb_hg *pthg = pq->pthg;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings + pq->kring_id;
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* deliver the mbuf to the network stack? */
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots, let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(pthg, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				ptgh->guest_need_kick = 1;

				/* Double-check. */
				ptnet_sync_tail(pthg, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				ptgh->guest_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host should
				 * put the header in multiple netmap slots.
				 * If this is the case, discard. */

/*
 * Receive path: consume up to 'budget' packets from the netmap RX ring,
 * turning each (possibly multi-slot) packet into an mbuf chain and
 * handing it to the network stack. Returns the number of packets
 * processed.
 */
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct ptnet_csb_gh *ptgh = pq->ptgh;
	struct ptnet_csb_hg *pthg = pq->pthg;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings + pq->kring_id;
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;
	uint32_t head;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	head = ring->head;
	while (count < budget) {
		uint32_t prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
		int deliver = 1; /* Pass the mbuf up to the network stack. */
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots; let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(pthg, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				ptgh->guest_need_kick = 1;

				/* Double-check for more completed slots. */
				ptnet_sync_tail(pthg, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				ptgh->guest_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host
				 * should put the header in multiple netmap
				 * slots. If this is the case, discard. */
				RD(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops++;
				deliver = 0;
				goto skip;
			}
			ND(1, "%s: vnet hdr: flags %x csum_start %u "
			      "csum_ofs %u hdr_len = %u gso_size %u "
			      "gso_type %x", __func__, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					" mbuf frag, reset head %u --> %u\n",
					__func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * the NS_MOREFRAG set. Drop it and continue
				 * the outer cycle (to do the double-check). */
				RD(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly
				 * (only meaningful when a virtio-net header
				 * is actually present).
				 */
				if (have_vnet_hdr &&
				    (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
					| VIRTIO_NET_HDR_F_DATA_VALID))) {
			if (unlikely(ptnet_rx_csum(mhead, vh))) {
				m_freem(mhead);
				RD(1, "Csum offload error: dropping");
				pq->stats.iqdrops++;
				deliver = 0;
			}
		}

skip:
		count++;
		if (++batch_count >= PTNET_RX_BATCH) {
			/* Some packets have been (or will be) pushed to the
			 * network stack. We need to update the CSB to tell
			 * the host about the new ring->cur and ring->head
			 * (RX buffer refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}

		if (likely(deliver)) {
			pq->stats.packets++;
			pq->stats.bytes += mhead->m_pkthdr.len;

			PTNET_Q_UNLOCK(pq);
			(*ifp->if_input)(ifp, mhead);
			PTNET_Q_LOCK(pq);
			/* The ring->head index (and related indices) are
			 * updated under pq lock by ptnet_ring_update().
			 * Since we dropped the lock to call if_input(), we
			 * must reload ring->head and restart processing the
			 * ring from there. */
			head = ring->head;

			if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
				/* The interface has gone down while we didn't
				 * have the lock. Stop any processing and
				 * exit. */
				goto unlock;
			}
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(RD(1, "out of budget: resched h %u t %u\n",
		       head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}
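
/*
 * A note on the empty-ring handling in ptnet_rx_eof() above. The host
 * disables guest notifications right before raising an interrupt, so no
 * further kicks arrive while the guest is draining the ring. When the
 * guest runs dry it re-enables them (guest_need_kick = 1), but it must
 * then re-read the tail, because the host may have posted more slots just
 * before seeing the flag, without raising an interrupt. Roughly:
 *
 *	guest                              host
 *	-----                              ----
 *	sees head == tail
 *	                                   posts new slots (kick still off)
 *	guest_need_kick = 1
 *	ptnet_sync_tail() again  --------> new slots found, keep processing
 *
 * Without the second ptnet_sync_tail(), packets posted in that window
 * would sit in the ring until some later, unrelated interrupt.
 */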

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
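
/*
 * Both deferred tasks simply re-enter the budgeted receive/transmit
 * routines with may_resched set, so work left over after a budget-limited
 * pass keeps getting rescheduled until the rings are drained. The task
 * and taskqueue fields used here (pq->task, pq->taskq) must be initialized
 * elsewhere, normally in the attach path; a representative sketch of that
 * wiring, with illustrative names ('i' being the queue index in the
 * per-queue setup loop), could look like:
 *
 *	TASK_INIT(&pq->task, 0, i < sc->num_tx_rings ?
 *		  ptnet_tx_task : ptnet_rx_task, pq);
 *	pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
 *					  taskqueue_thread_enqueue,
 *					  &pq->taskq);
 *	taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
 *				device_get_nameunit(sc->dev), i);
 */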

#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY
 * differently, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	RD(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						   queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result; we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */
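
/*
 * Example of how ptnet_poll() splits its budget; the numbers below are
 * assumptions, the arithmetic is the one implemented above. With
 * budget == 32 and num_rings == 5, each queue is offered
 * MAX(32 / 5, 1) == 6 packets per pass. If the first pass consumes 28
 * packets in total, the loop runs again with budget == 4 and 'borrow'
 * set, so each queue is now offered MIN(6, 4) == 4. When the initial
 * budget is smaller than the number of rings, every queue still gets a
 * budget of one, which is why rcnt is clamped before being subtracted
 * from 'budget'.
 */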