1 /*- 2 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* Driver for VirtIO network devices. */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/eventhandler.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/sockio.h> 37 #include <sys/mbuf.h> 38 #include <sys/malloc.h> 39 #include <sys/module.h> 40 #include <sys/socket.h> 41 #include <sys/sysctl.h> 42 #include <sys/random.h> 43 #include <sys/sglist.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/taskqueue.h> 47 #include <sys/smp.h> 48 #include <machine/smp.h> 49 50 #include <vm/uma.h> 51 52 #include <net/ethernet.h> 53 #include <net/if.h> 54 #include <net/if_var.h> 55 #include <net/if_arp.h> 56 #include <net/if_dl.h> 57 #include <net/if_types.h> 58 #include <net/if_media.h> 59 #include <net/if_vlan_var.h> 60 61 #include <net/bpf.h> 62 63 #include <netinet/in_systm.h> 64 #include <netinet/in.h> 65 #include <netinet/ip.h> 66 #include <netinet/ip6.h> 67 #include <netinet6/ip6_var.h> 68 #include <netinet/udp.h> 69 #include <netinet/tcp.h> 70 #include <netinet/sctp.h> 71 72 #include <machine/bus.h> 73 #include <machine/resource.h> 74 #include <sys/bus.h> 75 #include <sys/rman.h> 76 77 #include <dev/virtio/virtio.h> 78 #include <dev/virtio/virtqueue.h> 79 #include <dev/virtio/network/virtio_net.h> 80 #include <dev/virtio/network/if_vtnetvar.h> 81 82 #include "virtio_if.h" 83 84 #include "opt_inet.h" 85 #include "opt_inet6.h" 86 87 static int vtnet_modevent(module_t, int, void *); 88 89 static int vtnet_probe(device_t); 90 static int vtnet_attach(device_t); 91 static int vtnet_detach(device_t); 92 static int vtnet_suspend(device_t); 93 static int vtnet_resume(device_t); 94 static int vtnet_shutdown(device_t); 95 static int vtnet_attach_completed(device_t); 96 static int vtnet_config_change(device_t); 97 98 static void vtnet_negotiate_features(struct vtnet_softc *); 99 static void vtnet_setup_features(struct vtnet_softc *); 100 static int vtnet_init_rxq(struct vtnet_softc *, int); 101 static int vtnet_init_txq(struct vtnet_softc *, int); 102 static int vtnet_alloc_rxtx_queues(struct vtnet_softc *); 103 static void 
vtnet_free_rxtx_queues(struct vtnet_softc *); 104 static int vtnet_alloc_rx_filters(struct vtnet_softc *); 105 static void vtnet_free_rx_filters(struct vtnet_softc *); 106 static int vtnet_alloc_virtqueues(struct vtnet_softc *); 107 static int vtnet_setup_interface(struct vtnet_softc *); 108 static int vtnet_change_mtu(struct vtnet_softc *, int); 109 static int vtnet_ioctl(struct ifnet *, u_long, caddr_t); 110 static uint64_t vtnet_get_counter(struct ifnet *, ift_counter); 111 112 static int vtnet_rxq_populate(struct vtnet_rxq *); 113 static void vtnet_rxq_free_mbufs(struct vtnet_rxq *); 114 static struct mbuf * 115 vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **); 116 static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *, 117 struct mbuf *, int); 118 static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int); 119 static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *); 120 static int vtnet_rxq_new_buf(struct vtnet_rxq *); 121 static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *, 122 struct virtio_net_hdr *); 123 static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int); 124 static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *); 125 static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int); 126 static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *, 127 struct virtio_net_hdr *); 128 static int vtnet_rxq_eof(struct vtnet_rxq *); 129 static void vtnet_rx_vq_intr(void *); 130 static void vtnet_rxq_tq_intr(void *, int); 131 132 static int vtnet_txq_below_threshold(struct vtnet_txq *); 133 static int vtnet_txq_notify(struct vtnet_txq *); 134 static void vtnet_txq_free_mbufs(struct vtnet_txq *); 135 static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *, 136 int *, int *, int *); 137 static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int, 138 int, struct virtio_net_hdr *); 139 static struct mbuf * 140 vtnet_txq_offload(struct vtnet_txq *, struct mbuf *, 141 struct virtio_net_hdr *); 142 static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **, 143 struct vtnet_tx_header *); 144 static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **); 145 #ifdef VTNET_LEGACY_TX 146 static void vtnet_start_locked(struct vtnet_txq *, struct ifnet *); 147 static void vtnet_start(struct ifnet *); 148 #else 149 static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *); 150 static int vtnet_txq_mq_start(struct ifnet *, struct mbuf *); 151 static void vtnet_txq_tq_deferred(void *, int); 152 #endif 153 static void vtnet_txq_start(struct vtnet_txq *); 154 static void vtnet_txq_tq_intr(void *, int); 155 static int vtnet_txq_eof(struct vtnet_txq *); 156 static void vtnet_tx_vq_intr(void *); 157 static void vtnet_tx_start_all(struct vtnet_softc *); 158 159 #ifndef VTNET_LEGACY_TX 160 static void vtnet_qflush(struct ifnet *); 161 #endif 162 163 static int vtnet_watchdog(struct vtnet_txq *); 164 static void vtnet_accum_stats(struct vtnet_softc *, 165 struct vtnet_rxq_stats *, struct vtnet_txq_stats *); 166 static void vtnet_tick(void *); 167 168 static void vtnet_start_taskqueues(struct vtnet_softc *); 169 static void vtnet_free_taskqueues(struct vtnet_softc *); 170 static void vtnet_drain_taskqueues(struct vtnet_softc *); 171 172 static void vtnet_drain_rxtx_queues(struct vtnet_softc *); 173 static void vtnet_stop_rendezvous(struct vtnet_softc *); 174 static void vtnet_stop(struct vtnet_softc *); 175 static int vtnet_virtio_reinit(struct vtnet_softc *); 176 static 
void vtnet_init_rx_filters(struct vtnet_softc *); 177 static int vtnet_init_rx_queues(struct vtnet_softc *); 178 static int vtnet_init_tx_queues(struct vtnet_softc *); 179 static int vtnet_init_rxtx_queues(struct vtnet_softc *); 180 static void vtnet_set_active_vq_pairs(struct vtnet_softc *); 181 static int vtnet_reinit(struct vtnet_softc *); 182 static void vtnet_init_locked(struct vtnet_softc *); 183 static void vtnet_init(void *); 184 185 static void vtnet_free_ctrl_vq(struct vtnet_softc *); 186 static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, 187 struct sglist *, int, int); 188 static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *); 189 static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t); 190 static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int); 191 static int vtnet_set_promisc(struct vtnet_softc *, int); 192 static int vtnet_set_allmulti(struct vtnet_softc *, int); 193 static void vtnet_attach_disable_promisc(struct vtnet_softc *); 194 static void vtnet_rx_filter(struct vtnet_softc *); 195 static void vtnet_rx_filter_mac(struct vtnet_softc *); 196 static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t); 197 static void vtnet_rx_filter_vlan(struct vtnet_softc *); 198 static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t); 199 static void vtnet_register_vlan(void *, struct ifnet *, uint16_t); 200 static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t); 201 202 static int vtnet_is_link_up(struct vtnet_softc *); 203 static void vtnet_update_link_status(struct vtnet_softc *); 204 static int vtnet_ifmedia_upd(struct ifnet *); 205 static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *); 206 static void vtnet_get_hwaddr(struct vtnet_softc *); 207 static void vtnet_set_hwaddr(struct vtnet_softc *); 208 static void vtnet_vlan_tag_remove(struct mbuf *); 209 static void vtnet_set_rx_process_limit(struct vtnet_softc *); 210 static void vtnet_set_tx_intr_threshold(struct vtnet_softc *); 211 212 static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *, 213 struct sysctl_oid_list *, struct vtnet_rxq *); 214 static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *, 215 struct sysctl_oid_list *, struct vtnet_txq *); 216 static void vtnet_setup_queue_sysctl(struct vtnet_softc *); 217 static void vtnet_setup_sysctl(struct vtnet_softc *); 218 219 static int vtnet_rxq_enable_intr(struct vtnet_rxq *); 220 static void vtnet_rxq_disable_intr(struct vtnet_rxq *); 221 static int vtnet_txq_enable_intr(struct vtnet_txq *); 222 static void vtnet_txq_disable_intr(struct vtnet_txq *); 223 static void vtnet_enable_rx_interrupts(struct vtnet_softc *); 224 static void vtnet_enable_tx_interrupts(struct vtnet_softc *); 225 static void vtnet_enable_interrupts(struct vtnet_softc *); 226 static void vtnet_disable_rx_interrupts(struct vtnet_softc *); 227 static void vtnet_disable_tx_interrupts(struct vtnet_softc *); 228 static void vtnet_disable_interrupts(struct vtnet_softc *); 229 230 static int vtnet_tunable_int(struct vtnet_softc *, const char *, int); 231 232 /* Tunables. 
*/ 233 static int vtnet_csum_disable = 0; 234 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable); 235 static int vtnet_tso_disable = 0; 236 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable); 237 static int vtnet_lro_disable = 0; 238 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable); 239 static int vtnet_mq_disable = 0; 240 TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable); 241 static int vtnet_mq_max_pairs = 0; 242 TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs); 243 static int vtnet_rx_process_limit = 512; 244 TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit); 245 246 static uma_zone_t vtnet_tx_header_zone; 247 248 static struct virtio_feature_desc vtnet_feature_desc[] = { 249 { VIRTIO_NET_F_CSUM, "TxChecksum" }, 250 { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, 251 { VIRTIO_NET_F_MAC, "MacAddress" }, 252 { VIRTIO_NET_F_GSO, "TxAllGSO" }, 253 { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" }, 254 { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" }, 255 { VIRTIO_NET_F_GUEST_ECN, "RxECN" }, 256 { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, 257 { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, 258 { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, 259 { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, 260 { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, 261 { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, 262 { VIRTIO_NET_F_STATUS, "Status" }, 263 { VIRTIO_NET_F_CTRL_VQ, "ControlVq" }, 264 { VIRTIO_NET_F_CTRL_RX, "RxMode" }, 265 { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" }, 266 { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" }, 267 { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" }, 268 { VIRTIO_NET_F_MQ, "Multiqueue" }, 269 { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" }, 270 271 { 0, NULL } 272 }; 273 274 static device_method_t vtnet_methods[] = { 275 /* Device methods. */ 276 DEVMETHOD(device_probe, vtnet_probe), 277 DEVMETHOD(device_attach, vtnet_attach), 278 DEVMETHOD(device_detach, vtnet_detach), 279 DEVMETHOD(device_suspend, vtnet_suspend), 280 DEVMETHOD(device_resume, vtnet_resume), 281 DEVMETHOD(device_shutdown, vtnet_shutdown), 282 283 /* VirtIO methods. 
*/ 284 DEVMETHOD(virtio_attach_completed, vtnet_attach_completed), 285 DEVMETHOD(virtio_config_change, vtnet_config_change), 286 287 DEVMETHOD_END 288 }; 289 290 #ifdef DEV_NETMAP 291 #include <dev/netmap/if_vtnet_netmap.h> 292 #endif /* DEV_NETMAP */ 293 294 static driver_t vtnet_driver = { 295 "vtnet", 296 vtnet_methods, 297 sizeof(struct vtnet_softc) 298 }; 299 static devclass_t vtnet_devclass; 300 301 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, 302 vtnet_modevent, 0); 303 MODULE_VERSION(vtnet, 1); 304 MODULE_DEPEND(vtnet, virtio, 1, 1, 1); 305 306 static int 307 vtnet_modevent(module_t mod, int type, void *unused) 308 { 309 int error; 310 311 error = 0; 312 313 switch (type) { 314 case MOD_LOAD: 315 vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr", 316 sizeof(struct vtnet_tx_header), 317 NULL, NULL, NULL, NULL, 0, 0); 318 break; 319 case MOD_QUIESCE: 320 case MOD_UNLOAD: 321 if (uma_zone_get_cur(vtnet_tx_header_zone) > 0) 322 error = EBUSY; 323 else if (type == MOD_UNLOAD) { 324 uma_zdestroy(vtnet_tx_header_zone); 325 vtnet_tx_header_zone = NULL; 326 } 327 break; 328 case MOD_SHUTDOWN: 329 break; 330 default: 331 error = EOPNOTSUPP; 332 break; 333 } 334 335 return (error); 336 } 337 338 static int 339 vtnet_probe(device_t dev) 340 { 341 342 if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK) 343 return (ENXIO); 344 345 device_set_desc(dev, "VirtIO Networking Adapter"); 346 347 return (BUS_PROBE_DEFAULT); 348 } 349 350 static int 351 vtnet_attach(device_t dev) 352 { 353 struct vtnet_softc *sc; 354 int error; 355 356 sc = device_get_softc(dev); 357 sc->vtnet_dev = dev; 358 359 /* Register our feature descriptions. */ 360 virtio_set_feature_desc(dev, vtnet_feature_desc); 361 362 VTNET_CORE_LOCK_INIT(sc); 363 callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0); 364 365 vtnet_setup_sysctl(sc); 366 vtnet_setup_features(sc); 367 368 error = vtnet_alloc_rx_filters(sc); 369 if (error) { 370 device_printf(dev, "cannot allocate Rx filters\n"); 371 goto fail; 372 } 373 374 error = vtnet_alloc_rxtx_queues(sc); 375 if (error) { 376 device_printf(dev, "cannot allocate queues\n"); 377 goto fail; 378 } 379 380 error = vtnet_alloc_virtqueues(sc); 381 if (error) { 382 device_printf(dev, "cannot allocate virtqueues\n"); 383 goto fail; 384 } 385 386 error = vtnet_setup_interface(sc); 387 if (error) { 388 device_printf(dev, "cannot setup interface\n"); 389 goto fail; 390 } 391 392 error = virtio_setup_intr(dev, INTR_TYPE_NET); 393 if (error) { 394 device_printf(dev, "cannot setup virtqueue interrupts\n"); 395 /* BMV: This will crash if during boot! 
*/ 396 ether_ifdetach(sc->vtnet_ifp); 397 goto fail; 398 } 399 400 #ifdef DEV_NETMAP 401 vtnet_netmap_attach(sc); 402 #endif /* DEV_NETMAP */ 403 404 vtnet_start_taskqueues(sc); 405 406 fail: 407 if (error) 408 vtnet_detach(dev); 409 410 return (error); 411 } 412 413 static int 414 vtnet_detach(device_t dev) 415 { 416 struct vtnet_softc *sc; 417 struct ifnet *ifp; 418 419 sc = device_get_softc(dev); 420 ifp = sc->vtnet_ifp; 421 422 if (device_is_attached(dev)) { 423 VTNET_CORE_LOCK(sc); 424 vtnet_stop(sc); 425 VTNET_CORE_UNLOCK(sc); 426 427 callout_drain(&sc->vtnet_tick_ch); 428 vtnet_drain_taskqueues(sc); 429 430 ether_ifdetach(ifp); 431 } 432 433 #ifdef DEV_NETMAP 434 netmap_detach(ifp); 435 #endif /* DEV_NETMAP */ 436 437 vtnet_free_taskqueues(sc); 438 439 if (sc->vtnet_vlan_attach != NULL) { 440 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); 441 sc->vtnet_vlan_attach = NULL; 442 } 443 if (sc->vtnet_vlan_detach != NULL) { 444 EVENTHANDLER_DEREGISTER(vlan_unconfg, sc->vtnet_vlan_detach); 445 sc->vtnet_vlan_detach = NULL; 446 } 447 448 ifmedia_removeall(&sc->vtnet_media); 449 450 if (ifp != NULL) { 451 if_free(ifp); 452 sc->vtnet_ifp = NULL; 453 } 454 455 vtnet_free_rxtx_queues(sc); 456 vtnet_free_rx_filters(sc); 457 458 if (sc->vtnet_ctrl_vq != NULL) 459 vtnet_free_ctrl_vq(sc); 460 461 VTNET_CORE_LOCK_DESTROY(sc); 462 463 return (0); 464 } 465 466 static int 467 vtnet_suspend(device_t dev) 468 { 469 struct vtnet_softc *sc; 470 471 sc = device_get_softc(dev); 472 473 VTNET_CORE_LOCK(sc); 474 vtnet_stop(sc); 475 sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; 476 VTNET_CORE_UNLOCK(sc); 477 478 return (0); 479 } 480 481 static int 482 vtnet_resume(device_t dev) 483 { 484 struct vtnet_softc *sc; 485 struct ifnet *ifp; 486 487 sc = device_get_softc(dev); 488 ifp = sc->vtnet_ifp; 489 490 VTNET_CORE_LOCK(sc); 491 if (ifp->if_flags & IFF_UP) 492 vtnet_init_locked(sc); 493 sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; 494 VTNET_CORE_UNLOCK(sc); 495 496 return (0); 497 } 498 499 static int 500 vtnet_shutdown(device_t dev) 501 { 502 503 /* 504 * Suspend already does all of what we need to 505 * do here; we just never expect to be resumed. 506 */ 507 return (vtnet_suspend(dev)); 508 } 509 510 static int 511 vtnet_attach_completed(device_t dev) 512 { 513 514 vtnet_attach_disable_promisc(device_get_softc(dev)); 515 516 return (0); 517 } 518 519 static int 520 vtnet_config_change(device_t dev) 521 { 522 struct vtnet_softc *sc; 523 524 sc = device_get_softc(dev); 525 526 VTNET_CORE_LOCK(sc); 527 vtnet_update_link_status(sc); 528 if (sc->vtnet_link_active != 0) 529 vtnet_tx_start_all(sc); 530 VTNET_CORE_UNLOCK(sc); 531 532 return (0); 533 } 534 535 static void 536 vtnet_negotiate_features(struct vtnet_softc *sc) 537 { 538 device_t dev; 539 uint64_t mask, features; 540 541 dev = sc->vtnet_dev; 542 mask = 0; 543 544 /* 545 * TSO and LRO are only available when their corresponding checksum 546 * offload feature is also negotiated. 
547 */ 548 if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) { 549 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; 550 mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES; 551 } 552 if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable)) 553 mask |= VTNET_TSO_FEATURES; 554 if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable)) 555 mask |= VTNET_LRO_FEATURES; 556 #ifndef VTNET_LEGACY_TX 557 if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable)) 558 mask |= VIRTIO_NET_F_MQ; 559 #else 560 mask |= VIRTIO_NET_F_MQ; 561 #endif 562 563 features = VTNET_FEATURES & ~mask; 564 sc->vtnet_features = virtio_negotiate_features(dev, features); 565 566 if (virtio_with_feature(dev, VTNET_LRO_FEATURES) && 567 virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) { 568 /* 569 * LRO without mergeable buffers requires special care. This 570 * is not ideal because every receive buffer must be large 571 * enough to hold the maximum TCP packet, the Ethernet header, 572 * and the header. This requires up to 34 descriptors with 573 * MCLBYTES clusters. If we do not have indirect descriptors, 574 * LRO is disabled since the virtqueue will not contain very 575 * many receive buffers. 576 */ 577 if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { 578 device_printf(dev, 579 "LRO disabled due to both mergeable buffers and " 580 "indirect descriptors not negotiated\n"); 581 582 features &= ~VTNET_LRO_FEATURES; 583 sc->vtnet_features = 584 virtio_negotiate_features(dev, features); 585 } else 586 sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; 587 } 588 } 589 590 static void 591 vtnet_setup_features(struct vtnet_softc *sc) 592 { 593 device_t dev; 594 int max_pairs, max; 595 596 dev = sc->vtnet_dev; 597 598 vtnet_negotiate_features(sc); 599 600 if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX)) 601 sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX; 602 603 if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { 604 /* This feature should always be negotiated. 
*/ 605 sc->vtnet_flags |= VTNET_FLAG_MAC; 606 } 607 608 if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { 609 sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; 610 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); 611 } else 612 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); 613 614 if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) 615 sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS; 616 else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) 617 sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS; 618 else 619 sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS; 620 621 if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || 622 virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || 623 virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) 624 sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS; 625 else 626 sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS; 627 628 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { 629 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; 630 631 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) 632 sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; 633 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) 634 sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; 635 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR)) 636 sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; 637 } 638 639 if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) && 640 sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 641 max_pairs = virtio_read_dev_config_2(dev, 642 offsetof(struct virtio_net_config, max_virtqueue_pairs)); 643 if (max_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || 644 max_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) 645 max_pairs = 1; 646 } else 647 max_pairs = 1; 648 649 if (max_pairs > 1) { 650 /* 651 * Limit the maximum number of queue pairs to the number of 652 * CPUs or the configured maximum. The actual number of 653 * queues that get used may be less. 654 */ 655 max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs); 656 if (max > 0 && max_pairs > max) 657 max_pairs = max; 658 if (max_pairs > mp_ncpus) 659 max_pairs = mp_ncpus; 660 if (max_pairs > VTNET_MAX_QUEUE_PAIRS) 661 max_pairs = VTNET_MAX_QUEUE_PAIRS; 662 if (max_pairs > 1) 663 sc->vtnet_flags |= VTNET_FLAG_MULTIQ; 664 } 665 666 sc->vtnet_max_vq_pairs = max_pairs; 667 } 668 669 static int 670 vtnet_init_rxq(struct vtnet_softc *sc, int id) 671 { 672 struct vtnet_rxq *rxq; 673 674 rxq = &sc->vtnet_rxqs[id]; 675 676 snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d", 677 device_get_nameunit(sc->vtnet_dev), id); 678 mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF); 679 680 rxq->vtnrx_sc = sc; 681 rxq->vtnrx_id = id; 682 683 rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT); 684 if (rxq->vtnrx_sg == NULL) 685 return (ENOMEM); 686 687 TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); 688 rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT, 689 taskqueue_thread_enqueue, &rxq->vtnrx_tq); 690 691 return (rxq->vtnrx_tq == NULL ? 
ENOMEM : 0); 692 } 693 694 static int 695 vtnet_init_txq(struct vtnet_softc *sc, int id) 696 { 697 struct vtnet_txq *txq; 698 699 txq = &sc->vtnet_txqs[id]; 700 701 snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d", 702 device_get_nameunit(sc->vtnet_dev), id); 703 mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF); 704 705 txq->vtntx_sc = sc; 706 txq->vtntx_id = id; 707 708 txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT); 709 if (txq->vtntx_sg == NULL) 710 return (ENOMEM); 711 712 #ifndef VTNET_LEGACY_TX 713 txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF, 714 M_NOWAIT, &txq->vtntx_mtx); 715 if (txq->vtntx_br == NULL) 716 return (ENOMEM); 717 718 TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq); 719 #endif 720 TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq); 721 txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT, 722 taskqueue_thread_enqueue, &txq->vtntx_tq); 723 if (txq->vtntx_tq == NULL) 724 return (ENOMEM); 725 726 return (0); 727 } 728 729 static int 730 vtnet_alloc_rxtx_queues(struct vtnet_softc *sc) 731 { 732 int i, npairs, error; 733 734 npairs = sc->vtnet_max_vq_pairs; 735 736 sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF, 737 M_NOWAIT | M_ZERO); 738 sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF, 739 M_NOWAIT | M_ZERO); 740 if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL) 741 return (ENOMEM); 742 743 for (i = 0; i < npairs; i++) { 744 error = vtnet_init_rxq(sc, i); 745 if (error) 746 return (error); 747 error = vtnet_init_txq(sc, i); 748 if (error) 749 return (error); 750 } 751 752 vtnet_setup_queue_sysctl(sc); 753 754 return (0); 755 } 756 757 static void 758 vtnet_destroy_rxq(struct vtnet_rxq *rxq) 759 { 760 761 rxq->vtnrx_sc = NULL; 762 rxq->vtnrx_id = -1; 763 764 if (rxq->vtnrx_sg != NULL) { 765 sglist_free(rxq->vtnrx_sg); 766 rxq->vtnrx_sg = NULL; 767 } 768 769 if (mtx_initialized(&rxq->vtnrx_mtx) != 0) 770 mtx_destroy(&rxq->vtnrx_mtx); 771 } 772 773 static void 774 vtnet_destroy_txq(struct vtnet_txq *txq) 775 { 776 777 txq->vtntx_sc = NULL; 778 txq->vtntx_id = -1; 779 780 if (txq->vtntx_sg != NULL) { 781 sglist_free(txq->vtntx_sg); 782 txq->vtntx_sg = NULL; 783 } 784 785 #ifndef VTNET_LEGACY_TX 786 if (txq->vtntx_br != NULL) { 787 buf_ring_free(txq->vtntx_br, M_DEVBUF); 788 txq->vtntx_br = NULL; 789 } 790 #endif 791 792 if (mtx_initialized(&txq->vtntx_mtx) != 0) 793 mtx_destroy(&txq->vtntx_mtx); 794 } 795 796 static void 797 vtnet_free_rxtx_queues(struct vtnet_softc *sc) 798 { 799 int i; 800 801 if (sc->vtnet_rxqs != NULL) { 802 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) 803 vtnet_destroy_rxq(&sc->vtnet_rxqs[i]); 804 free(sc->vtnet_rxqs, M_DEVBUF); 805 sc->vtnet_rxqs = NULL; 806 } 807 808 if (sc->vtnet_txqs != NULL) { 809 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) 810 vtnet_destroy_txq(&sc->vtnet_txqs[i]); 811 free(sc->vtnet_txqs, M_DEVBUF); 812 sc->vtnet_txqs = NULL; 813 } 814 } 815 816 static int 817 vtnet_alloc_rx_filters(struct vtnet_softc *sc) 818 { 819 820 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 821 sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter), 822 M_DEVBUF, M_NOWAIT | M_ZERO); 823 if (sc->vtnet_mac_filter == NULL) 824 return (ENOMEM); 825 } 826 827 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { 828 sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) * 829 VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO); 830 if (sc->vtnet_vlan_filter == NULL) 831 return (ENOMEM); 832 } 833 834 return (0); 835 } 836 837 static 
void 838 vtnet_free_rx_filters(struct vtnet_softc *sc) 839 { 840 841 if (sc->vtnet_mac_filter != NULL) { 842 free(sc->vtnet_mac_filter, M_DEVBUF); 843 sc->vtnet_mac_filter = NULL; 844 } 845 846 if (sc->vtnet_vlan_filter != NULL) { 847 free(sc->vtnet_vlan_filter, M_DEVBUF); 848 sc->vtnet_vlan_filter = NULL; 849 } 850 } 851 852 static int 853 vtnet_alloc_virtqueues(struct vtnet_softc *sc) 854 { 855 device_t dev; 856 struct vq_alloc_info *info; 857 struct vtnet_rxq *rxq; 858 struct vtnet_txq *txq; 859 int i, idx, flags, nvqs, error; 860 861 dev = sc->vtnet_dev; 862 flags = 0; 863 864 nvqs = sc->vtnet_max_vq_pairs * 2; 865 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) 866 nvqs++; 867 868 info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT); 869 if (info == NULL) 870 return (ENOMEM); 871 872 for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) { 873 rxq = &sc->vtnet_rxqs[i]; 874 VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs, 875 vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq, 876 "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id); 877 878 txq = &sc->vtnet_txqs[i]; 879 VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs, 880 vtnet_tx_vq_intr, txq, &txq->vtntx_vq, 881 "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id); 882 } 883 884 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 885 VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL, 886 &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev)); 887 } 888 889 /* 890 * Enable interrupt binding if this is multiqueue. This only matters 891 * when per-vq MSIX is available. 892 */ 893 if (sc->vtnet_flags & VTNET_FLAG_MULTIQ) 894 flags |= 0; 895 896 error = virtio_alloc_virtqueues(dev, flags, nvqs, info); 897 free(info, M_TEMP); 898 899 return (error); 900 } 901 902 static int 903 vtnet_setup_interface(struct vtnet_softc *sc) 904 { 905 device_t dev; 906 struct ifnet *ifp; 907 908 dev = sc->vtnet_dev; 909 910 ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER); 911 if (ifp == NULL) { 912 device_printf(dev, "cannot allocate ifnet structure\n"); 913 return (ENOSPC); 914 } 915 916 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 917 ifp->if_baudrate = IF_Gbps(10); /* Approx. */ 918 ifp->if_softc = sc; 919 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 920 ifp->if_init = vtnet_init; 921 ifp->if_ioctl = vtnet_ioctl; 922 ifp->if_get_counter = vtnet_get_counter; 923 #ifndef VTNET_LEGACY_TX 924 ifp->if_transmit = vtnet_txq_mq_start; 925 ifp->if_qflush = vtnet_qflush; 926 #else 927 struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq; 928 ifp->if_start = vtnet_start; 929 IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1); 930 ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1; 931 IFQ_SET_READY(&ifp->if_snd); 932 #endif 933 934 ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, 935 vtnet_ifmedia_sts); 936 ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); 937 ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); 938 939 /* Read (or generate) the MAC address for the adapter. */ 940 vtnet_get_hwaddr(sc); 941 942 ether_ifattach(ifp, sc->vtnet_hwaddr); 943 944 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) 945 ifp->if_capabilities |= IFCAP_LINKSTATE; 946 947 /* Tell the upper layer(s) we support long frames. 
*/ 948 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 949 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 950 951 if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { 952 ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6; 953 954 if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) { 955 ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6; 956 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; 957 } else { 958 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) 959 ifp->if_capabilities |= IFCAP_TSO4; 960 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) 961 ifp->if_capabilities |= IFCAP_TSO6; 962 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) 963 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; 964 } 965 966 if (ifp->if_capabilities & IFCAP_TSO) 967 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 968 } 969 970 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { 971 ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6; 972 973 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || 974 virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) 975 ifp->if_capabilities |= IFCAP_LRO; 976 } 977 978 if (ifp->if_capabilities & IFCAP_HWCSUM) { 979 /* 980 * VirtIO does not support VLAN tagging, but we can fake 981 * it by inserting and removing the 802.1Q header during 982 * transmit and receive. We are then able to do checksum 983 * offloading of VLAN frames. 984 */ 985 ifp->if_capabilities |= 986 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 987 } 988 989 ifp->if_capenable = ifp->if_capabilities; 990 991 /* 992 * Capabilities after here are not enabled by default. 993 */ 994 995 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { 996 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 997 998 sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 999 vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); 1000 sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 1001 vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); 1002 } 1003 1004 vtnet_set_rx_process_limit(sc); 1005 vtnet_set_tx_intr_threshold(sc); 1006 1007 return (0); 1008 } 1009 1010 static int 1011 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) 1012 { 1013 struct ifnet *ifp; 1014 int frame_size, clsize; 1015 1016 ifp = sc->vtnet_ifp; 1017 1018 if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU) 1019 return (EINVAL); 1020 1021 frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) + 1022 new_mtu; 1023 1024 /* 1025 * Based on the new MTU (and hence frame size) determine which 1026 * cluster size is most appropriate for the receive queues. 1027 */ 1028 if (frame_size <= MCLBYTES) { 1029 clsize = MCLBYTES; 1030 } else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1031 /* Avoid going past 9K jumbos. 
*/ 1032 if (frame_size > MJUM9BYTES) 1033 return (EINVAL); 1034 clsize = MJUM9BYTES; 1035 } else 1036 clsize = MJUMPAGESIZE; 1037 1038 ifp->if_mtu = new_mtu; 1039 sc->vtnet_rx_new_clsize = clsize; 1040 1041 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1042 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1043 vtnet_init_locked(sc); 1044 } 1045 1046 return (0); 1047 } 1048 1049 static int 1050 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1051 { 1052 struct vtnet_softc *sc; 1053 struct ifreq *ifr; 1054 int reinit, mask, error; 1055 1056 sc = ifp->if_softc; 1057 ifr = (struct ifreq *) data; 1058 error = 0; 1059 1060 switch (cmd) { 1061 case SIOCSIFMTU: 1062 if (ifp->if_mtu != ifr->ifr_mtu) { 1063 VTNET_CORE_LOCK(sc); 1064 error = vtnet_change_mtu(sc, ifr->ifr_mtu); 1065 VTNET_CORE_UNLOCK(sc); 1066 } 1067 break; 1068 1069 case SIOCSIFFLAGS: 1070 VTNET_CORE_LOCK(sc); 1071 if ((ifp->if_flags & IFF_UP) == 0) { 1072 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1073 vtnet_stop(sc); 1074 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1075 if ((ifp->if_flags ^ sc->vtnet_if_flags) & 1076 (IFF_PROMISC | IFF_ALLMULTI)) { 1077 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) 1078 vtnet_rx_filter(sc); 1079 else 1080 error = ENOTSUP; 1081 } 1082 } else 1083 vtnet_init_locked(sc); 1084 1085 if (error == 0) 1086 sc->vtnet_if_flags = ifp->if_flags; 1087 VTNET_CORE_UNLOCK(sc); 1088 break; 1089 1090 case SIOCADDMULTI: 1091 case SIOCDELMULTI: 1092 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) 1093 break; 1094 VTNET_CORE_LOCK(sc); 1095 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1096 vtnet_rx_filter_mac(sc); 1097 VTNET_CORE_UNLOCK(sc); 1098 break; 1099 1100 case SIOCSIFMEDIA: 1101 case SIOCGIFMEDIA: 1102 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); 1103 break; 1104 1105 case SIOCSIFCAP: 1106 VTNET_CORE_LOCK(sc); 1107 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1108 1109 if (mask & IFCAP_TXCSUM) 1110 ifp->if_capenable ^= IFCAP_TXCSUM; 1111 if (mask & IFCAP_TXCSUM_IPV6) 1112 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 1113 if (mask & IFCAP_TSO4) 1114 ifp->if_capenable ^= IFCAP_TSO4; 1115 if (mask & IFCAP_TSO6) 1116 ifp->if_capenable ^= IFCAP_TSO6; 1117 1118 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | 1119 IFCAP_VLAN_HWFILTER)) { 1120 /* These Rx features require us to renegotiate. 
*/ 1121 reinit = 1; 1122 1123 if (mask & IFCAP_RXCSUM) 1124 ifp->if_capenable ^= IFCAP_RXCSUM; 1125 if (mask & IFCAP_RXCSUM_IPV6) 1126 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 1127 if (mask & IFCAP_LRO) 1128 ifp->if_capenable ^= IFCAP_LRO; 1129 if (mask & IFCAP_VLAN_HWFILTER) 1130 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 1131 } else 1132 reinit = 0; 1133 1134 if (mask & IFCAP_VLAN_HWTSO) 1135 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1136 if (mask & IFCAP_VLAN_HWTAGGING) 1137 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1138 1139 if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1140 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1141 vtnet_init_locked(sc); 1142 } 1143 1144 VTNET_CORE_UNLOCK(sc); 1145 VLAN_CAPABILITIES(ifp); 1146 1147 break; 1148 1149 default: 1150 error = ether_ioctl(ifp, cmd, data); 1151 break; 1152 } 1153 1154 VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc); 1155 1156 return (error); 1157 } 1158 1159 static int 1160 vtnet_rxq_populate(struct vtnet_rxq *rxq) 1161 { 1162 struct virtqueue *vq; 1163 int nbufs, error; 1164 1165 vq = rxq->vtnrx_vq; 1166 error = ENOSPC; 1167 1168 for (nbufs = 0; !virtqueue_full(vq); nbufs++) { 1169 error = vtnet_rxq_new_buf(rxq); 1170 if (error) 1171 break; 1172 } 1173 1174 if (nbufs > 0) { 1175 virtqueue_notify(vq); 1176 /* 1177 * EMSGSIZE signifies the virtqueue did not have enough 1178 * entries available to hold the last mbuf. This is not 1179 * an error. 1180 */ 1181 if (error == EMSGSIZE) 1182 error = 0; 1183 } 1184 1185 return (error); 1186 } 1187 1188 static void 1189 vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq) 1190 { 1191 struct virtqueue *vq; 1192 struct mbuf *m; 1193 int last; 1194 1195 vq = rxq->vtnrx_vq; 1196 last = 0; 1197 1198 while ((m = virtqueue_drain(vq, &last)) != NULL) 1199 m_freem(m); 1200 1201 KASSERT(virtqueue_empty(vq), 1202 ("%s: mbufs remaining in rx queue %p", __func__, rxq)); 1203 } 1204 1205 static struct mbuf * 1206 vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) 1207 { 1208 struct mbuf *m_head, *m_tail, *m; 1209 int i, clsize; 1210 1211 clsize = sc->vtnet_rx_clsize; 1212 1213 KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1214 ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs)); 1215 1216 m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize); 1217 if (m_head == NULL) 1218 goto fail; 1219 1220 m_head->m_len = clsize; 1221 m_tail = m_head; 1222 1223 /* Allocate the rest of the chain. */ 1224 for (i = 1; i < nbufs; i++) { 1225 m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize); 1226 if (m == NULL) 1227 goto fail; 1228 1229 m->m_len = clsize; 1230 m_tail->m_next = m; 1231 m_tail = m; 1232 } 1233 1234 if (m_tailp != NULL) 1235 *m_tailp = m_tail; 1236 1237 return (m_head); 1238 1239 fail: 1240 sc->vtnet_stats.mbuf_alloc_failed++; 1241 m_freem(m_head); 1242 1243 return (NULL); 1244 } 1245 1246 /* 1247 * Slow path for when LRO without mergeable buffers is negotiated. 1248 */ 1249 static int 1250 vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0, 1251 int len0) 1252 { 1253 struct vtnet_softc *sc; 1254 struct mbuf *m, *m_prev; 1255 struct mbuf *m_new, *m_tail; 1256 int len, clsize, nreplace, error; 1257 1258 sc = rxq->vtnrx_sc; 1259 clsize = sc->vtnet_rx_clsize; 1260 1261 m_prev = NULL; 1262 m_tail = NULL; 1263 nreplace = 0; 1264 1265 m = m0; 1266 len = len0; 1267 1268 /* 1269 * Since these mbuf chains are so large, we avoid allocating an 1270 * entire replacement chain if possible. 
When the received frame 1271 * did not consume the entire chain, the unused mbufs are moved 1272 * to the replacement chain. 1273 */ 1274 while (len > 0) { 1275 /* 1276 * Something is seriously wrong if we received a frame 1277 * larger than the chain. Drop it. 1278 */ 1279 if (m == NULL) { 1280 sc->vtnet_stats.rx_frame_too_large++; 1281 return (EMSGSIZE); 1282 } 1283 1284 /* We always allocate the same cluster size. */ 1285 KASSERT(m->m_len == clsize, 1286 ("%s: mbuf size %d is not the cluster size %d", 1287 __func__, m->m_len, clsize)); 1288 1289 m->m_len = MIN(m->m_len, len); 1290 len -= m->m_len; 1291 1292 m_prev = m; 1293 m = m->m_next; 1294 nreplace++; 1295 } 1296 1297 KASSERT(nreplace <= sc->vtnet_rx_nmbufs, 1298 ("%s: too many replacement mbufs %d max %d", __func__, nreplace, 1299 sc->vtnet_rx_nmbufs)); 1300 1301 m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); 1302 if (m_new == NULL) { 1303 m_prev->m_len = clsize; 1304 return (ENOBUFS); 1305 } 1306 1307 /* 1308 * Move any unused mbufs from the received chain onto the end 1309 * of the new chain. 1310 */ 1311 if (m_prev->m_next != NULL) { 1312 m_tail->m_next = m_prev->m_next; 1313 m_prev->m_next = NULL; 1314 } 1315 1316 error = vtnet_rxq_enqueue_buf(rxq, m_new); 1317 if (error) { 1318 /* 1319 * BAD! We could not enqueue the replacement mbuf chain. We 1320 * must restore the m0 chain to the original state if it was 1321 * modified so we can subsequently discard it. 1322 * 1323 * NOTE: The replacement is suppose to be an identical copy 1324 * to the one just dequeued so this is an unexpected error. 1325 */ 1326 sc->vtnet_stats.rx_enq_replacement_failed++; 1327 1328 if (m_tail->m_next != NULL) { 1329 m_prev->m_next = m_tail->m_next; 1330 m_tail->m_next = NULL; 1331 } 1332 1333 m_prev->m_len = clsize; 1334 m_freem(m_new); 1335 } 1336 1337 return (error); 1338 } 1339 1340 static int 1341 vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len) 1342 { 1343 struct vtnet_softc *sc; 1344 struct mbuf *m_new; 1345 int error; 1346 1347 sc = rxq->vtnrx_sc; 1348 1349 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, 1350 ("%s: chained mbuf without LRO_NOMRG", __func__)); 1351 1352 if (m->m_next == NULL) { 1353 /* Fast-path for the common case of just one mbuf. */ 1354 if (m->m_len < len) 1355 return (EINVAL); 1356 1357 m_new = vtnet_rx_alloc_buf(sc, 1, NULL); 1358 if (m_new == NULL) 1359 return (ENOBUFS); 1360 1361 error = vtnet_rxq_enqueue_buf(rxq, m_new); 1362 if (error) { 1363 /* 1364 * The new mbuf is suppose to be an identical 1365 * copy of the one just dequeued so this is an 1366 * unexpected error. 
1367 */ 1368 m_freem(m_new); 1369 sc->vtnet_stats.rx_enq_replacement_failed++; 1370 } else 1371 m->m_len = len; 1372 } else 1373 error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len); 1374 1375 return (error); 1376 } 1377 1378 static int 1379 vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m) 1380 { 1381 struct vtnet_softc *sc; 1382 struct sglist *sg; 1383 struct vtnet_rx_header *rxhdr; 1384 uint8_t *mdata; 1385 int offset, error; 1386 1387 sc = rxq->vtnrx_sc; 1388 sg = rxq->vtnrx_sg; 1389 mdata = mtod(m, uint8_t *); 1390 1391 VTNET_RXQ_LOCK_ASSERT(rxq); 1392 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, 1393 ("%s: chained mbuf without LRO_NOMRG", __func__)); 1394 KASSERT(m->m_len == sc->vtnet_rx_clsize, 1395 ("%s: unexpected cluster size %d/%d", __func__, m->m_len, 1396 sc->vtnet_rx_clsize)); 1397 1398 sglist_reset(sg); 1399 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1400 MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); 1401 rxhdr = (struct vtnet_rx_header *) mdata; 1402 sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size); 1403 offset = sizeof(struct vtnet_rx_header); 1404 } else 1405 offset = 0; 1406 1407 sglist_append(sg, mdata + offset, m->m_len - offset); 1408 if (m->m_next != NULL) { 1409 error = sglist_append_mbuf(sg, m->m_next); 1410 MPASS(error == 0); 1411 } 1412 1413 error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg); 1414 1415 return (error); 1416 } 1417 1418 static int 1419 vtnet_rxq_new_buf(struct vtnet_rxq *rxq) 1420 { 1421 struct vtnet_softc *sc; 1422 struct mbuf *m; 1423 int error; 1424 1425 sc = rxq->vtnrx_sc; 1426 1427 m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL); 1428 if (m == NULL) 1429 return (ENOBUFS); 1430 1431 error = vtnet_rxq_enqueue_buf(rxq, m); 1432 if (error) 1433 m_freem(m); 1434 1435 return (error); 1436 } 1437 1438 /* 1439 * Use the checksum offset in the VirtIO header to set the 1440 * correct CSUM_* flags. 1441 */ 1442 static int 1443 vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m, 1444 uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) 1445 { 1446 struct vtnet_softc *sc; 1447 #if defined(INET) || defined(INET6) 1448 int offset = hdr->csum_start + hdr->csum_offset; 1449 #endif 1450 1451 sc = rxq->vtnrx_sc; 1452 1453 /* Only do a basic sanity check on the offset. */ 1454 switch (eth_type) { 1455 #if defined(INET) 1456 case ETHERTYPE_IP: 1457 if (__predict_false(offset < ip_start + sizeof(struct ip))) 1458 return (1); 1459 break; 1460 #endif 1461 #if defined(INET6) 1462 case ETHERTYPE_IPV6: 1463 if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) 1464 return (1); 1465 break; 1466 #endif 1467 default: 1468 sc->vtnet_stats.rx_csum_bad_ethtype++; 1469 return (1); 1470 } 1471 1472 /* 1473 * Use the offset to determine the appropriate CSUM_* flags. This is 1474 * a bit dirty, but we can get by with it since the checksum offsets 1475 * happen to be different. We assume the host host does not do IPv4 1476 * header checksum offloading. 
1477 */ 1478 switch (hdr->csum_offset) { 1479 case offsetof(struct udphdr, uh_sum): 1480 case offsetof(struct tcphdr, th_sum): 1481 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1482 m->m_pkthdr.csum_data = 0xFFFF; 1483 break; 1484 case offsetof(struct sctphdr, checksum): 1485 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1486 break; 1487 default: 1488 sc->vtnet_stats.rx_csum_bad_offset++; 1489 return (1); 1490 } 1491 1492 return (0); 1493 } 1494 1495 static int 1496 vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m, 1497 uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) 1498 { 1499 struct vtnet_softc *sc; 1500 int offset, proto; 1501 1502 sc = rxq->vtnrx_sc; 1503 1504 switch (eth_type) { 1505 #if defined(INET) 1506 case ETHERTYPE_IP: { 1507 struct ip *ip; 1508 if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) 1509 return (1); 1510 ip = (struct ip *)(m->m_data + ip_start); 1511 proto = ip->ip_p; 1512 offset = ip_start + (ip->ip_hl << 2); 1513 break; 1514 } 1515 #endif 1516 #if defined(INET6) 1517 case ETHERTYPE_IPV6: 1518 if (__predict_false(m->m_len < ip_start + 1519 sizeof(struct ip6_hdr))) 1520 return (1); 1521 offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); 1522 if (__predict_false(offset < 0)) 1523 return (1); 1524 break; 1525 #endif 1526 default: 1527 sc->vtnet_stats.rx_csum_bad_ethtype++; 1528 return (1); 1529 } 1530 1531 switch (proto) { 1532 case IPPROTO_TCP: 1533 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) 1534 return (1); 1535 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1536 m->m_pkthdr.csum_data = 0xFFFF; 1537 break; 1538 case IPPROTO_UDP: 1539 if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) 1540 return (1); 1541 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1542 m->m_pkthdr.csum_data = 0xFFFF; 1543 break; 1544 case IPPROTO_SCTP: 1545 if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) 1546 return (1); 1547 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1548 break; 1549 default: 1550 /* 1551 * For the remaining protocols, FreeBSD does not support 1552 * checksum offloading, so the checksum will be recomputed. 1553 */ 1554 #if 0 1555 if_printf(sc->vtnet_ifp, "cksum offload of unsupported " 1556 "protocol eth_type=%#x proto=%d csum_start=%d " 1557 "csum_offset=%d\n", __func__, eth_type, proto, 1558 hdr->csum_start, hdr->csum_offset); 1559 #endif 1560 break; 1561 } 1562 1563 return (0); 1564 } 1565 1566 /* 1567 * Set the appropriate CSUM_* flags. Unfortunately, the information 1568 * provided is not directly useful to us. The VirtIO header gives the 1569 * offset of the checksum, which is all Linux needs, but this is not 1570 * how FreeBSD does things. We are forced to peek inside the packet 1571 * a bit. 1572 * 1573 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD 1574 * could accept the offsets and let the stack figure it out. 1575 */ 1576 static int 1577 vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, 1578 struct virtio_net_hdr *hdr) 1579 { 1580 struct ether_header *eh; 1581 struct ether_vlan_header *evh; 1582 uint16_t eth_type; 1583 int offset, error; 1584 1585 eh = mtod(m, struct ether_header *); 1586 eth_type = ntohs(eh->ether_type); 1587 if (eth_type == ETHERTYPE_VLAN) { 1588 /* BMV: We should handle nested VLAN tags too. 
*/ 1589 evh = mtod(m, struct ether_vlan_header *); 1590 eth_type = ntohs(evh->evl_proto); 1591 offset = sizeof(struct ether_vlan_header); 1592 } else 1593 offset = sizeof(struct ether_header); 1594 1595 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1596 error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr); 1597 else 1598 error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr); 1599 1600 return (error); 1601 } 1602 1603 static void 1604 vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs) 1605 { 1606 struct mbuf *m; 1607 1608 while (--nbufs > 0) { 1609 m = virtqueue_dequeue(rxq->vtnrx_vq, NULL); 1610 if (m == NULL) 1611 break; 1612 vtnet_rxq_discard_buf(rxq, m); 1613 } 1614 } 1615 1616 static void 1617 vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m) 1618 { 1619 int error; 1620 1621 /* 1622 * Requeue the discarded mbuf. This should always be successful 1623 * since it was just dequeued. 1624 */ 1625 error = vtnet_rxq_enqueue_buf(rxq, m); 1626 KASSERT(error == 0, 1627 ("%s: cannot requeue discarded mbuf %d", __func__, error)); 1628 } 1629 1630 static int 1631 vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs) 1632 { 1633 struct vtnet_softc *sc; 1634 struct ifnet *ifp; 1635 struct virtqueue *vq; 1636 struct mbuf *m, *m_tail; 1637 int len; 1638 1639 sc = rxq->vtnrx_sc; 1640 vq = rxq->vtnrx_vq; 1641 ifp = sc->vtnet_ifp; 1642 m_tail = m_head; 1643 1644 while (--nbufs > 0) { 1645 m = virtqueue_dequeue(vq, &len); 1646 if (m == NULL) { 1647 rxq->vtnrx_stats.vrxs_ierrors++; 1648 goto fail; 1649 } 1650 1651 if (vtnet_rxq_new_buf(rxq) != 0) { 1652 rxq->vtnrx_stats.vrxs_iqdrops++; 1653 vtnet_rxq_discard_buf(rxq, m); 1654 if (nbufs > 1) 1655 vtnet_rxq_discard_merged_bufs(rxq, nbufs); 1656 goto fail; 1657 } 1658 1659 if (m->m_len < len) 1660 len = m->m_len; 1661 1662 m->m_len = len; 1663 m->m_flags &= ~M_PKTHDR; 1664 1665 m_head->m_pkthdr.len += len; 1666 m_tail->m_next = m; 1667 m_tail = m; 1668 } 1669 1670 return (0); 1671 1672 fail: 1673 sc->vtnet_stats.rx_mergeable_failed++; 1674 m_freem(m_head); 1675 1676 return (1); 1677 } 1678 1679 static void 1680 vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, 1681 struct virtio_net_hdr *hdr) 1682 { 1683 struct vtnet_softc *sc; 1684 struct ifnet *ifp; 1685 struct ether_header *eh; 1686 1687 sc = rxq->vtnrx_sc; 1688 ifp = sc->vtnet_ifp; 1689 1690 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1691 eh = mtod(m, struct ether_header *); 1692 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1693 vtnet_vlan_tag_remove(m); 1694 /* 1695 * With the 802.1Q header removed, update the 1696 * checksum starting location accordingly. 1697 */ 1698 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1699 hdr->csum_start -= ETHER_VLAN_ENCAP_LEN; 1700 } 1701 } 1702 1703 m->m_pkthdr.flowid = rxq->vtnrx_id; 1704 m->m_flags |= M_FLOWID; 1705 1706 /* 1707 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum 1708 * distinction that Linux does. Need to reevaluate if performing 1709 * offloading for the NEEDS_CSUM case is really appropriate. 
1710 */ 1711 if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | 1712 VIRTIO_NET_HDR_F_DATA_VALID)) { 1713 if (vtnet_rxq_csum(rxq, m, hdr) == 0) 1714 rxq->vtnrx_stats.vrxs_csum++; 1715 else 1716 rxq->vtnrx_stats.vrxs_csum_failed++; 1717 } 1718 1719 rxq->vtnrx_stats.vrxs_ipackets++; 1720 rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; 1721 1722 VTNET_RXQ_UNLOCK(rxq); 1723 (*ifp->if_input)(ifp, m); 1724 VTNET_RXQ_LOCK(rxq); 1725 } 1726 1727 static int 1728 vtnet_rxq_eof(struct vtnet_rxq *rxq) 1729 { 1730 struct virtio_net_hdr lhdr, *hdr; 1731 struct vtnet_softc *sc; 1732 struct ifnet *ifp; 1733 struct virtqueue *vq; 1734 struct mbuf *m; 1735 struct virtio_net_hdr_mrg_rxbuf *mhdr; 1736 int len, deq, nbufs, adjsz, count; 1737 1738 sc = rxq->vtnrx_sc; 1739 vq = rxq->vtnrx_vq; 1740 ifp = sc->vtnet_ifp; 1741 hdr = &lhdr; 1742 deq = 0; 1743 count = sc->vtnet_rx_process_limit; 1744 1745 VTNET_RXQ_LOCK_ASSERT(rxq); 1746 1747 #ifdef DEV_NETMAP 1748 if (netmap_rx_irq(ifp, 0, &deq)) { 1749 return (FALSE); 1750 } 1751 #endif /* DEV_NETMAP */ 1752 1753 while (count-- > 0) { 1754 m = virtqueue_dequeue(vq, &len); 1755 if (m == NULL) 1756 break; 1757 deq++; 1758 1759 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { 1760 rxq->vtnrx_stats.vrxs_ierrors++; 1761 vtnet_rxq_discard_buf(rxq, m); 1762 continue; 1763 } 1764 1765 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1766 nbufs = 1; 1767 adjsz = sizeof(struct vtnet_rx_header); 1768 /* 1769 * Account for our pad inserted between the header 1770 * and the actual start of the frame. 1771 */ 1772 len += VTNET_RX_HEADER_PAD; 1773 } else { 1774 mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); 1775 nbufs = mhdr->num_buffers; 1776 adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); 1777 } 1778 1779 if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { 1780 rxq->vtnrx_stats.vrxs_iqdrops++; 1781 vtnet_rxq_discard_buf(rxq, m); 1782 if (nbufs > 1) 1783 vtnet_rxq_discard_merged_bufs(rxq, nbufs); 1784 continue; 1785 } 1786 1787 m->m_pkthdr.len = len; 1788 m->m_pkthdr.rcvif = ifp; 1789 m->m_pkthdr.csum_flags = 0; 1790 1791 if (nbufs > 1) { 1792 /* Dequeue the rest of chain. */ 1793 if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0) 1794 continue; 1795 } 1796 1797 /* 1798 * Save copy of header before we strip it. For both mergeable 1799 * and non-mergeable, the header is at the beginning of the 1800 * mbuf data. We no longer need num_buffers, so always use a 1801 * regular header. 1802 * 1803 * BMV: Is this memcpy() expensive? We know the mbuf data is 1804 * still valid even after the m_adj(). 1805 */ 1806 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); 1807 m_adj(m, adjsz); 1808 1809 vtnet_rxq_input(rxq, m, hdr); 1810 1811 /* Must recheck after dropping the Rx lock. */ 1812 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1813 break; 1814 } 1815 1816 if (deq > 0) 1817 virtqueue_notify(vq); 1818 1819 return (count > 0 ? 0 : EAGAIN); 1820 } 1821 1822 static void 1823 vtnet_rx_vq_intr(void *xrxq) 1824 { 1825 struct vtnet_softc *sc; 1826 struct vtnet_rxq *rxq; 1827 struct ifnet *ifp; 1828 int tries, more; 1829 1830 rxq = xrxq; 1831 sc = rxq->vtnrx_sc; 1832 ifp = sc->vtnet_ifp; 1833 tries = 0; 1834 1835 if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) { 1836 /* 1837 * Ignore this interrupt. Either this is a spurious interrupt 1838 * or multiqueue without per-VQ MSIX so every queue needs to 1839 * be polled (a brain dead configuration we could try harder 1840 * to avoid). 
1841 */ 1842 vtnet_rxq_disable_intr(rxq); 1843 return; 1844 } 1845 1846 VTNET_RXQ_LOCK(rxq); 1847 1848 again: 1849 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1850 VTNET_RXQ_UNLOCK(rxq); 1851 return; 1852 } 1853 1854 more = vtnet_rxq_eof(rxq); 1855 if (more || vtnet_rxq_enable_intr(rxq) != 0) { 1856 if (!more) 1857 vtnet_rxq_disable_intr(rxq); 1858 /* 1859 * This is an occasional condition or race (when !more), 1860 * so retry a few times before scheduling the taskqueue. 1861 */ 1862 if (tries++ < VTNET_INTR_DISABLE_RETRIES) 1863 goto again; 1864 1865 VTNET_RXQ_UNLOCK(rxq); 1866 rxq->vtnrx_stats.vrxs_rescheduled++; 1867 taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); 1868 } else 1869 VTNET_RXQ_UNLOCK(rxq); 1870 } 1871 1872 static void 1873 vtnet_rxq_tq_intr(void *xrxq, int pending) 1874 { 1875 struct vtnet_softc *sc; 1876 struct vtnet_rxq *rxq; 1877 struct ifnet *ifp; 1878 int more; 1879 1880 rxq = xrxq; 1881 sc = rxq->vtnrx_sc; 1882 ifp = sc->vtnet_ifp; 1883 1884 VTNET_RXQ_LOCK(rxq); 1885 1886 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1887 VTNET_RXQ_UNLOCK(rxq); 1888 return; 1889 } 1890 1891 more = vtnet_rxq_eof(rxq); 1892 if (more || vtnet_rxq_enable_intr(rxq) != 0) { 1893 if (!more) 1894 vtnet_rxq_disable_intr(rxq); 1895 rxq->vtnrx_stats.vrxs_rescheduled++; 1896 taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); 1897 } 1898 1899 VTNET_RXQ_UNLOCK(rxq); 1900 } 1901 1902 static int 1903 vtnet_txq_below_threshold(struct vtnet_txq *txq) 1904 { 1905 struct vtnet_softc *sc; 1906 struct virtqueue *vq; 1907 1908 sc = txq->vtntx_sc; 1909 vq = txq->vtntx_vq; 1910 1911 return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh); 1912 } 1913 1914 static int 1915 vtnet_txq_notify(struct vtnet_txq *txq) 1916 { 1917 struct virtqueue *vq; 1918 1919 vq = txq->vtntx_vq; 1920 1921 txq->vtntx_watchdog = VTNET_TX_TIMEOUT; 1922 virtqueue_notify(vq); 1923 1924 if (vtnet_txq_enable_intr(txq) == 0) 1925 return (0); 1926 1927 /* 1928 * Drain frames that were completed since last checked. If this 1929 * causes the queue to go above the threshold, the caller should 1930 * continue transmitting. 1931 */ 1932 if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) { 1933 virtqueue_disable_intr(vq); 1934 return (1); 1935 } 1936 1937 return (0); 1938 } 1939 1940 static void 1941 vtnet_txq_free_mbufs(struct vtnet_txq *txq) 1942 { 1943 struct virtqueue *vq; 1944 struct vtnet_tx_header *txhdr; 1945 int last; 1946 1947 vq = txq->vtntx_vq; 1948 last = 0; 1949 1950 while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { 1951 m_freem(txhdr->vth_mbuf); 1952 uma_zfree(vtnet_tx_header_zone, txhdr); 1953 } 1954 1955 KASSERT(virtqueue_empty(vq), 1956 ("%s: mbufs remaining in tx queue %p", __func__, txq)); 1957 } 1958 1959 /* 1960 * BMV: Much of this can go away once we finally have offsets in 1961 * the mbuf packet header. Bug andre@. 1962 */ 1963 static int 1964 vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, 1965 int *etype, int *proto, int *start) 1966 { 1967 struct vtnet_softc *sc; 1968 struct ether_vlan_header *evh; 1969 int offset; 1970 1971 sc = txq->vtntx_sc; 1972 1973 evh = mtod(m, struct ether_vlan_header *); 1974 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1975 /* BMV: We should handle nested VLAN tags too. 
*/ 1976 *etype = ntohs(evh->evl_proto); 1977 offset = sizeof(struct ether_vlan_header); 1978 } else { 1979 *etype = ntohs(evh->evl_encap_proto); 1980 offset = sizeof(struct ether_header); 1981 } 1982 1983 switch (*etype) { 1984 #if defined(INET) 1985 case ETHERTYPE_IP: { 1986 struct ip *ip, iphdr; 1987 if (__predict_false(m->m_len < offset + sizeof(struct ip))) { 1988 m_copydata(m, offset, sizeof(struct ip), 1989 (caddr_t) &iphdr); 1990 ip = &iphdr; 1991 } else 1992 ip = (struct ip *)(m->m_data + offset); 1993 *proto = ip->ip_p; 1994 *start = offset + (ip->ip_hl << 2); 1995 break; 1996 } 1997 #endif 1998 #if defined(INET6) 1999 case ETHERTYPE_IPV6: 2000 *proto = -1; 2001 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); 2002 /* Assert the network stack sent us a valid packet. */ 2003 KASSERT(*start > offset, 2004 ("%s: mbuf %p start %d offset %d proto %d", __func__, m, 2005 *start, offset, *proto)); 2006 break; 2007 #endif 2008 default: 2009 sc->vtnet_stats.tx_csum_bad_ethtype++; 2010 return (EINVAL); 2011 } 2012 2013 return (0); 2014 } 2015 2016 static int 2017 vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type, 2018 int offset, struct virtio_net_hdr *hdr) 2019 { 2020 static struct timeval lastecn; 2021 static int curecn; 2022 struct vtnet_softc *sc; 2023 struct tcphdr *tcp, tcphdr; 2024 2025 sc = txq->vtntx_sc; 2026 2027 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { 2028 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); 2029 tcp = &tcphdr; 2030 } else 2031 tcp = (struct tcphdr *)(m->m_data + offset); 2032 2033 hdr->hdr_len = offset + (tcp->th_off << 2); 2034 hdr->gso_size = m->m_pkthdr.tso_segsz; 2035 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : 2036 VIRTIO_NET_HDR_GSO_TCPV6; 2037 2038 if (tcp->th_flags & TH_CWR) { 2039 /* 2040 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, 2041 * ECN support is not on a per-interface basis, but globally via 2042 * the net.inet.tcp.ecn.enable sysctl knob. The default is off. 2043 */ 2044 if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { 2045 if (ppsratecheck(&lastecn, &curecn, 1)) 2046 if_printf(sc->vtnet_ifp, 2047 "TSO with ECN not negotiated with host\n"); 2048 return (ENOTSUP); 2049 } 2050 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 2051 } 2052 2053 txq->vtntx_stats.vtxs_tso++; 2054 2055 return (0); 2056 } 2057 2058 static struct mbuf * 2059 vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m, 2060 struct virtio_net_hdr *hdr) 2061 { 2062 struct vtnet_softc *sc; 2063 int flags, etype, csum_start, proto, error; 2064 2065 sc = txq->vtntx_sc; 2066 flags = m->m_pkthdr.csum_flags; 2067 2068 error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start); 2069 if (error) 2070 goto drop; 2071 2072 if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) || 2073 (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) { 2074 /* 2075 * We could compare the IP protocol vs the CSUM_ flag too, 2076 * but that really should not be necessary. 2077 */ 2078 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; 2079 hdr->csum_start = csum_start; 2080 hdr->csum_offset = m->m_pkthdr.csum_data; 2081 txq->vtntx_stats.vtxs_csum++; 2082 } 2083 2084 if (flags & CSUM_TSO) { 2085 if (__predict_false(proto != IPPROTO_TCP)) { 2086 /* Likely failed to correctly parse the mbuf. 
*/ 2087 sc->vtnet_stats.tx_tso_not_tcp++; 2088 goto drop; 2089 } 2090 2091 KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, 2092 ("%s: mbuf %p TSO without checksum offload %#x", 2093 __func__, m, flags)); 2094 2095 error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr); 2096 if (error) 2097 goto drop; 2098 } 2099 2100 return (m); 2101 2102 drop: 2103 m_freem(m); 2104 return (NULL); 2105 } 2106 2107 static int 2108 vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head, 2109 struct vtnet_tx_header *txhdr) 2110 { 2111 struct vtnet_softc *sc; 2112 struct virtqueue *vq; 2113 struct sglist *sg; 2114 struct mbuf *m; 2115 int error; 2116 2117 sc = txq->vtntx_sc; 2118 vq = txq->vtntx_vq; 2119 sg = txq->vtntx_sg; 2120 m = *m_head; 2121 2122 sglist_reset(sg); 2123 error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); 2124 KASSERT(error == 0 && sg->sg_nseg == 1, 2125 ("%s: error %d adding header to sglist", __func__, error)); 2126 2127 error = sglist_append_mbuf(sg, m); 2128 if (error) { 2129 m = m_defrag(m, M_NOWAIT); 2130 if (m == NULL) 2131 goto fail; 2132 2133 *m_head = m; 2134 sc->vtnet_stats.tx_defragged++; 2135 2136 error = sglist_append_mbuf(sg, m); 2137 if (error) 2138 goto fail; 2139 } 2140 2141 txhdr->vth_mbuf = m; 2142 error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0); 2143 2144 return (error); 2145 2146 fail: 2147 sc->vtnet_stats.tx_defrag_failed++; 2148 m_freem(*m_head); 2149 *m_head = NULL; 2150 2151 return (ENOBUFS); 2152 } 2153 2154 static int 2155 vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head) 2156 { 2157 struct vtnet_tx_header *txhdr; 2158 struct virtio_net_hdr *hdr; 2159 struct mbuf *m; 2160 int error; 2161 2162 m = *m_head; 2163 M_ASSERTPKTHDR(m); 2164 2165 txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO); 2166 if (txhdr == NULL) { 2167 m_freem(m); 2168 *m_head = NULL; 2169 return (ENOMEM); 2170 } 2171 2172 /* 2173 * Always use the non-mergeable header, regardless if the feature 2174 * was negotiated. For transmit, num_buffers is always zero. The 2175 * vtnet_hdr_size is used to enqueue the correct header size. 
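 * When the mergeable buffer feature was negotiated, vtnet_hdr_size is
 * sizeof(struct virtio_net_hdr_mrg_rxbuf); otherwise it is
 * sizeof(struct virtio_net_hdr). Both layouts begin with the same
 * fields, so filling in the union's plain hdr member below works in
 * either case.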
2176 */ 2177 hdr = &txhdr->vth_uhdr.hdr; 2178 2179 if (m->m_flags & M_VLANTAG) { 2180 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 2181 if ((*m_head = m) == NULL) { 2182 error = ENOBUFS; 2183 goto fail; 2184 } 2185 m->m_flags &= ~M_VLANTAG; 2186 } 2187 2188 if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) { 2189 m = vtnet_txq_offload(txq, m, hdr); 2190 if ((*m_head = m) == NULL) { 2191 error = ENOBUFS; 2192 goto fail; 2193 } 2194 } 2195 2196 error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); 2197 if (error == 0) 2198 return (0); 2199 2200 fail: 2201 uma_zfree(vtnet_tx_header_zone, txhdr); 2202 2203 return (error); 2204 } 2205 2206 #ifdef VTNET_LEGACY_TX 2207 2208 static void 2209 vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp) 2210 { 2211 struct vtnet_softc *sc; 2212 struct virtqueue *vq; 2213 struct mbuf *m0; 2214 int tries, enq; 2215 2216 sc = txq->vtntx_sc; 2217 vq = txq->vtntx_vq; 2218 tries = 0; 2219 2220 VTNET_TXQ_LOCK_ASSERT(txq); 2221 2222 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2223 sc->vtnet_link_active == 0) 2224 return; 2225 2226 vtnet_txq_eof(txq); 2227 2228 again: 2229 enq = 0; 2230 2231 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 2232 if (virtqueue_full(vq)) 2233 break; 2234 2235 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 2236 if (m0 == NULL) 2237 break; 2238 2239 if (vtnet_txq_encap(txq, &m0) != 0) { 2240 if (m0 != NULL) 2241 IFQ_DRV_PREPEND(&ifp->if_snd, m0); 2242 break; 2243 } 2244 2245 enq++; 2246 ETHER_BPF_MTAP(ifp, m0); 2247 } 2248 2249 if (enq > 0 && vtnet_txq_notify(txq) != 0) { 2250 if (tries++ < VTNET_NOTIFY_RETRIES) 2251 goto again; 2252 2253 txq->vtntx_stats.vtxs_rescheduled++; 2254 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); 2255 } 2256 } 2257 2258 static void 2259 vtnet_start(struct ifnet *ifp) 2260 { 2261 struct vtnet_softc *sc; 2262 struct vtnet_txq *txq; 2263 2264 sc = ifp->if_softc; 2265 txq = &sc->vtnet_txqs[0]; 2266 2267 VTNET_TXQ_LOCK(txq); 2268 vtnet_start_locked(txq, ifp); 2269 VTNET_TXQ_UNLOCK(txq); 2270 } 2271 2272 #else /* !VTNET_LEGACY_TX */ 2273 2274 static int 2275 vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m) 2276 { 2277 struct vtnet_softc *sc; 2278 struct virtqueue *vq; 2279 struct buf_ring *br; 2280 struct ifnet *ifp; 2281 int enq, tries, error; 2282 2283 sc = txq->vtntx_sc; 2284 vq = txq->vtntx_vq; 2285 br = txq->vtntx_br; 2286 ifp = sc->vtnet_ifp; 2287 tries = 0; 2288 error = 0; 2289 2290 VTNET_TXQ_LOCK_ASSERT(txq); 2291 2292 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2293 sc->vtnet_link_active == 0) { 2294 if (m != NULL) 2295 error = drbr_enqueue(ifp, br, m); 2296 return (error); 2297 } 2298 2299 if (m != NULL) { 2300 error = drbr_enqueue(ifp, br, m); 2301 if (error) 2302 return (error); 2303 } 2304 2305 vtnet_txq_eof(txq); 2306 2307 again: 2308 enq = 0; 2309 2310 while ((m = drbr_peek(ifp, br)) != NULL) { 2311 if (virtqueue_full(vq)) { 2312 drbr_putback(ifp, br, m); 2313 break; 2314 } 2315 2316 if (vtnet_txq_encap(txq, &m) != 0) { 2317 if (m != NULL) 2318 drbr_putback(ifp, br, m); 2319 else 2320 drbr_advance(ifp, br); 2321 break; 2322 } 2323 drbr_advance(ifp, br); 2324 2325 enq++; 2326 ETHER_BPF_MTAP(ifp, m); 2327 } 2328 2329 if (enq > 0 && vtnet_txq_notify(txq) != 0) { 2330 if (tries++ < VTNET_NOTIFY_RETRIES) 2331 goto again; 2332 2333 txq->vtntx_stats.vtxs_rescheduled++; 2334 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); 2335 } 2336 2337 return (0); 2338 } 2339 2340 static int 2341 vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m) 2342 { 2343 struct vtnet_softc *sc; 
2344 struct vtnet_txq *txq; 2345 int i, npairs, error; 2346 2347 sc = ifp->if_softc; 2348 npairs = sc->vtnet_act_vq_pairs; 2349 2350 if (m->m_flags & M_FLOWID) 2351 i = m->m_pkthdr.flowid % npairs; 2352 else 2353 i = curcpu % npairs; 2354 2355 txq = &sc->vtnet_txqs[i]; 2356 2357 if (VTNET_TXQ_TRYLOCK(txq) != 0) { 2358 error = vtnet_txq_mq_start_locked(txq, m); 2359 VTNET_TXQ_UNLOCK(txq); 2360 } else { 2361 error = drbr_enqueue(ifp, txq->vtntx_br, m); 2362 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask); 2363 } 2364 2365 return (error); 2366 } 2367 2368 static void 2369 vtnet_txq_tq_deferred(void *xtxq, int pending) 2370 { 2371 struct vtnet_softc *sc; 2372 struct vtnet_txq *txq; 2373 2374 txq = xtxq; 2375 sc = txq->vtntx_sc; 2376 2377 VTNET_TXQ_LOCK(txq); 2378 if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br)) 2379 vtnet_txq_mq_start_locked(txq, NULL); 2380 VTNET_TXQ_UNLOCK(txq); 2381 } 2382 2383 #endif /* VTNET_LEGACY_TX */ 2384 2385 static void 2386 vtnet_txq_start(struct vtnet_txq *txq) 2387 { 2388 struct vtnet_softc *sc; 2389 struct ifnet *ifp; 2390 2391 sc = txq->vtntx_sc; 2392 ifp = sc->vtnet_ifp; 2393 2394 #ifdef VTNET_LEGACY_TX 2395 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2396 vtnet_start_locked(txq, ifp); 2397 #else 2398 if (!drbr_empty(ifp, txq->vtntx_br)) 2399 vtnet_txq_mq_start_locked(txq, NULL); 2400 #endif 2401 } 2402 2403 static void 2404 vtnet_txq_tq_intr(void *xtxq, int pending) 2405 { 2406 struct vtnet_softc *sc; 2407 struct vtnet_txq *txq; 2408 struct ifnet *ifp; 2409 2410 txq = xtxq; 2411 sc = txq->vtntx_sc; 2412 ifp = sc->vtnet_ifp; 2413 2414 VTNET_TXQ_LOCK(txq); 2415 2416 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2417 VTNET_TXQ_UNLOCK(txq); 2418 return; 2419 } 2420 2421 vtnet_txq_eof(txq); 2422 vtnet_txq_start(txq); 2423 2424 VTNET_TXQ_UNLOCK(txq); 2425 } 2426 2427 static int 2428 vtnet_txq_eof(struct vtnet_txq *txq) 2429 { 2430 struct virtqueue *vq; 2431 struct vtnet_tx_header *txhdr; 2432 struct mbuf *m; 2433 int deq; 2434 2435 vq = txq->vtntx_vq; 2436 deq = 0; 2437 VTNET_TXQ_LOCK_ASSERT(txq); 2438 2439 #ifdef DEV_NETMAP 2440 if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) { 2441 virtqueue_disable_intr(vq); // XXX luigi 2442 return 0; // XXX or 1 ? 2443 } 2444 #endif /* DEV_NETMAP */ 2445 2446 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { 2447 m = txhdr->vth_mbuf; 2448 deq++; 2449 2450 txq->vtntx_stats.vtxs_opackets++; 2451 txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len; 2452 if (m->m_flags & M_MCAST) 2453 txq->vtntx_stats.vtxs_omcasts++; 2454 2455 m_freem(m); 2456 uma_zfree(vtnet_tx_header_zone, txhdr); 2457 } 2458 2459 if (virtqueue_empty(vq)) 2460 txq->vtntx_watchdog = 0; 2461 2462 return (deq); 2463 } 2464 2465 static void 2466 vtnet_tx_vq_intr(void *xtxq) 2467 { 2468 struct vtnet_softc *sc; 2469 struct vtnet_txq *txq; 2470 struct ifnet *ifp; 2471 2472 txq = xtxq; 2473 sc = txq->vtntx_sc; 2474 ifp = sc->vtnet_ifp; 2475 2476 if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) { 2477 /* 2478 * Ignore this interrupt. Either this is a spurious interrupt 2479 * or multiqueue without per-VQ MSIX so every queue needs to 2480 * be polled (a brain dead configuration we could try harder 2481 * to avoid). 
2482 */ 2483 vtnet_txq_disable_intr(txq); 2484 return; 2485 } 2486 2487 VTNET_TXQ_LOCK(txq); 2488 2489 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2490 VTNET_TXQ_UNLOCK(txq); 2491 return; 2492 } 2493 2494 vtnet_txq_eof(txq); 2495 vtnet_txq_start(txq); 2496 2497 VTNET_TXQ_UNLOCK(txq); 2498 } 2499 2500 static void 2501 vtnet_tx_start_all(struct vtnet_softc *sc) 2502 { 2503 struct vtnet_txq *txq; 2504 int i; 2505 2506 VTNET_CORE_LOCK_ASSERT(sc); 2507 2508 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 2509 txq = &sc->vtnet_txqs[i]; 2510 2511 VTNET_TXQ_LOCK(txq); 2512 vtnet_txq_start(txq); 2513 VTNET_TXQ_UNLOCK(txq); 2514 } 2515 } 2516 2517 #ifndef VTNET_LEGACY_TX 2518 static void 2519 vtnet_qflush(struct ifnet *ifp) 2520 { 2521 struct vtnet_softc *sc; 2522 struct vtnet_txq *txq; 2523 struct mbuf *m; 2524 int i; 2525 2526 sc = ifp->if_softc; 2527 2528 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 2529 txq = &sc->vtnet_txqs[i]; 2530 2531 VTNET_TXQ_LOCK(txq); 2532 while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL) 2533 m_freem(m); 2534 VTNET_TXQ_UNLOCK(txq); 2535 } 2536 2537 if_qflush(ifp); 2538 } 2539 #endif 2540 2541 static int 2542 vtnet_watchdog(struct vtnet_txq *txq) 2543 { 2544 struct ifnet *ifp; 2545 2546 ifp = txq->vtntx_sc->vtnet_ifp; 2547 2548 VTNET_TXQ_LOCK(txq); 2549 if (txq->vtntx_watchdog == 1) { 2550 /* 2551 * Only drain completed frames if the watchdog is about to 2552 * expire. If any frames were drained, there may be enough 2553 * free descriptors now available to transmit queued frames. 2554 * In that case, the timer will immediately be decremented 2555 * below, but the timeout is generous enough that should not 2556 * be a problem. 2557 */ 2558 if (vtnet_txq_eof(txq) != 0) 2559 vtnet_txq_start(txq); 2560 } 2561 2562 if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) { 2563 VTNET_TXQ_UNLOCK(txq); 2564 return (0); 2565 } 2566 VTNET_TXQ_UNLOCK(txq); 2567 2568 if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id); 2569 return (1); 2570 } 2571 2572 static void 2573 vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc, 2574 struct vtnet_txq_stats *txacc) 2575 { 2576 2577 bzero(rxacc, sizeof(struct vtnet_rxq_stats)); 2578 bzero(txacc, sizeof(struct vtnet_txq_stats)); 2579 2580 for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) { 2581 struct vtnet_rxq_stats *rxst; 2582 struct vtnet_txq_stats *txst; 2583 2584 rxst = &sc->vtnet_rxqs[i].vtnrx_stats; 2585 rxacc->vrxs_ipackets += rxst->vrxs_ipackets; 2586 rxacc->vrxs_ibytes += rxst->vrxs_ibytes; 2587 rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops; 2588 rxacc->vrxs_csum += rxst->vrxs_csum; 2589 rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed; 2590 rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled; 2591 2592 txst = &sc->vtnet_txqs[i].vtntx_stats; 2593 txacc->vtxs_opackets += txst->vtxs_opackets; 2594 txacc->vtxs_obytes += txst->vtxs_obytes; 2595 txacc->vtxs_csum += txst->vtxs_csum; 2596 txacc->vtxs_tso += txst->vtxs_tso; 2597 txacc->vtxs_rescheduled += txst->vtxs_rescheduled; 2598 } 2599 } 2600 2601 static uint64_t 2602 vtnet_get_counter(if_t ifp, ift_counter cnt) 2603 { 2604 struct vtnet_softc *sc; 2605 struct vtnet_rxq_stats rxaccum; 2606 struct vtnet_txq_stats txaccum; 2607 2608 sc = if_getsoftc(ifp); 2609 vtnet_accum_stats(sc, &rxaccum, &txaccum); 2610 2611 switch (cnt) { 2612 case IFCOUNTER_IPACKETS: 2613 return (rxaccum.vrxs_ipackets); 2614 case IFCOUNTER_IQDROPS: 2615 return (rxaccum.vrxs_iqdrops); 2616 case IFCOUNTER_IERRORS: 2617 return (rxaccum.vrxs_ierrors); 2618 case IFCOUNTER_OPACKETS: 
2619 return (txaccum.vtxs_opackets); 2620 #ifndef VTNET_LEGACY_TX 2621 case IFCOUNTER_OBYTES: 2622 return (txaccum.vtxs_obytes); 2623 case IFCOUNTER_OMCASTS: 2624 return (txaccum.vtxs_omcasts); 2625 #endif 2626 default: 2627 return (if_get_counter_default(ifp, cnt)); 2628 } 2629 } 2630 2631 static void 2632 vtnet_tick(void *xsc) 2633 { 2634 struct vtnet_softc *sc; 2635 struct ifnet *ifp; 2636 int i, timedout; 2637 2638 sc = xsc; 2639 ifp = sc->vtnet_ifp; 2640 timedout = 0; 2641 2642 VTNET_CORE_LOCK_ASSERT(sc); 2643 2644 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) 2645 timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]); 2646 2647 if (timedout != 0) { 2648 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2649 vtnet_init_locked(sc); 2650 } else 2651 callout_schedule(&sc->vtnet_tick_ch, hz); 2652 } 2653 2654 static void 2655 vtnet_start_taskqueues(struct vtnet_softc *sc) 2656 { 2657 device_t dev; 2658 struct vtnet_rxq *rxq; 2659 struct vtnet_txq *txq; 2660 int i, error; 2661 2662 dev = sc->vtnet_dev; 2663 2664 /* 2665 * Errors here are very difficult to recover from - we cannot 2666 * easily fail because, if this is during boot, we will hang 2667 * when freeing any successfully started taskqueues because 2668 * the scheduler isn't up yet. 2669 * 2670 * Most drivers just ignore the return value - it only fails 2671 * with ENOMEM so an error is not likely. 2672 */ 2673 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 2674 rxq = &sc->vtnet_rxqs[i]; 2675 error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, 2676 "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); 2677 if (error) { 2678 device_printf(dev, "failed to start rx taskq %d\n", 2679 rxq->vtnrx_id); 2680 } 2681 2682 txq = &sc->vtnet_txqs[i]; 2683 error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET, 2684 "%s txq %d", device_get_nameunit(dev), txq->vtntx_id); 2685 if (error) { 2686 device_printf(dev, "failed to start tx taskq %d\n", 2687 txq->vtntx_id); 2688 } 2689 } 2690 } 2691 2692 static void 2693 vtnet_free_taskqueues(struct vtnet_softc *sc) 2694 { 2695 struct vtnet_rxq *rxq; 2696 struct vtnet_txq *txq; 2697 int i; 2698 2699 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 2700 rxq = &sc->vtnet_rxqs[i]; 2701 if (rxq->vtnrx_tq != NULL) { 2702 taskqueue_free(rxq->vtnrx_tq); 2703 rxq->vtnrx_vq = NULL; 2704 } 2705 2706 txq = &sc->vtnet_txqs[i]; 2707 if (txq->vtntx_tq != NULL) { 2708 taskqueue_free(txq->vtntx_tq); 2709 txq->vtntx_tq = NULL; 2710 } 2711 } 2712 } 2713 2714 static void 2715 vtnet_drain_taskqueues(struct vtnet_softc *sc) 2716 { 2717 struct vtnet_rxq *rxq; 2718 struct vtnet_txq *txq; 2719 int i; 2720 2721 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 2722 rxq = &sc->vtnet_rxqs[i]; 2723 if (rxq->vtnrx_tq != NULL) 2724 taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); 2725 2726 txq = &sc->vtnet_txqs[i]; 2727 if (txq->vtntx_tq != NULL) { 2728 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask); 2729 #ifndef VTNET_LEGACY_TX 2730 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask); 2731 #endif 2732 } 2733 } 2734 } 2735 2736 static void 2737 vtnet_drain_rxtx_queues(struct vtnet_softc *sc) 2738 { 2739 struct vtnet_rxq *rxq; 2740 struct vtnet_txq *txq; 2741 int i; 2742 2743 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 2744 rxq = &sc->vtnet_rxqs[i]; 2745 vtnet_rxq_free_mbufs(rxq); 2746 2747 txq = &sc->vtnet_txqs[i]; 2748 vtnet_txq_free_mbufs(txq); 2749 } 2750 } 2751 2752 static void 2753 vtnet_stop_rendezvous(struct vtnet_softc *sc) 2754 { 2755 struct vtnet_rxq *rxq; 2756 struct vtnet_txq *txq; 2757 int i; 2758 2759 /* 2760 * Lock 
and unlock the per-queue mutex so we known the stop 2761 * state is visible. Doing only the active queues should be 2762 * sufficient, but it does not cost much extra to do all the 2763 * queues. Note we hold the core mutex here too. 2764 */ 2765 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 2766 rxq = &sc->vtnet_rxqs[i]; 2767 VTNET_RXQ_LOCK(rxq); 2768 VTNET_RXQ_UNLOCK(rxq); 2769 2770 txq = &sc->vtnet_txqs[i]; 2771 VTNET_TXQ_LOCK(txq); 2772 VTNET_TXQ_UNLOCK(txq); 2773 } 2774 } 2775 2776 static void 2777 vtnet_stop(struct vtnet_softc *sc) 2778 { 2779 device_t dev; 2780 struct ifnet *ifp; 2781 2782 dev = sc->vtnet_dev; 2783 ifp = sc->vtnet_ifp; 2784 2785 VTNET_CORE_LOCK_ASSERT(sc); 2786 2787 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2788 sc->vtnet_link_active = 0; 2789 callout_stop(&sc->vtnet_tick_ch); 2790 2791 /* Only advisory. */ 2792 vtnet_disable_interrupts(sc); 2793 2794 /* 2795 * Stop the host adapter. This resets it to the pre-initialized 2796 * state. It will not generate any interrupts until after it is 2797 * reinitialized. 2798 */ 2799 virtio_stop(dev); 2800 vtnet_stop_rendezvous(sc); 2801 2802 /* Free any mbufs left in the virtqueues. */ 2803 vtnet_drain_rxtx_queues(sc); 2804 } 2805 2806 static int 2807 vtnet_virtio_reinit(struct vtnet_softc *sc) 2808 { 2809 device_t dev; 2810 struct ifnet *ifp; 2811 uint64_t features; 2812 int mask, error; 2813 2814 dev = sc->vtnet_dev; 2815 ifp = sc->vtnet_ifp; 2816 features = sc->vtnet_features; 2817 2818 mask = 0; 2819 #if defined(INET) 2820 mask |= IFCAP_RXCSUM; 2821 #endif 2822 #if defined (INET6) 2823 mask |= IFCAP_RXCSUM_IPV6; 2824 #endif 2825 2826 /* 2827 * Re-negotiate with the host, removing any disabled receive 2828 * features. Transmit features are disabled only on our side 2829 * via if_capenable and if_hwassist. 2830 */ 2831 2832 if (ifp->if_capabilities & mask) { 2833 /* 2834 * We require both IPv4 and IPv6 offloading to be enabled 2835 * in order to negotiated it: VirtIO does not distinguish 2836 * between the two. 2837 */ 2838 if ((ifp->if_capenable & mask) != mask) 2839 features &= ~VIRTIO_NET_F_GUEST_CSUM; 2840 } 2841 2842 if (ifp->if_capabilities & IFCAP_LRO) { 2843 if ((ifp->if_capenable & IFCAP_LRO) == 0) 2844 features &= ~VTNET_LRO_FEATURES; 2845 } 2846 2847 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { 2848 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 2849 features &= ~VIRTIO_NET_F_CTRL_VLAN; 2850 } 2851 2852 error = virtio_reinit(dev, features); 2853 if (error) 2854 device_printf(dev, "virtio reinit error %d\n", error); 2855 2856 return (error); 2857 } 2858 2859 static void 2860 vtnet_init_rx_filters(struct vtnet_softc *sc) 2861 { 2862 struct ifnet *ifp; 2863 2864 ifp = sc->vtnet_ifp; 2865 2866 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 2867 /* Restore promiscuous and all-multicast modes. */ 2868 vtnet_rx_filter(sc); 2869 /* Restore filtered MAC addresses. */ 2870 vtnet_rx_filter_mac(sc); 2871 } 2872 2873 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 2874 vtnet_rx_filter_vlan(sc); 2875 } 2876 2877 static int 2878 vtnet_init_rx_queues(struct vtnet_softc *sc) 2879 { 2880 device_t dev; 2881 struct vtnet_rxq *rxq; 2882 int i, clsize, error; 2883 2884 dev = sc->vtnet_dev; 2885 2886 /* 2887 * Use the new cluster size if one has been set (via a MTU 2888 * change). Otherwise, use the standard 2K clusters. 2889 * 2890 * BMV: It might make sense to use page sized clusters as 2891 * the default (depending on the features negotiated). 
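 * vtnet_rx_new_clsize is set by vtnet_change_mtu() to the cluster size
 * appropriate for the new MTU and is consumed (and cleared) here on the
 * next reinitialization.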
2892 */ 2893 if (sc->vtnet_rx_new_clsize != 0) { 2894 clsize = sc->vtnet_rx_new_clsize; 2895 sc->vtnet_rx_new_clsize = 0; 2896 } else 2897 clsize = MCLBYTES; 2898 2899 sc->vtnet_rx_clsize = clsize; 2900 sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize); 2901 2902 KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS || 2903 sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, 2904 ("%s: too many rx mbufs %d for %d segments", __func__, 2905 sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); 2906 2907 #ifdef DEV_NETMAP 2908 if (vtnet_netmap_init_rx_buffers(sc)) 2909 return 0; 2910 #endif /* DEV_NETMAP */ 2911 2912 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 2913 rxq = &sc->vtnet_rxqs[i]; 2914 2915 /* Hold the lock to satisfy asserts. */ 2916 VTNET_RXQ_LOCK(rxq); 2917 error = vtnet_rxq_populate(rxq); 2918 VTNET_RXQ_UNLOCK(rxq); 2919 2920 if (error) { 2921 device_printf(dev, 2922 "cannot allocate mbufs for Rx queue %d\n", i); 2923 return (error); 2924 } 2925 } 2926 2927 return (0); 2928 } 2929 2930 static int 2931 vtnet_init_tx_queues(struct vtnet_softc *sc) 2932 { 2933 struct vtnet_txq *txq; 2934 int i; 2935 2936 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 2937 txq = &sc->vtnet_txqs[i]; 2938 txq->vtntx_watchdog = 0; 2939 } 2940 2941 return (0); 2942 } 2943 2944 static int 2945 vtnet_init_rxtx_queues(struct vtnet_softc *sc) 2946 { 2947 int error; 2948 2949 error = vtnet_init_rx_queues(sc); 2950 if (error) 2951 return (error); 2952 2953 error = vtnet_init_tx_queues(sc); 2954 if (error) 2955 return (error); 2956 2957 return (0); 2958 } 2959 2960 static void 2961 vtnet_set_active_vq_pairs(struct vtnet_softc *sc) 2962 { 2963 device_t dev; 2964 int npairs; 2965 2966 dev = sc->vtnet_dev; 2967 2968 if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) { 2969 MPASS(sc->vtnet_max_vq_pairs == 1); 2970 sc->vtnet_act_vq_pairs = 1; 2971 return; 2972 } 2973 2974 /* BMV: Just use the maximum configured for now. */ 2975 npairs = sc->vtnet_max_vq_pairs; 2976 2977 if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { 2978 device_printf(dev, 2979 "cannot set active queue pairs to %d\n", npairs); 2980 npairs = 1; 2981 } 2982 2983 sc->vtnet_act_vq_pairs = npairs; 2984 } 2985 2986 static int 2987 vtnet_reinit(struct vtnet_softc *sc) 2988 { 2989 struct ifnet *ifp; 2990 int error; 2991 2992 ifp = sc->vtnet_ifp; 2993 2994 /* Use the current MAC address. */ 2995 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); 2996 vtnet_set_hwaddr(sc); 2997 2998 vtnet_set_active_vq_pairs(sc); 2999 3000 ifp->if_hwassist = 0; 3001 if (ifp->if_capenable & IFCAP_TXCSUM) 3002 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; 3003 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 3004 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6; 3005 if (ifp->if_capenable & IFCAP_TSO4) 3006 ifp->if_hwassist |= CSUM_TSO; 3007 if (ifp->if_capenable & IFCAP_TSO6) 3008 ifp->if_hwassist |= CSUM_TSO; /* No CSUM_TSO_IPV6. */ 3009 3010 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) 3011 vtnet_init_rx_filters(sc); 3012 3013 error = vtnet_init_rxtx_queues(sc); 3014 if (error) 3015 return (error); 3016 3017 vtnet_enable_interrupts(sc); 3018 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3019 3020 return (0); 3021 } 3022 3023 static void 3024 vtnet_init_locked(struct vtnet_softc *sc) 3025 { 3026 device_t dev; 3027 struct ifnet *ifp; 3028 3029 dev = sc->vtnet_dev; 3030 ifp = sc->vtnet_ifp; 3031 3032 VTNET_CORE_LOCK_ASSERT(sc); 3033 3034 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3035 return; 3036 3037 vtnet_stop(sc); 3038 3039 /* Reinitialize with the host. 
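 * vtnet_virtio_reinit() re-negotiates the (possibly reduced) feature
 * set, vtnet_reinit() re-programs the MAC address, offload flags,
 * queues, and Rx filters, and virtio_reinit_complete() then tells the
 * host the driver is ready.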
*/ 3040 if (vtnet_virtio_reinit(sc) != 0) 3041 goto fail; 3042 3043 if (vtnet_reinit(sc) != 0) 3044 goto fail; 3045 3046 virtio_reinit_complete(dev); 3047 3048 vtnet_update_link_status(sc); 3049 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 3050 3051 return; 3052 3053 fail: 3054 vtnet_stop(sc); 3055 } 3056 3057 static void 3058 vtnet_init(void *xsc) 3059 { 3060 struct vtnet_softc *sc; 3061 3062 sc = xsc; 3063 3064 #ifdef DEV_NETMAP 3065 if (!NA(sc->vtnet_ifp)) { 3066 D("try to attach again"); 3067 vtnet_netmap_attach(sc); 3068 } 3069 #endif /* DEV_NETMAP */ 3070 3071 VTNET_CORE_LOCK(sc); 3072 vtnet_init_locked(sc); 3073 VTNET_CORE_UNLOCK(sc); 3074 } 3075 3076 static void 3077 vtnet_free_ctrl_vq(struct vtnet_softc *sc) 3078 { 3079 struct virtqueue *vq; 3080 3081 vq = sc->vtnet_ctrl_vq; 3082 3083 /* 3084 * The control virtqueue is only polled and therefore it should 3085 * already be empty. 3086 */ 3087 KASSERT(virtqueue_empty(vq), 3088 ("%s: ctrl vq %p not empty", __func__, vq)); 3089 } 3090 3091 static void 3092 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, 3093 struct sglist *sg, int readable, int writable) 3094 { 3095 struct virtqueue *vq; 3096 3097 vq = sc->vtnet_ctrl_vq; 3098 3099 VTNET_CORE_LOCK_ASSERT(sc); 3100 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, 3101 ("%s: CTRL_VQ feature not negotiated", __func__)); 3102 3103 if (!virtqueue_empty(vq)) 3104 return; 3105 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) 3106 return; 3107 3108 /* 3109 * Poll for the response, but the command is likely already 3110 * done when we return from the notify. 3111 */ 3112 virtqueue_notify(vq); 3113 virtqueue_poll(vq, NULL); 3114 } 3115 3116 static int 3117 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) 3118 { 3119 struct virtio_net_ctrl_hdr hdr __aligned(2); 3120 struct sglist_seg segs[3]; 3121 struct sglist sg; 3122 uint8_t ack; 3123 int error; 3124 3125 hdr.class = VIRTIO_NET_CTRL_MAC; 3126 hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; 3127 ack = VIRTIO_NET_ERR; 3128 3129 sglist_init(&sg, 3, segs); 3130 error = 0; 3131 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); 3132 error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN); 3133 error |= sglist_append(&sg, &ack, sizeof(uint8_t)); 3134 KASSERT(error == 0 && sg.sg_nseg == 3, 3135 ("%s: error %d adding set MAC msg to sglist", __func__, error)); 3136 3137 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); 3138 3139 return (ack == VIRTIO_NET_OK ? 0 : EIO); 3140 } 3141 3142 static int 3143 vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs) 3144 { 3145 struct sglist_seg segs[3]; 3146 struct sglist sg; 3147 struct { 3148 struct virtio_net_ctrl_hdr hdr; 3149 uint8_t pad1; 3150 struct virtio_net_ctrl_mq mq; 3151 uint8_t pad2; 3152 uint8_t ack; 3153 } s __aligned(2); 3154 int error; 3155 3156 s.hdr.class = VIRTIO_NET_CTRL_MQ; 3157 s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; 3158 s.mq.virtqueue_pairs = npairs; 3159 s.ack = VIRTIO_NET_ERR; 3160 3161 sglist_init(&sg, 3, segs); 3162 error = 0; 3163 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 3164 error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); 3165 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 3166 KASSERT(error == 0 && sg.sg_nseg == 3, 3167 ("%s: error %d adding MQ message to sglist", __func__, error)); 3168 3169 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 3170 3171 return (s.ack == VIRTIO_NET_OK ? 
0 : EIO); 3172 } 3173 3174 static int 3175 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) 3176 { 3177 struct sglist_seg segs[3]; 3178 struct sglist sg; 3179 struct { 3180 struct virtio_net_ctrl_hdr hdr; 3181 uint8_t pad1; 3182 uint8_t onoff; 3183 uint8_t pad2; 3184 uint8_t ack; 3185 } s __aligned(2); 3186 int error; 3187 3188 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 3189 ("%s: CTRL_RX feature not negotiated", __func__)); 3190 3191 s.hdr.class = VIRTIO_NET_CTRL_RX; 3192 s.hdr.cmd = cmd; 3193 s.onoff = !!on; 3194 s.ack = VIRTIO_NET_ERR; 3195 3196 sglist_init(&sg, 3, segs); 3197 error = 0; 3198 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 3199 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); 3200 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 3201 KASSERT(error == 0 && sg.sg_nseg == 3, 3202 ("%s: error %d adding Rx message to sglist", __func__, error)); 3203 3204 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 3205 3206 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 3207 } 3208 3209 static int 3210 vtnet_set_promisc(struct vtnet_softc *sc, int on) 3211 { 3212 3213 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); 3214 } 3215 3216 static int 3217 vtnet_set_allmulti(struct vtnet_softc *sc, int on) 3218 { 3219 3220 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); 3221 } 3222 3223 /* 3224 * The device defaults to promiscuous mode for backwards compatibility. 3225 * Turn it off at attach time if possible. 3226 */ 3227 static void 3228 vtnet_attach_disable_promisc(struct vtnet_softc *sc) 3229 { 3230 struct ifnet *ifp; 3231 3232 ifp = sc->vtnet_ifp; 3233 3234 VTNET_CORE_LOCK(sc); 3235 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) { 3236 ifp->if_flags |= IFF_PROMISC; 3237 } else if (vtnet_set_promisc(sc, 0) != 0) { 3238 ifp->if_flags |= IFF_PROMISC; 3239 device_printf(sc->vtnet_dev, 3240 "cannot disable default promiscuous mode\n"); 3241 } 3242 VTNET_CORE_UNLOCK(sc); 3243 } 3244 3245 static void 3246 vtnet_rx_filter(struct vtnet_softc *sc) 3247 { 3248 device_t dev; 3249 struct ifnet *ifp; 3250 3251 dev = sc->vtnet_dev; 3252 ifp = sc->vtnet_ifp; 3253 3254 VTNET_CORE_LOCK_ASSERT(sc); 3255 3256 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) 3257 device_printf(dev, "cannot %s promiscuous mode\n", 3258 ifp->if_flags & IFF_PROMISC ? "enable" : "disable"); 3259 3260 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) 3261 device_printf(dev, "cannot %s all-multicast mode\n", 3262 ifp->if_flags & IFF_ALLMULTI ? 
"enable" : "disable"); 3263 } 3264 3265 static void 3266 vtnet_rx_filter_mac(struct vtnet_softc *sc) 3267 { 3268 struct virtio_net_ctrl_hdr hdr __aligned(2); 3269 struct vtnet_mac_filter *filter; 3270 struct sglist_seg segs[4]; 3271 struct sglist sg; 3272 struct ifnet *ifp; 3273 struct ifaddr *ifa; 3274 struct ifmultiaddr *ifma; 3275 int ucnt, mcnt, promisc, allmulti, error; 3276 uint8_t ack; 3277 3278 ifp = sc->vtnet_ifp; 3279 filter = sc->vtnet_mac_filter; 3280 ucnt = 0; 3281 mcnt = 0; 3282 promisc = 0; 3283 allmulti = 0; 3284 3285 VTNET_CORE_LOCK_ASSERT(sc); 3286 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 3287 ("%s: CTRL_RX feature not negotiated", __func__)); 3288 3289 /* Unicast MAC addresses: */ 3290 if_addr_rlock(ifp); 3291 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 3292 if (ifa->ifa_addr->sa_family != AF_LINK) 3293 continue; 3294 else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 3295 sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0) 3296 continue; 3297 else if (ucnt == VTNET_MAX_MAC_ENTRIES) { 3298 promisc = 1; 3299 break; 3300 } 3301 3302 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 3303 &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN); 3304 ucnt++; 3305 } 3306 if_addr_runlock(ifp); 3307 3308 if (promisc != 0) { 3309 filter->vmf_unicast.nentries = 0; 3310 if_printf(ifp, "more than %d MAC addresses assigned, " 3311 "falling back to promiscuous mode\n", 3312 VTNET_MAX_MAC_ENTRIES); 3313 } else 3314 filter->vmf_unicast.nentries = ucnt; 3315 3316 /* Multicast MAC addresses: */ 3317 if_maddr_rlock(ifp); 3318 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3319 if (ifma->ifma_addr->sa_family != AF_LINK) 3320 continue; 3321 else if (mcnt == VTNET_MAX_MAC_ENTRIES) { 3322 allmulti = 1; 3323 break; 3324 } 3325 3326 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 3327 &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN); 3328 mcnt++; 3329 } 3330 if_maddr_runlock(ifp); 3331 3332 if (allmulti != 0) { 3333 filter->vmf_multicast.nentries = 0; 3334 if_printf(ifp, "more than %d multicast MAC addresses " 3335 "assigned, falling back to all-multicast mode\n", 3336 VTNET_MAX_MAC_ENTRIES); 3337 } else 3338 filter->vmf_multicast.nentries = mcnt; 3339 3340 if (promisc != 0 && allmulti != 0) 3341 goto out; 3342 3343 hdr.class = VIRTIO_NET_CTRL_MAC; 3344 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; 3345 ack = VIRTIO_NET_ERR; 3346 3347 sglist_init(&sg, 4, segs); 3348 error = 0; 3349 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); 3350 error |= sglist_append(&sg, &filter->vmf_unicast, 3351 sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN); 3352 error |= sglist_append(&sg, &filter->vmf_multicast, 3353 sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN); 3354 error |= sglist_append(&sg, &ack, sizeof(uint8_t)); 3355 KASSERT(error == 0 && sg.sg_nseg == 4, 3356 ("%s: error %d adding MAC filter msg to sglist", __func__, error)); 3357 3358 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); 3359 3360 if (ack != VIRTIO_NET_OK) 3361 if_printf(ifp, "error setting host MAC filter table\n"); 3362 3363 out: 3364 if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0) 3365 if_printf(ifp, "cannot enable promiscuous mode\n"); 3366 if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0) 3367 if_printf(ifp, "cannot enable all-multicast mode\n"); 3368 } 3369 3370 static int 3371 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 3372 { 3373 struct sglist_seg segs[3]; 3374 struct sglist sg; 3375 struct { 3376 struct virtio_net_ctrl_hdr 
hdr; 3377 uint8_t pad1; 3378 uint16_t tag; 3379 uint8_t pad2; 3380 uint8_t ack; 3381 } s __aligned(2); 3382 int error; 3383 3384 s.hdr.class = VIRTIO_NET_CTRL_VLAN; 3385 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; 3386 s.tag = tag; 3387 s.ack = VIRTIO_NET_ERR; 3388 3389 sglist_init(&sg, 3, segs); 3390 error = 0; 3391 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 3392 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); 3393 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 3394 KASSERT(error == 0 && sg.sg_nseg == 3, 3395 ("%s: error %d adding VLAN message to sglist", __func__, error)); 3396 3397 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 3398 3399 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 3400 } 3401 3402 static void 3403 vtnet_rx_filter_vlan(struct vtnet_softc *sc) 3404 { 3405 uint32_t w; 3406 uint16_t tag; 3407 int i, bit; 3408 3409 VTNET_CORE_LOCK_ASSERT(sc); 3410 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, 3411 ("%s: VLAN_FILTER feature not negotiated", __func__)); 3412 3413 /* Enable the filter for each configured VLAN. */ 3414 for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { 3415 w = sc->vtnet_vlan_filter[i]; 3416 3417 while ((bit = ffs(w) - 1) != -1) { 3418 w &= ~(1 << bit); 3419 tag = sizeof(w) * CHAR_BIT * i + bit; 3420 3421 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) { 3422 device_printf(sc->vtnet_dev, 3423 "cannot enable VLAN %d filter\n", tag); 3424 } 3425 } 3426 } 3427 } 3428 3429 static void 3430 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 3431 { 3432 struct ifnet *ifp; 3433 int idx, bit; 3434 3435 ifp = sc->vtnet_ifp; 3436 idx = (tag >> 5) & 0x7F; 3437 bit = tag & 0x1F; 3438 3439 if (tag == 0 || tag > 4095) 3440 return; 3441 3442 VTNET_CORE_LOCK(sc); 3443 3444 if (add) 3445 sc->vtnet_vlan_filter[idx] |= (1 << bit); 3446 else 3447 sc->vtnet_vlan_filter[idx] &= ~(1 << bit); 3448 3449 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER && 3450 vtnet_exec_vlan_filter(sc, add, tag) != 0) { 3451 device_printf(sc->vtnet_dev, 3452 "cannot %s VLAN %d %s the host filter table\n", 3453 add ? "add" : "remove", tag, add ? "to" : "from"); 3454 } 3455 3456 VTNET_CORE_UNLOCK(sc); 3457 } 3458 3459 static void 3460 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 3461 { 3462 3463 if (ifp->if_softc != arg) 3464 return; 3465 3466 vtnet_update_vlan_filter(arg, 1, tag); 3467 } 3468 3469 static void 3470 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 3471 { 3472 3473 if (ifp->if_softc != arg) 3474 return; 3475 3476 vtnet_update_vlan_filter(arg, 0, tag); 3477 } 3478 3479 static int 3480 vtnet_is_link_up(struct vtnet_softc *sc) 3481 { 3482 device_t dev; 3483 struct ifnet *ifp; 3484 uint16_t status; 3485 3486 dev = sc->vtnet_dev; 3487 ifp = sc->vtnet_ifp; 3488 3489 if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0) 3490 status = VIRTIO_NET_S_LINK_UP; 3491 else 3492 status = virtio_read_dev_config_2(dev, 3493 offsetof(struct virtio_net_config, status)); 3494 3495 return ((status & VIRTIO_NET_S_LINK_UP) != 0); 3496 } 3497 3498 static void 3499 vtnet_update_link_status(struct vtnet_softc *sc) 3500 { 3501 struct ifnet *ifp; 3502 int link; 3503 3504 ifp = sc->vtnet_ifp; 3505 3506 VTNET_CORE_LOCK_ASSERT(sc); 3507 link = vtnet_is_link_up(sc); 3508 3509 /* Notify if the link status has changed. 
*/ 3510 if (link != 0 && sc->vtnet_link_active == 0) { 3511 sc->vtnet_link_active = 1; 3512 if_link_state_change(ifp, LINK_STATE_UP); 3513 } else if (link == 0 && sc->vtnet_link_active != 0) { 3514 sc->vtnet_link_active = 0; 3515 if_link_state_change(ifp, LINK_STATE_DOWN); 3516 } 3517 } 3518 3519 static int 3520 vtnet_ifmedia_upd(struct ifnet *ifp) 3521 { 3522 struct vtnet_softc *sc; 3523 struct ifmedia *ifm; 3524 3525 sc = ifp->if_softc; 3526 ifm = &sc->vtnet_media; 3527 3528 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3529 return (EINVAL); 3530 3531 return (0); 3532 } 3533 3534 static void 3535 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3536 { 3537 struct vtnet_softc *sc; 3538 3539 sc = ifp->if_softc; 3540 3541 ifmr->ifm_status = IFM_AVALID; 3542 ifmr->ifm_active = IFM_ETHER; 3543 3544 VTNET_CORE_LOCK(sc); 3545 if (vtnet_is_link_up(sc) != 0) { 3546 ifmr->ifm_status |= IFM_ACTIVE; 3547 ifmr->ifm_active |= VTNET_MEDIATYPE; 3548 } else 3549 ifmr->ifm_active |= IFM_NONE; 3550 VTNET_CORE_UNLOCK(sc); 3551 } 3552 3553 static void 3554 vtnet_set_hwaddr(struct vtnet_softc *sc) 3555 { 3556 device_t dev; 3557 int i; 3558 3559 dev = sc->vtnet_dev; 3560 3561 if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { 3562 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0) 3563 device_printf(dev, "unable to set MAC address\n"); 3564 } else if (sc->vtnet_flags & VTNET_FLAG_MAC) { 3565 for (i = 0; i < ETHER_ADDR_LEN; i++) { 3566 virtio_write_dev_config_1(dev, 3567 offsetof(struct virtio_net_config, mac) + i, 3568 sc->vtnet_hwaddr[i]); 3569 } 3570 } 3571 } 3572 3573 static void 3574 vtnet_get_hwaddr(struct vtnet_softc *sc) 3575 { 3576 device_t dev; 3577 int i; 3578 3579 dev = sc->vtnet_dev; 3580 3581 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { 3582 /* 3583 * Generate a random locally administered unicast address. 3584 * 3585 * It would be nice to generate the same MAC address across 3586 * reboots, but it seems all the hosts currently available 3587 * support the MAC feature, so this isn't too important. 3588 */ 3589 sc->vtnet_hwaddr[0] = 0xB2; 3590 arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); 3591 vtnet_set_hwaddr(sc); 3592 return; 3593 } 3594 3595 for (i = 0; i < ETHER_ADDR_LEN; i++) { 3596 sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev, 3597 offsetof(struct virtio_net_config, mac) + i); 3598 } 3599 } 3600 3601 static void 3602 vtnet_vlan_tag_remove(struct mbuf *m) 3603 { 3604 struct ether_vlan_header *evh; 3605 3606 evh = mtod(m, struct ether_vlan_header *); 3607 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); 3608 m->m_flags |= M_VLANTAG; 3609 3610 /* Strip the 802.1Q header. */ 3611 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, 3612 ETHER_HDR_LEN - ETHER_TYPE_LEN); 3613 m_adj(m, ETHER_VLAN_ENCAP_LEN); 3614 } 3615 3616 static void 3617 vtnet_set_rx_process_limit(struct vtnet_softc *sc) 3618 { 3619 int limit; 3620 3621 limit = vtnet_tunable_int(sc, "rx_process_limit", 3622 vtnet_rx_process_limit); 3623 if (limit < 0) 3624 limit = INT_MAX; 3625 sc->vtnet_rx_process_limit = limit; 3626 } 3627 3628 static void 3629 vtnet_set_tx_intr_threshold(struct vtnet_softc *sc) 3630 { 3631 device_t dev; 3632 int size, thresh; 3633 3634 dev = sc->vtnet_dev; 3635 size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq); 3636 3637 /* 3638 * The Tx interrupt is disabled until the queue free count falls 3639 * below our threshold. 
Completed frames are drained from the Tx 3640 * virtqueue before transmitting new frames and in the watchdog 3641 * callout, so the frequency of Tx interrupts is greatly reduced, 3642 * at the cost of not freeing mbufs as quickly as they otherwise 3643 * would be. 3644 * 3645 * N.B. We assume all the Tx queues are the same size. 3646 */ 3647 thresh = size / 4; 3648 3649 /* 3650 * Without indirect descriptors, leave enough room for the most 3651 * segments we handle. 3652 */ 3653 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0 && 3654 thresh < sc->vtnet_tx_nsegs) 3655 thresh = sc->vtnet_tx_nsegs; 3656 3657 sc->vtnet_tx_intr_thresh = thresh; 3658 } 3659 3660 static void 3661 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, 3662 struct sysctl_oid_list *child, struct vtnet_rxq *rxq) 3663 { 3664 struct sysctl_oid *node; 3665 struct sysctl_oid_list *list; 3666 struct vtnet_rxq_stats *stats; 3667 char namebuf[16]; 3668 3669 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id); 3670 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 3671 CTLFLAG_RD, NULL, "Receive Queue"); 3672 list = SYSCTL_CHILDREN(node); 3673 3674 stats = &rxq->vtnrx_stats; 3675 3676 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD, 3677 &stats->vrxs_ipackets, "Receive packets"); 3678 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD, 3679 &stats->vrxs_ibytes, "Receive bytes"); 3680 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD, 3681 &stats->vrxs_iqdrops, "Receive drops"); 3682 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD, 3683 &stats->vrxs_ierrors, "Receive errors"); 3684 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, 3685 &stats->vrxs_csum, "Receive checksum offloaded"); 3686 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, 3687 &stats->vrxs_csum_failed, "Receive checksum offload failed"); 3688 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, 3689 &stats->vrxs_rescheduled, 3690 "Receive interrupt handler rescheduled"); 3691 } 3692 3693 static void 3694 vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx, 3695 struct sysctl_oid_list *child, struct vtnet_txq *txq) 3696 { 3697 struct sysctl_oid *node; 3698 struct sysctl_oid_list *list; 3699 struct vtnet_txq_stats *stats; 3700 char namebuf[16]; 3701 3702 snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id); 3703 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 3704 CTLFLAG_RD, NULL, "Transmit Queue"); 3705 list = SYSCTL_CHILDREN(node); 3706 3707 stats = &txq->vtntx_stats; 3708 3709 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD, 3710 &stats->vtxs_opackets, "Transmit packets"); 3711 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD, 3712 &stats->vtxs_obytes, "Transmit bytes"); 3713 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD, 3714 &stats->vtxs_omcasts, "Transmit multicasts"); 3715 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, 3716 &stats->vtxs_csum, "Transmit checksum offloaded"); 3717 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, 3718 &stats->vtxs_tso, "Transmit segmentation offloaded"); 3719 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, 3720 &stats->vtxs_rescheduled, 3721 "Transmit interrupt handler rescheduled"); 3722 } 3723 3724 static void 3725 vtnet_setup_queue_sysctl(struct vtnet_softc *sc) 3726 { 3727 device_t dev; 3728 struct sysctl_ctx_list *ctx; 3729 struct sysctl_oid *tree; 3730 struct sysctl_oid_list *child; 3731 int i; 3732 3733 dev = sc->vtnet_dev; 3734 ctx = 
device_get_sysctl_ctx(dev); 3735 tree = device_get_sysctl_tree(dev); 3736 child = SYSCTL_CHILDREN(tree); 3737 3738 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 3739 vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); 3740 vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); 3741 } 3742 } 3743 3744 static void 3745 vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx, 3746 struct sysctl_oid_list *child, struct vtnet_softc *sc) 3747 { 3748 struct vtnet_statistics *stats; 3749 struct vtnet_rxq_stats rxaccum; 3750 struct vtnet_txq_stats txaccum; 3751 3752 vtnet_accum_stats(sc, &rxaccum, &txaccum); 3753 3754 stats = &sc->vtnet_stats; 3755 stats->rx_csum_offloaded = rxaccum.vrxs_csum; 3756 stats->rx_csum_failed = rxaccum.vrxs_csum_failed; 3757 stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled; 3758 stats->tx_csum_offloaded = txaccum.vtxs_csum; 3759 stats->tx_tso_offloaded = txaccum.vtxs_tso; 3760 stats->tx_task_rescheduled = txaccum.vtxs_rescheduled; 3761 3762 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed", 3763 CTLFLAG_RD, &stats->mbuf_alloc_failed, 3764 "Mbuf cluster allocation failures"); 3765 3766 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large", 3767 CTLFLAG_RD, &stats->rx_frame_too_large, 3768 "Received frame larger than the mbuf chain"); 3769 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed", 3770 CTLFLAG_RD, &stats->rx_enq_replacement_failed, 3771 "Enqueuing the replacement receive mbuf failed"); 3772 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed", 3773 CTLFLAG_RD, &stats->rx_mergeable_failed, 3774 "Mergeable buffers receive failures"); 3775 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", 3776 CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 3777 "Received checksum offloaded buffer with unsupported " 3778 "Ethernet type"); 3779 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", 3780 CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 3781 "Received checksum offloaded buffer with incorrect IP protocol"); 3782 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset", 3783 CTLFLAG_RD, &stats->rx_csum_bad_offset, 3784 "Received checksum offloaded buffer with incorrect offset"); 3785 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto", 3786 CTLFLAG_RD, &stats->rx_csum_bad_proto, 3787 "Received checksum offloaded buffer with incorrect protocol"); 3788 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed", 3789 CTLFLAG_RD, &stats->rx_csum_failed, 3790 "Received buffer checksum offload failed"); 3791 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded", 3792 CTLFLAG_RD, &stats->rx_csum_offloaded, 3793 "Received buffer checksum offload succeeded"); 3794 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled", 3795 CTLFLAG_RD, &stats->rx_task_rescheduled, 3796 "Times the receive interrupt task rescheduled itself"); 3797 3798 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", 3799 CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 3800 "Aborted transmit of checksum offloaded buffer with unknown " 3801 "Ethernet type"); 3802 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", 3803 CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 3804 "Aborted transmit of TSO buffer with unknown Ethernet type"); 3805 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", 3806 CTLFLAG_RD, &stats->tx_tso_not_tcp, 3807 "Aborted transmit of TSO buffer with non TCP protocol"); 3808 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", 3809 CTLFLAG_RD, &stats->tx_defragged, 3810 "Transmit mbufs defragged"); 3811 
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed", 3812 CTLFLAG_RD, &stats->tx_defrag_failed, 3813 "Aborted transmit of buffer because defrag failed"); 3814 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded", 3815 CTLFLAG_RD, &stats->tx_csum_offloaded, 3816 "Offloaded checksum of transmitted buffer"); 3817 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded", 3818 CTLFLAG_RD, &stats->tx_tso_offloaded, 3819 "Segmentation offload of transmitted buffer"); 3820 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled", 3821 CTLFLAG_RD, &stats->tx_task_rescheduled, 3822 "Times the transmit interrupt task rescheduled itself"); 3823 } 3824 3825 static void 3826 vtnet_setup_sysctl(struct vtnet_softc *sc) 3827 { 3828 device_t dev; 3829 struct sysctl_ctx_list *ctx; 3830 struct sysctl_oid *tree; 3831 struct sysctl_oid_list *child; 3832 3833 dev = sc->vtnet_dev; 3834 ctx = device_get_sysctl_ctx(dev); 3835 tree = device_get_sysctl_tree(dev); 3836 child = SYSCTL_CHILDREN(tree); 3837 3838 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", 3839 CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, 3840 "Maximum number of supported virtqueue pairs"); 3841 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", 3842 CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, 3843 "Number of active virtqueue pairs"); 3844 3845 vtnet_setup_stat_sysctl(ctx, child, sc); 3846 } 3847 3848 static int 3849 vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) 3850 { 3851 3852 return (virtqueue_enable_intr(rxq->vtnrx_vq)); 3853 } 3854 3855 static void 3856 vtnet_rxq_disable_intr(struct vtnet_rxq *rxq) 3857 { 3858 3859 virtqueue_disable_intr(rxq->vtnrx_vq); 3860 } 3861 3862 static int 3863 vtnet_txq_enable_intr(struct vtnet_txq *txq) 3864 { 3865 struct virtqueue *vq; 3866 3867 vq = txq->vtntx_vq; 3868 3869 if (vtnet_txq_below_threshold(txq) != 0) 3870 return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG)); 3871 3872 /* 3873 * The free count is above our threshold. Keep the Tx interrupt 3874 * disabled until the queue is fuller. 
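 * Returning zero also lets vtnet_txq_notify() finish without draining
 * completions: with this much space available there is no urgency to
 * reclaim descriptors, so that is left to a later transmit or to the
 * watchdog callout.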
3875 */ 3876 return (0); 3877 } 3878 3879 static void 3880 vtnet_txq_disable_intr(struct vtnet_txq *txq) 3881 { 3882 3883 virtqueue_disable_intr(txq->vtntx_vq); 3884 } 3885 3886 static void 3887 vtnet_enable_rx_interrupts(struct vtnet_softc *sc) 3888 { 3889 int i; 3890 3891 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) 3892 vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]); 3893 } 3894 3895 static void 3896 vtnet_enable_tx_interrupts(struct vtnet_softc *sc) 3897 { 3898 int i; 3899 3900 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) 3901 vtnet_txq_enable_intr(&sc->vtnet_txqs[i]); 3902 } 3903 3904 static void 3905 vtnet_enable_interrupts(struct vtnet_softc *sc) 3906 { 3907 3908 vtnet_enable_rx_interrupts(sc); 3909 vtnet_enable_tx_interrupts(sc); 3910 } 3911 3912 static void 3913 vtnet_disable_rx_interrupts(struct vtnet_softc *sc) 3914 { 3915 int i; 3916 3917 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) 3918 vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); 3919 } 3920 3921 static void 3922 vtnet_disable_tx_interrupts(struct vtnet_softc *sc) 3923 { 3924 int i; 3925 3926 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) 3927 vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); 3928 } 3929 3930 static void 3931 vtnet_disable_interrupts(struct vtnet_softc *sc) 3932 { 3933 3934 vtnet_disable_rx_interrupts(sc); 3935 vtnet_disable_tx_interrupts(sc); 3936 } 3937 3938 static int 3939 vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def) 3940 { 3941 char path[64]; 3942 3943 snprintf(path, sizeof(path), 3944 "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob); 3945 TUNABLE_INT_FETCH(path, &def); 3946 3947 return (def); 3948 } 3949
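/*
 * Usage note: the per-device knobs fetched by vtnet_tunable_int() come
 * from the kernel environment, so they can be set from loader.conf(5).
 * For example (the value below is only illustrative):
 *
 *	hw.vtnet.0.rx_process_limit="512"
 *
 * overrides the default Rx processing limit used by
 * vtnet_set_rx_process_limit() for unit 0.
 */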