/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/uma.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>

#include "virtio_if.h"

static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static void	vtnet_watchdog(struct vtnet_softc *);
static void	vtnet_config_change_task(void *, int);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

#ifdef DEVICE_POLLING
static poll_handler_t vtnet_poll;
#endif

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *, int);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *);
static void	vtnet_start(struct ifnet *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *, int);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);

static void	vtnet_rx_filter(struct vtnet_softc *sc);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_set_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

/* Tunables. */
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 0;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
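/*
 * The TUNABLE_INT() entries above are boot-time tunables; for example,
 * hw.vtnet.tso_disable="1" in /boot/loader.conf would disable TSO.
 * (The setting is shown only as an illustration of the knob, not as a
 * recommended default.)
 */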

/*
 * Reducing the number of transmit completion interrupts can
 * improve performance.  To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths.  The price to pay for this
 * is that the freeing of transmitted mbufs may be delayed
 * until the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION

static uma_zone_t vtnet_tx_header_zone;

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},

	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
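	/*
	 * The host signals device configuration space changes (e.g. the
	 * link status field) through this method; the handler defers the
	 * work to a taskqueue.
	 */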
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};
static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
		    sizeof(struct vtnet_tx_header),
		    NULL, NULL, NULL, NULL, 0, 0);
		break;
	case MOD_QUIESCE:
	case MOD_UNLOAD:
		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
			error = EBUSY;
		else if (type == MOD_UNLOAD) {
			uma_zdestroy(vtnet_tx_header_zone);
			vtnet_tx_header_zone = NULL;
		}
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtnet_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int tx_size, error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	VTNET_LOCK_INIT(sc);
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_MTX(sc), 0);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);

	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) {
			sc->vtnet_mac_filter = malloc(
			    sizeof(struct vtnet_mac_filter), M_DEVBUF,
			    M_NOWAIT | M_ZERO);
			if (sc->vtnet_mac_filter == NULL) {
				device_printf(dev,
				    "cannot allocate mac filter table\n");
				error = ENOMEM;
				goto fail;
			}

			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		}

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
	}

	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

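	/*
	 * The Rx process limit below is set to the ring size, so a single
	 * pass of the Rx taskqueue handler may drain a full virtqueue of
	 * frames before rescheduling itself.
	 */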
	sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_rx_process_limit = sc->vtnet_rx_size;

	tx_size = virtqueue_size(sc->vtnet_tx_vq);
	sc->vtnet_tx_size = tx_size;
	IFQ_SET_MAXLEN(&ifp->if_snd, tx_size - 1);
	ifp->if_snd.ifq_drv_maxlen = tx_size - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
		ifp->if_capabilities |= IFCAP_LINKSTATE;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive.  We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	TASK_INIT(&sc->vtnet_rx_intr_task, 0, vtnet_rx_intr_task, sc);
	TASK_INIT(&sc->vtnet_tx_intr_task, 0, vtnet_tx_intr_task, sc);
	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	sc->vtnet_tq = taskqueue_create_fast("vtnet_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vtnet_tq);
	if (sc->vtnet_tq == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate taskqueue\n");
		ether_ifdetach(ifp);
		goto fail;
	}
	taskqueue_start_threads(&sc->vtnet_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));

	error = virtio_setup_intr(dev, INTR_TYPE_NET);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		taskqueue_free(sc->vtnet_tq);
		sc->vtnet_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility.  Turn it off if possible.
	 */
457 */ 458 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 459 VTNET_LOCK(sc); 460 if (vtnet_set_promisc(sc, 0) != 0) { 461 ifp->if_flags |= IFF_PROMISC; 462 device_printf(dev, 463 "cannot disable promiscuous mode\n"); 464 } 465 VTNET_UNLOCK(sc); 466 } else 467 ifp->if_flags |= IFF_PROMISC; 468 469 fail: 470 if (error) 471 vtnet_detach(dev); 472 473 return (error); 474 } 475 476 static int 477 vtnet_detach(device_t dev) 478 { 479 struct vtnet_softc *sc; 480 struct ifnet *ifp; 481 482 sc = device_get_softc(dev); 483 ifp = sc->vtnet_ifp; 484 485 KASSERT(mtx_initialized(VTNET_MTX(sc)), 486 ("vtnet mutex not initialized")); 487 488 #ifdef DEVICE_POLLING 489 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 490 ether_poll_deregister(ifp); 491 #endif 492 493 if (device_is_attached(dev)) { 494 VTNET_LOCK(sc); 495 vtnet_stop(sc); 496 VTNET_UNLOCK(sc); 497 498 callout_drain(&sc->vtnet_tick_ch); 499 taskqueue_drain(taskqueue_fast, &sc->vtnet_cfgchg_task); 500 501 ether_ifdetach(ifp); 502 } 503 504 if (sc->vtnet_tq != NULL) { 505 taskqueue_drain(sc->vtnet_tq, &sc->vtnet_rx_intr_task); 506 taskqueue_drain(sc->vtnet_tq, &sc->vtnet_tx_intr_task); 507 taskqueue_free(sc->vtnet_tq); 508 sc->vtnet_tq = NULL; 509 } 510 511 if (sc->vtnet_vlan_attach != NULL) { 512 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); 513 sc->vtnet_vlan_attach = NULL; 514 } 515 if (sc->vtnet_vlan_detach != NULL) { 516 EVENTHANDLER_DEREGISTER(vlan_unconfg, sc->vtnet_vlan_detach); 517 sc->vtnet_vlan_detach = NULL; 518 } 519 520 if (sc->vtnet_mac_filter != NULL) { 521 free(sc->vtnet_mac_filter, M_DEVBUF); 522 sc->vtnet_mac_filter = NULL; 523 } 524 525 if (ifp != NULL) { 526 if_free(ifp); 527 sc->vtnet_ifp = NULL; 528 } 529 530 if (sc->vtnet_rx_vq != NULL) 531 vtnet_free_rx_mbufs(sc); 532 if (sc->vtnet_tx_vq != NULL) 533 vtnet_free_tx_mbufs(sc); 534 if (sc->vtnet_ctrl_vq != NULL) 535 vtnet_free_ctrl_vq(sc); 536 537 ifmedia_removeall(&sc->vtnet_media); 538 VTNET_LOCK_DESTROY(sc); 539 540 return (0); 541 } 542 543 static int 544 vtnet_suspend(device_t dev) 545 { 546 struct vtnet_softc *sc; 547 548 sc = device_get_softc(dev); 549 550 VTNET_LOCK(sc); 551 vtnet_stop(sc); 552 sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; 553 VTNET_UNLOCK(sc); 554 555 return (0); 556 } 557 558 static int 559 vtnet_resume(device_t dev) 560 { 561 struct vtnet_softc *sc; 562 struct ifnet *ifp; 563 564 sc = device_get_softc(dev); 565 ifp = sc->vtnet_ifp; 566 567 VTNET_LOCK(sc); 568 if (ifp->if_flags & IFF_UP) 569 vtnet_init_locked(sc); 570 sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; 571 VTNET_UNLOCK(sc); 572 573 return (0); 574 } 575 576 static int 577 vtnet_shutdown(device_t dev) 578 { 579 580 /* 581 * Suspend already does all of what we need to 582 * do here; we just never expect to be resumed. 583 */ 584 return (vtnet_suspend(dev)); 585 } 586 587 static int 588 vtnet_config_change(device_t dev) 589 { 590 struct vtnet_softc *sc; 591 592 sc = device_get_softc(dev); 593 594 taskqueue_enqueue_fast(taskqueue_fast, &sc->vtnet_cfgchg_task); 595 596 return (1); 597 } 598 599 static void 600 vtnet_negotiate_features(struct vtnet_softc *sc) 601 { 602 device_t dev; 603 uint64_t mask, features; 604 605 dev = sc->vtnet_dev; 606 mask = 0; 607 608 if (vtnet_csum_disable) 609 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; 610 611 /* 612 * TSO and LRO are only available when their corresponding 613 * checksum offload feature is also negotiated. 
614 */ 615 616 if (vtnet_csum_disable || vtnet_tso_disable) 617 mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | 618 VIRTIO_NET_F_HOST_ECN; 619 620 if (vtnet_csum_disable || vtnet_lro_disable) 621 mask |= VTNET_LRO_FEATURES; 622 623 features = VTNET_FEATURES & ~mask; 624 #ifdef VTNET_TX_INTR_MODERATION 625 features |= VIRTIO_F_NOTIFY_ON_EMPTY; 626 #endif 627 sc->vtnet_features = virtio_negotiate_features(dev, features); 628 629 if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0 && 630 virtio_with_feature(dev, VTNET_LRO_FEATURES)) { 631 /* 632 * LRO without mergeable buffers requires special care. This 633 * is not ideal because every receive buffer must be large 634 * enough to hold the maximum TCP packet, the Ethernet header, 635 * and the vtnet_rx_header. This requires up to 34 descriptors 636 * when using MCLBYTES clusters. If we do not have indirect 637 * descriptors, LRO is disabled since the virtqueue will not 638 * be able to contain very many receive buffers. 639 */ 640 if (virtio_with_feature(dev, 641 VIRTIO_RING_F_INDIRECT_DESC) == 0) { 642 device_printf(dev, 643 "LRO disabled due to lack of both mergeable " 644 "buffers and indirect descriptors\n"); 645 646 sc->vtnet_features = virtio_negotiate_features(dev, 647 features & ~VTNET_LRO_FEATURES); 648 } else 649 sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; 650 } 651 } 652 653 static int 654 vtnet_alloc_virtqueues(struct vtnet_softc *sc) 655 { 656 device_t dev; 657 struct vq_alloc_info vq_info[3]; 658 int nvqs, rxsegs; 659 660 dev = sc->vtnet_dev; 661 nvqs = 2; 662 663 /* 664 * Indirect descriptors are not needed for the Rx 665 * virtqueue when mergeable buffers are negotiated. 666 * The header is placed inline with the data, not 667 * in a separate descriptor, and mbuf clusters are 668 * always physically contiguous. 669 */ 670 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 671 rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ? 672 VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS; 673 } else 674 rxsegs = 0; 675 676 VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs, 677 vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq, 678 "%s receive", device_get_nameunit(dev)); 679 680 VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS, 681 vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq, 682 "%s transmit", device_get_nameunit(dev)); 683 684 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 685 nvqs++; 686 687 VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL, 688 &sc->vtnet_ctrl_vq, "%s control", 689 device_get_nameunit(dev)); 690 } 691 692 return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); 693 } 694 695 static void 696 vtnet_get_hwaddr(struct vtnet_softc *sc) 697 { 698 device_t dev; 699 700 dev = sc->vtnet_dev; 701 702 if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { 703 virtio_read_device_config(dev, 704 offsetof(struct virtio_net_config, mac), 705 sc->vtnet_hwaddr, ETHER_ADDR_LEN); 706 } else { 707 /* Generate random locally administered unicast address. 
		sc->vtnet_hwaddr[0] = 0xB2;
		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);

		vtnet_set_hwaddr(sc);
	}
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	virtio_write_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);

	if ((ifp->if_capenable & IFCAP_LINKSTATE) == 0)
		return (1);

	status = virtio_read_dev_config_2(dev,
	    offsetof(struct virtio_net_config, status));

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	int link;

	ifp = sc->vtnet_ifp;

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if_link_state_change(ifp, LINK_STATE_UP);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vtnet_start_locked(ifp);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}

static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif

	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vtnet_init_locked(sc);
}

static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	VTNET_LOCK(sc);
	vtnet_update_link_status(sc);
	VTNET_UNLOCK(sc);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			VTNET_LOCK(sc);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			VTNET_UNLOCK(sc);
		}
		break;

	case SIOCSIFFLAGS:
		VTNET_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		VTNET_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTNET_LOCK(sc);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING))
			vtnet_rx_filter_mac(sc);
		VTNET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
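		/* Determine which capability bits this request toggles. */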
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vtnet_poll, ifp);
				if (error)
					break;

				VTNET_LOCK(sc);
				vtnet_disable_rx_intr(sc);
				vtnet_disable_tx_intr(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				VTNET_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);

				/* Enable interrupts even in error case. */
				VTNET_LOCK(sc);
				vtnet_enable_tx_intr(sc);
				vtnet_enable_rx_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VTNET_UNLOCK(sc);
			}
		}
#endif
		VTNET_LOCK(sc);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vtnet_init_locked(sc);
		}
		VLAN_CAPABILITIES(ifp);

		VTNET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VTNET_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}

static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	nbufs = 0;
	error = ENOSPC;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf.  This is not
		 * an error.  We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

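/*
 * Called from vtnet_stop() and vtnet_detach() to reclaim any mbufs
 * still owned by the Rx virtqueue.
 */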
static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{

	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("Ctrl Vq not empty"));
}

#ifdef DEVICE_POLLING
static int
vtnet_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vtnet_softc *sc;
	int rx_done;

	sc = ifp->if_softc;
	rx_done = 0;

	VTNET_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		if (cmd == POLL_AND_CHECK_STATUS)
			vtnet_update_link_status(sc);

		if (virtqueue_nused(sc->vtnet_rx_vq) > 0)
			vtnet_rxeof(sc, count, &rx_done);

		vtnet_txeof(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vtnet_start_locked(ifp);
	}
	VTNET_UNLOCK(sc);

	return (rx_done);
}
#endif /* DEVICE_POLLING */

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 1; i < nbufs; i++) {
			m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ||
	    m->m_next == NULL, ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame.  When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain.  Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain.  We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued, so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf.  This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ||
	    m->m_next == NULL, ("chained Rx mbuf without LRO_NOMRG"));

	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

#ifdef notyet
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct udphdr *udp;
	int ip_offset, csum_start, csum_offset, hlen;
	uint16_t eth_type;
	uint8_t ip_proto;

	/*
	 * Convert the VirtIO checksum interface to FreeBSD's interface.
	 * The host only provides us with the offset at which to start
	 * checksumming, and the offset from that to place the completed
	 * checksum.  While this maps well with how Linux does checksums,
	 * for FreeBSD, we must parse the received packet in order to set
	 * the appropriate CSUM_* flags.
	 */

	/*
	 * Every mbuf added to the receive virtqueue is always at least
	 * MCLBYTES big, so assume something is amiss if the first mbuf
	 * does not contain both the Ethernet and protocol headers.
	 */
	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset)
			return (1);
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip))
			return (1);

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		/* Sanity check the IP header. */
		if (ip->ip_v != IPVERSION)
			return (1);
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			return (1);
		if (ntohs(ip->ip_len) < hlen)
			return (1);
		if (ntohs(ip->ip_len) != (m->m_pkthdr.len - ip_offset))
			return (1);

		ip_proto = ip->ip_p;
		csum_start = ip_offset + hlen;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr))
			return (1);

		/*
		 * XXX FreeBSD does not handle any IPv6 checksum offloading
		 * at the moment.
		 */

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/* XXX Assume no extension headers are present. */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Assume checksum begins right after the IP header. */
	if (hdr->csum_start != csum_start) {
		sc->vtnet_stats.rx_csum_bad_start++;
		return (1);
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		csum_offset = offsetof(struct tcphdr, th_sum);
		break;

	case IPPROTO_UDP:
		csum_offset = offsetof(struct udphdr, uh_sum);
		break;

	case IPPROTO_SCTP:
		csum_offset = offsetof(struct sctphdr, checksum);
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_ipproto++;
		return (1);
	}

	if (hdr->csum_offset != csum_offset) {
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	/*
	 * The IP header checksum is almost certainly valid but I'm
	 * uncertain if that is guaranteed.
	 *
	 * m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
	 */

	switch (ip_proto) {
	case IPPROTO_UDP:
		if (m->m_len < csum_start + sizeof(struct udphdr))
			return (1);

		udp = (struct udphdr *)(mtod(m, uint8_t *) + csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case IPPROTO_TCP:
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	case IPPROTO_SCTP:
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}
#endif

/*
 * Alternative method of doing receive checksum offloading.  Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate.  We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
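	/*
	 * These offsets are distinct: uh_sum is at offset 6 within the UDP
	 * header, th_sum at offset 16 within the TCP header, and the SCTP
	 * checksum at offset 8, so csum_offset alone identifies the
	 * protocol.
	 */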
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	case offsetof(struct sctphdr, checksum):
		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto fail;
		}

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}

static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	VTNET_LOCK_ASSERT(sc);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		ifp->if_ipackets++;

		/*
		 * Save copy of header before we strip it.  For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data.  We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		VTNET_UNLOCK(sc);
		rx_npkts++;
		(*ifp->if_input)(ifp, m);
		VTNET_LOCK(sc);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

static void
vtnet_rx_intr_task(void *arg, int pending)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		VTNET_UNLOCK(sc);
		return;
	}
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
		VTNET_UNLOCK(sc);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

	VTNET_UNLOCK(sc);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		taskqueue_enqueue_fast(sc->vtnet_tq,
		    &sc->vtnet_rx_intr_task);
	}
}

static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_rx_intr_task);

	return (1);
}

static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	VTNET_LOCK_ASSERT(sc);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		ifp->if_opackets++;
		m_freem(txhdr->vth_mbuf);
		uma_zfree(vtnet_tx_header_zone, txhdr);
	}

	if (deq > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}

static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;
	M_ASSERTPKTHDR(m);

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present.  Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->flags |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}

static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int collapsed, error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;
	collapsed = 0;

	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("cannot add header to sglist"));

again:
	error = sglist_append_mbuf(&sg, m);
	if (error) {
		if (collapsed)
			goto fail;

		m = m_collapse(m, M_DONTWAIT, VTNET_MAX_TX_SEGS - 1);
		if (m == NULL)
			goto fail;

		*m_head = m;
		collapsed = 1;
		goto again;
	}

	txhdr->vth_mbuf = m;

	return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0));

fail:
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}

static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	m = *m_head;

	txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
	if (txhdr == NULL) {
		*m_head = NULL;
		m_freem(m);
		return (ENOMEM);
	}

	/*
	 * Always use the non-mergeable header to simplify things.  When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero.  We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
1972 */ 1973 hdr = &txhdr->vth_uhdr.hdr; 1974 1975 if (m->m_flags & M_VLANTAG) { 1976 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1977 if ((*m_head = m) == NULL) { 1978 error = ENOBUFS; 1979 goto fail; 1980 } 1981 m->m_flags &= ~M_VLANTAG; 1982 } 1983 1984 if (m->m_pkthdr.csum_flags != 0) { 1985 m = vtnet_tx_offload(sc, m, hdr); 1986 if ((*m_head = m) == NULL) { 1987 error = ENOBUFS; 1988 goto fail; 1989 } 1990 } 1991 1992 error = vtnet_enqueue_txbuf(sc, m_head, txhdr); 1993 fail: 1994 if (error) 1995 uma_zfree(vtnet_tx_header_zone, txhdr); 1996 1997 return (error); 1998 } 1999 2000 static void 2001 vtnet_start(struct ifnet *ifp) 2002 { 2003 struct vtnet_softc *sc; 2004 2005 sc = ifp->if_softc; 2006 2007 VTNET_LOCK(sc); 2008 vtnet_start_locked(ifp); 2009 VTNET_UNLOCK(sc); 2010 } 2011 2012 static void 2013 vtnet_start_locked(struct ifnet *ifp) 2014 { 2015 struct vtnet_softc *sc; 2016 struct virtqueue *vq; 2017 struct mbuf *m0; 2018 int enq; 2019 2020 sc = ifp->if_softc; 2021 vq = sc->vtnet_tx_vq; 2022 enq = 0; 2023 2024 VTNET_LOCK_ASSERT(sc); 2025 2026 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2027 IFF_DRV_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) 2028 return; 2029 2030 #ifdef VTNET_TX_INTR_MODERATION 2031 if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2) 2032 vtnet_txeof(sc); 2033 #endif 2034 2035 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 2036 if (virtqueue_full(vq)) { 2037 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2038 break; 2039 } 2040 2041 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 2042 if (m0 == NULL) 2043 break; 2044 2045 if (vtnet_encap(sc, &m0) != 0) { 2046 if (m0 == NULL) 2047 break; 2048 IFQ_DRV_PREPEND(&ifp->if_snd, m0); 2049 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2050 break; 2051 } 2052 2053 enq++; 2054 ETHER_BPF_MTAP(ifp, m0); 2055 } 2056 2057 if (enq > 0) { 2058 virtqueue_notify(vq); 2059 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT; 2060 } 2061 } 2062 2063 static void 2064 vtnet_tick(void *xsc) 2065 { 2066 struct vtnet_softc *sc; 2067 2068 sc = xsc; 2069 2070 VTNET_LOCK_ASSERT(sc); 2071 #ifdef VTNET_DEBUG 2072 virtqueue_dump(sc->vtnet_rx_vq); 2073 virtqueue_dump(sc->vtnet_tx_vq); 2074 #endif 2075 2076 vtnet_watchdog(sc); 2077 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 2078 } 2079 2080 static void 2081 vtnet_tx_intr_task(void *arg, int pending) 2082 { 2083 struct vtnet_softc *sc; 2084 struct ifnet *ifp; 2085 2086 sc = arg; 2087 ifp = sc->vtnet_ifp; 2088 2089 VTNET_LOCK(sc); 2090 2091 #ifdef DEVICE_POLLING 2092 if (ifp->if_capenable & IFCAP_POLLING) { 2093 VTNET_UNLOCK(sc); 2094 return; 2095 } 2096 #endif 2097 2098 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2099 vtnet_enable_tx_intr(sc); 2100 VTNET_UNLOCK(sc); 2101 return; 2102 } 2103 2104 vtnet_txeof(sc); 2105 2106 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2107 vtnet_start_locked(ifp); 2108 2109 if (vtnet_enable_tx_intr(sc) != 0) { 2110 vtnet_disable_tx_intr(sc); 2111 sc->vtnet_stats.tx_task_rescheduled++; 2112 VTNET_UNLOCK(sc); 2113 taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_tx_intr_task); 2114 return; 2115 } 2116 2117 VTNET_UNLOCK(sc); 2118 } 2119 2120 static int 2121 vtnet_tx_vq_intr(void *xsc) 2122 { 2123 struct vtnet_softc *sc; 2124 2125 sc = xsc; 2126 2127 vtnet_disable_tx_intr(sc); 2128 taskqueue_enqueue_fast(sc->vtnet_tq, &sc->vtnet_tx_intr_task); 2129 2130 return (1); 2131 } 2132 2133 static void 2134 vtnet_stop(struct vtnet_softc *sc) 2135 { 2136 device_t dev; 2137 struct ifnet *ifp; 2138 2139 dev = sc->vtnet_dev; 2140 ifp = sc->vtnet_ifp; 2141 2142 
static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);

	sc->vtnet_watchdog_timer = 0;
	callout_stop(&sc->vtnet_tick_ch);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter.  Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);

	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}

static int
vtnet_reinit(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	uint64_t features;

	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features.  Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */

	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	return (virtio_reinit(sc->vtnet_dev, features));
}

static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Stop host's adapter, cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update host with assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		vtnet_disable_rx_intr(sc);
		vtnet_disable_tx_intr(sc);
	} else
#endif
	{
		vtnet_enable_rx_intr(sc);
		vtnet_enable_tx_intr(sc);
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}

static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	VTNET_LOCK(sc);
	vtnet_init_locked(sc);
	VTNET_UNLOCK(sc);
}

static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	virtqueue_notify(vq);

	/*
	 * Poll until the command is complete. Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the VTNET_MTX leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors. Two of those vectors are needed for the Rx and Tx
	 * virtqueues. We do not support sharing both a Vq and config
	 * changed notification on the same MSIX vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}

static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}

static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct virtio_net_ctrl_hdr hdr;
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t onoff, ack;
	int error;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
		return (ENOTSUP);

	error = 0;

	hdr.class = VIRTIO_NET_CTRL_RX;
	hdr.cmd = cmd;
	onoff = !!on;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding Rx filter message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}

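/*
 * vtnet_set_promisc() and vtnet_set_allmulti() are thin wrappers around
 * vtnet_ctrl_rx_cmd(), which builds the usual control message layout:
 * a read-only class/command header and on/off byte, followed by a single
 * writable ack byte the host sets to VIRTIO_NET_OK on success.
 */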
static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}

static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr;
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	filter = sc->vtnet_mac_filter;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;
	error = 0;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	/* Unicast MAC addresses: */
	if_addr_rlock(ifp);
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	if_addr_runlock(ifp);

	if (ucnt >= VTNET_MAX_MAC_ENTRIES) {
		promisc = 1;
		filter->vmf_unicast.nentries = 0;

		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (mcnt >= VTNET_MAX_MAC_ENTRIES) {
		allmulti = 1;
		filter->vmf_multicast.nentries = 0;

		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	if (promisc && allmulti)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("error adding MAC filtering message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc)
		if (vtnet_set_promisc(sc, 1) != 0)
			if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti)
		if (vtnet_set_allmulti(sc, 1) != 0)
			if_printf(ifp, "cannot enable all-multicast mode\n");
}

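/*
 * The VIRTIO_NET_CTRL_VLAN_ADD/DEL commands carry the 16-bit VLAN tag as
 * the command-specific data, again followed by a writable ack byte. The
 * command is executed synchronously through the control virtqueue.
 */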
static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct virtio_net_ctrl_hdr hdr;
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t ack;
	int error;

	hdr.class = VIRTIO_NET_CTRL_VLAN;
	hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	ack = VIRTIO_NET_ERR;
	error = 0;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding VLAN control message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}

static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	device_t dev;
	uint32_t w, mask;
	uint16_t tag;
	int i, nvlans, error;

	VTNET_LOCK_ASSERT(sc);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	dev = sc->vtnet_dev;
	nvlans = sc->vtnet_nvlans;
	error = 0;

	/* Enable filtering for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];
		for (mask = 1, tag = i * 32; w != 0; mask <<= 1, tag++) {
			if ((w & mask) != 0) {
				w &= ~mask;
				nvlans--;
				if (vtnet_exec_vlan_filter(sc, 1, tag) != 0)
					error++;
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
	if (error)
		device_printf(dev, "cannot restore VLAN filter table\n");
}

static void
vtnet_set_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	if ((tag == 0) || (tag > 4095))
		return;

	ifp = sc->vtnet_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	VTNET_LOCK(sc);

	/* Update shadow VLAN table. */
	if (add) {
		sc->vtnet_nvlans++;
		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
	} else {
		sc->vtnet_nvlans--;
		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		if (vtnet_exec_vlan_filter(sc, add, tag) != 0) {
			device_printf(sc->vtnet_dev,
			    "cannot %s VLAN %d %s the host filter table\n",
			    add ? "add" : "remove", tag,
			    add ? "to" : "from");
		}
	}

	VTNET_UNLOCK(sc);
}

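/*
 * The shadow VLAN table packs the 4096 possible VLAN IDs into an array
 * of 32-bit words: idx = tag >> 5 selects the word and bit = tag & 0x1F
 * selects the bit within it. For example, VLAN 100 maps to word 3, bit 4.
 * The shadow copy lets vtnet_rx_filter_vlan() replay the filter table to
 * the host after a reinit.
 */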
"to" : "from"); 2588 } 2589 } 2590 2591 VTNET_UNLOCK(sc); 2592 } 2593 2594 static void 2595 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2596 { 2597 2598 if (ifp->if_softc != arg) 2599 return; 2600 2601 vtnet_set_vlan_filter(arg, 1, tag); 2602 } 2603 2604 static void 2605 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2606 { 2607 2608 if (ifp->if_softc != arg) 2609 return; 2610 2611 vtnet_set_vlan_filter(arg, 0, tag); 2612 } 2613 2614 static int 2615 vtnet_ifmedia_upd(struct ifnet *ifp) 2616 { 2617 struct vtnet_softc *sc; 2618 struct ifmedia *ifm; 2619 2620 sc = ifp->if_softc; 2621 ifm = &sc->vtnet_media; 2622 2623 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2624 return (EINVAL); 2625 2626 return (0); 2627 } 2628 2629 static void 2630 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2631 { 2632 struct vtnet_softc *sc; 2633 2634 sc = ifp->if_softc; 2635 2636 ifmr->ifm_status = IFM_AVALID; 2637 ifmr->ifm_active = IFM_ETHER; 2638 2639 VTNET_LOCK(sc); 2640 if (vtnet_is_link_up(sc) != 0) { 2641 ifmr->ifm_status |= IFM_ACTIVE; 2642 ifmr->ifm_active |= VTNET_MEDIATYPE; 2643 } else 2644 ifmr->ifm_active |= IFM_NONE; 2645 VTNET_UNLOCK(sc); 2646 } 2647 2648 static void 2649 vtnet_add_statistics(struct vtnet_softc *sc) 2650 { 2651 device_t dev; 2652 struct vtnet_statistics *stats; 2653 struct sysctl_ctx_list *ctx; 2654 struct sysctl_oid *tree; 2655 struct sysctl_oid_list *child; 2656 2657 dev = sc->vtnet_dev; 2658 stats = &sc->vtnet_stats; 2659 ctx = device_get_sysctl_ctx(dev); 2660 tree = device_get_sysctl_tree(dev); 2661 child = SYSCTL_CHILDREN(tree); 2662 2663 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed", 2664 CTLFLAG_RD, &stats->mbuf_alloc_failed, 2665 "Mbuf cluster allocation failures"); 2666 2667 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large", 2668 CTLFLAG_RD, &stats->rx_frame_too_large, 2669 "Received frame larger than the mbuf chain"); 2670 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed", 2671 CTLFLAG_RD, &stats->rx_enq_replacement_failed, 2672 "Enqueuing the replacement receive mbuf failed"); 2673 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed", 2674 CTLFLAG_RD, &stats->rx_mergeable_failed, 2675 "Mergeable buffers receive failures"); 2676 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", 2677 CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 2678 "Received checksum offloaded buffer with unsupported " 2679 "Ethernet type"); 2680 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start", 2681 CTLFLAG_RD, &stats->rx_csum_bad_start, 2682 "Received checksum offloaded buffer with incorrect start offset"); 2683 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", 2684 CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 2685 "Received checksum offloaded buffer with incorrect IP protocol"); 2686 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset", 2687 CTLFLAG_RD, &stats->rx_csum_bad_offset, 2688 "Received checksum offloaded buffer with incorrect offset"); 2689 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed", 2690 CTLFLAG_RD, &stats->rx_csum_failed, 2691 "Received buffer checksum offload failed"); 2692 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded", 2693 CTLFLAG_RD, &stats->rx_csum_offloaded, 2694 "Received buffer checksum offload succeeded"); 2695 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled", 2696 CTLFLAG_RD, &stats->rx_task_rescheduled, 2697 "Times the receive interrupt task rescheduled itself"); 2698 2699 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, 
"tx_csum_offloaded", 2700 CTLFLAG_RD, &stats->tx_csum_offloaded, 2701 "Offloaded checksum of transmitted buffer"); 2702 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_offloaded", 2703 CTLFLAG_RD, &stats->tx_tso_offloaded, 2704 "Segmentation offload of transmitted buffer"); 2705 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", 2706 CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 2707 "Aborted transmit of checksum offloaded buffer with unknown " 2708 "Ethernet type"); 2709 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", 2710 CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 2711 "Aborted transmit of TSO buffer with unknown Ethernet type"); 2712 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_task_rescheduled", 2713 CTLFLAG_RD, &stats->tx_task_rescheduled, 2714 "Times the transmit interrupt task rescheduled itself"); 2715 } 2716 2717 static int 2718 vtnet_enable_rx_intr(struct vtnet_softc *sc) 2719 { 2720 2721 return (virtqueue_enable_intr(sc->vtnet_rx_vq)); 2722 } 2723 2724 static void 2725 vtnet_disable_rx_intr(struct vtnet_softc *sc) 2726 { 2727 2728 virtqueue_disable_intr(sc->vtnet_rx_vq); 2729 } 2730 2731 static int 2732 vtnet_enable_tx_intr(struct vtnet_softc *sc) 2733 { 2734 2735 #ifdef VTNET_TX_INTR_MODERATION 2736 return (0); 2737 #else 2738 return (virtqueue_enable_intr(sc->vtnet_tx_vq)); 2739 #endif 2740 } 2741 2742 static void 2743 vtnet_disable_tx_intr(struct vtnet_softc *sc) 2744 { 2745 2746 virtqueue_disable_intr(sc->vtnet_tx_vq); 2747 } 2748