/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/msan.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <machine/smp.h>

#include <vm/uma.h>

#include <net/debugnet.h>
#include <net/ethernet.h>
#include <net/pfil.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"

#if defined(INET) || defined(INET6)
#include <machine/in_cksum.h>
#endif

#ifdef __NO_STRICT_ALIGNMENT
#define VTNET_ETHER_ALIGN 0
#else /* Strict alignment */
#define VTNET_ETHER_ALIGN ETHER_ALIGN
#endif

static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_attach_completed(device_t);
static int	vtnet_config_change(device_t);

static int	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_setup_features(struct vtnet_softc *);
static int	vtnet_init_rxq(struct vtnet_softc *, int);
static int	vtnet_init_txq(struct vtnet_softc *, int);
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
static void	vtnet_free_rxtx_queues(struct vtnet_softc *);
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
static void	vtnet_free_rx_filters(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_alloc_interface(struct vtnet_softc *);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_ioctl_mtu(struct vtnet_softc *, u_int);
static int	vtnet_ioctl_ifflags(struct vtnet_softc *);
static int	vtnet_ioctl_multi(struct vtnet_softc *);
static int	vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
static int	vtnet_ioctl(if_t, u_long, caddr_t);
static uint64_t	vtnet_get_counter(if_t, ift_counter);

static int	vtnet_rxq_populate(struct vtnet_rxq *);
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
static struct mbuf *
		vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
static int	vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
		    struct mbuf *, int);
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
#if defined(INET) || defined(INET6)
static int	vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
		    bool, int, struct virtio_net_hdr *);
static void	vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
		    int);
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
#endif
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void	vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
static void	vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxq_eof(struct vtnet_rxq *);
static void	vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries);
static void	vtnet_rx_vq_intr(void *);
static void	vtnet_rxq_tq_intr(void *, int);

static int	vtnet_txq_intr_threshold(struct vtnet_txq *);
static int	vtnet_txq_below_threshold(struct vtnet_txq *);
static int	vtnet_txq_notify(struct vtnet_txq *);
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
static int	vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
		    int *, int *, int *);
static int	vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
		    int, struct virtio_net_hdr *);
static struct mbuf *
		vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);

/* Required for ALTQ */
static void	vtnet_start_locked(struct vtnet_txq *, if_t);
static void	vtnet_start(if_t);

/* Required for MQ */
static int	vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int	vtnet_txq_mq_start(if_t, struct mbuf *);
static void	vtnet_txq_tq_deferred(void *, int);
static void	vtnet_qflush(if_t);

static void	vtnet_txq_start(struct vtnet_txq *);
static void	vtnet_txq_tq_intr(void *, int);
static int	vtnet_txq_eof(struct vtnet_txq *);
static void	vtnet_tx_vq_intr(void *);
static void	vtnet_tx_start_all(struct vtnet_softc *);

static int	vtnet_watchdog(struct vtnet_txq *);
static void	vtnet_accum_stats(struct vtnet_softc *,
		    struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
static void	vtnet_tick(void *);

static void	vtnet_start_taskqueues(struct vtnet_softc *);
static void	vtnet_free_taskqueues(struct vtnet_softc *);
static void	vtnet_drain_taskqueues(struct vtnet_softc *);

static void	vtnet_drain_rxtx_queues(struct vtnet_softc *);
static void	vtnet_stop_rendezvous(struct vtnet_softc *);
static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_rx_filters(struct vtnet_softc *);
static int	vtnet_init_rx_queues(struct vtnet_softc *);
static int	vtnet_init_tx_queues(struct vtnet_softc *);
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
static void	vtnet_update_rx_offloads(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *, int);
static void	vtnet_init(void *);

static void	vtnet_free_ctrl_vq(struct vtnet_softc *);
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, bool);
static int	vtnet_set_promisc(struct vtnet_softc *, bool);
static int	vtnet_set_allmulti(struct vtnet_softc *, bool);
static void	vtnet_rx_filter(struct vtnet_softc *);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, if_t, uint16_t);
static void	vtnet_unregister_vlan(void *, if_t, uint16_t);

static void	vtnet_update_speed_duplex(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static int	vtnet_ifmedia_upd(if_t);
static void	vtnet_ifmedia_sts(if_t, struct ifmediareq *);
static void	vtnet_get_macaddr(struct vtnet_softc *);
static void	vtnet_set_macaddr(struct vtnet_softc *);
static void	vtnet_attached_set_macaddr(struct vtnet_softc *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);

static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_rxq *);
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_txq *);
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
static void	vtnet_load_tunables(struct vtnet_softc *);
static void	vtnet_setup_sysctl(struct vtnet_softc *);

static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
static void	vtnet_rxq_disable_intr(struct vtnet_rxq *);
static int	vtnet_txq_enable_intr(struct vtnet_txq *);
static void	vtnet_txq_disable_intr(struct vtnet_txq *);
static void	vtnet_enable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_interrupts(struct vtnet_softc *);
static void	vtnet_disable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_interrupts(struct vtnet_softc *);

static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);

DEBUGNET_DEFINE(vtnet);

#define vtnet_htog16(_sc, _val)	virtio_htog16(vtnet_modern(_sc), _val)
#define vtnet_htog32(_sc, _val)	virtio_htog32(vtnet_modern(_sc), _val)
#define vtnet_htog64(_sc, _val)	virtio_htog64(vtnet_modern(_sc), _val)
#define vtnet_gtoh16(_sc, _val)	virtio_gtoh16(vtnet_modern(_sc), _val)
#define vtnet_gtoh32(_sc, _val)	virtio_gtoh32(vtnet_modern(_sc), _val)
#define vtnet_gtoh64(_sc, _val)	virtio_gtoh64(vtnet_modern(_sc), _val)

/* Tunables. */
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VirtIO Net driver parameters");

static int vtnet_csum_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");

static int vtnet_fixup_needs_csum = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
    &vtnet_fixup_needs_csum, 0,
    "Calculate valid checksum for NEEDS_CSUM packets");

static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
    &vtnet_tso_disable, 0, "Disables TSO");

static int vtnet_lro_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
    &vtnet_lro_disable, 0, "Disables hardware LRO");

static int vtnet_mq_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
    &vtnet_mq_disable, 0, "Disables multiqueue support");

static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");

static int vtnet_tso_maxlen = IP_MAXPACKET;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &vtnet_tso_maxlen, 0, "TSO burst limit");

static int vtnet_rx_process_limit = 1024;
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &vtnet_rx_process_limit, 0,
    "Number of RX segments processed in one pass");

static int vtnet_lro_entry_count = 128;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &vtnet_lro_entry_count, 0, "Software LRO entry count");

/* Enable sorted LRO, and the depth of the mbuf queue. */
static int vtnet_lro_mbufq_depth = 0;
SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");

/* Deactivate ALTQ support */
static int vtnet_altq_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, altq_disable, CTLFLAG_RDTUN,
    &vtnet_altq_disable, 0, "Disables ALTQ Support");

/*
 * For the driver to be considered ALTQ-enabled, it must be compiled into
 * an ALTQ-capable kernel and the tunable hw.vtnet.altq_disable must be
 * zero.
 */
#define VTNET_ALTQ_ENABLED	(VTNET_ALTQ_CAPABLE && (!vtnet_altq_disable))

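/*
 * The knobs above are read-only tunables (CTLFLAG_RDTUN): set them from
 * loader.conf(5) before the driver attaches, e.g. (illustrative values):
 *
 *   hw.vtnet.csum_disable=1
 *   hw.vtnet.mq_max_pairs=4
 *
 * vtnet_tunable_int(), declared above and defined later in this file, also
 * consults a per-device override of the form hw.vtnet.<unit>.<knob> before
 * falling back to these globals.
 */
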
static uma_zone_t vtnet_tx_header_zone;

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM, "TxChecksum" },
	{ VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "CtrlRxOffloads" },
	{ VIRTIO_NET_F_MAC, "MAC" },
	{ VIRTIO_NET_F_GSO, "TxGSO" },
	{ VIRTIO_NET_F_GUEST_TSO4, "RxLROv4" },
	{ VIRTIO_NET_F_GUEST_TSO6, "RxLROv6" },
	{ VIRTIO_NET_F_GUEST_ECN, "RxLROECN" },
	{ VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
	{ VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
	{ VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
	{ VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
	{ VIRTIO_NET_F_HOST_UFO, "TxUFO" },
	{ VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
	{ VIRTIO_NET_F_STATUS, "Status" },
	{ VIRTIO_NET_F_CTRL_VQ, "CtrlVq" },
	{ VIRTIO_NET_F_CTRL_RX, "CtrlRxMode" },
	{ VIRTIO_NET_F_CTRL_VLAN, "CtrlVLANFilter" },
	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "CtrlRxModeExtra" },
	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
	{ VIRTIO_NET_F_MQ, "Multiqueue" },
	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "CtrlMacAddr" },
	{ VIRTIO_NET_F_SPEED_DUPLEX, "SpeedDuplex" },

	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe, vtnet_probe),
	DEVMETHOD(device_attach, vtnet_attach),
	DEVMETHOD(device_detach, vtnet_detach),
	DEVMETHOD(device_suspend, vtnet_suspend),
	DEVMETHOD(device_resume, vtnet_resume),
	DEVMETHOD(device_shutdown, vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_attach_completed, vtnet_attach_completed),
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif

static driver_t vtnet_driver = {
	.name = "vtnet",
	.methods = vtnet_methods,
	.size = sizeof(struct vtnet_softc)
};
VIRTIO_DRIVER_MODULE(vtnet, vtnet_driver, vtnet_modevent, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif

VIRTIO_SIMPLE_PNPINFO(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter");

static int
vtnet_modevent(module_t mod __unused, int type, void *unused __unused)
{
	int error = 0;
	static int loaded = 0;

	switch (type) {
	case MOD_LOAD:
		if (loaded++ == 0) {
			vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
			    sizeof(struct vtnet_tx_header),
			    NULL, NULL, NULL, NULL, 0, 0);
#ifdef DEBUGNET
			/*
			 * We need to allocate from this zone in the transmit
			 * path, so ensure that we have at least one item per
			 * header available.
			 * XXX add a separate zone like we do for mbufs?
			 * otherwise we may alloc buckets
			 */
			uma_zone_reserve(vtnet_tx_header_zone,
			    DEBUGNET_MAX_IN_FLIGHT * 2);
			uma_prealloc(vtnet_tx_header_zone,
			    DEBUGNET_MAX_IN_FLIGHT * 2);
#endif
		}
		break;
	case MOD_QUIESCE:
		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
			error = EBUSY;
		break;
	case MOD_UNLOAD:
		if (--loaded == 0) {
			uma_zdestroy(vtnet_tx_header_zone);
			vtnet_tx_header_zone = NULL;
		}
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtnet_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, vtnet));
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;
	virtio_set_feature_desc(dev, vtnet_feature_desc);

	VTNET_CORE_LOCK_INIT(sc);
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
	vtnet_load_tunables(sc);

	vtnet_alloc_interface(sc);
	vtnet_setup_sysctl(sc);

	error = vtnet_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	error = vtnet_alloc_rx_filters(sc);
	if (error) {
		device_printf(dev, "cannot allocate Rx filters\n");
		goto fail;
	}

	error = vtnet_alloc_rxtx_queues(sc);
	if (error) {
		device_printf(dev, "cannot allocate queues\n");
		goto fail;
	}

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_NET);
	if (error) {
		device_printf(dev, "cannot setup interrupts\n");
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

#ifdef DEV_NETMAP
	vtnet_netmap_attach(sc);
#endif
	vtnet_start_taskqueues(sc);

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		VTNET_CORE_LOCK(sc);
		vtnet_stop(sc);
		VTNET_CORE_UNLOCK(sc);

		callout_drain(&sc->vtnet_tick_ch);
		vtnet_drain_taskqueues(sc);

		ether_ifdetach(ifp);
	}

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif

	if (sc->vtnet_pfil != NULL) {
		pfil_head_unregister(sc->vtnet_pfil);
		sc->vtnet_pfil = NULL;
	}

	vtnet_free_taskqueues(sc);

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	vtnet_free_rxtx_queues(sc);
	vtnet_free_rx_filters(sc);

	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	VTNET_CORE_LOCK_DESTROY(sc);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK(sc);
	if (if_getflags(ifp) & IFF_UP)
		vtnet_init_locked(sc, 0);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

static int
vtnet_attach_completed(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_attached_set_macaddr(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}

static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_update_link_status(sc);
	if (sc->vtnet_link_active != 0)
		vtnet_tx_start_all(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}

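/*
 * Rough arithmetic behind the "34 descriptors" note in the LRO_NOMRG
 * comment inside vtnet_negotiate_features() below (a sketch, assuming
 * 2KB MCLBYTES clusters): a maximal LRO frame carries ~64KB of TCP data,
 * so ceil(65535 / 2048) = 32 clusters, plus slack for the Ethernet and
 * virtio headers and the separate legacy header segment, which comes to
 * roughly 34 descriptor entries per receive buffer chain.
 */
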
static int
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t features, negotiated_features;
	int no_csum;

	dev = sc->vtnet_dev;
	features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
	    VTNET_LEGACY_FEATURES;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */
	no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
	if (no_csum)
		features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
	if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
		features &= ~VTNET_TSO_FEATURES;
	if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
		features &= ~VTNET_LRO_FEATURES;

	/*
	 * Clear the MQ feature flag if the driver has ALTQ enabled or MQ
	 * is explicitly disabled.
	 */
	if (VTNET_ALTQ_ENABLED ||
	    vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
		features &= ~VIRTIO_NET_F_MQ;

	negotiated_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
		uint16_t mtu;

		mtu = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, mtu));
		if (mtu < VTNET_MIN_MTU) {
			device_printf(dev, "Invalid MTU value: %d. "
			    "MTU feature disabled.\n", mtu);
			features &= ~VIRTIO_NET_F_MTU;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		}
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
		uint16_t npairs;

		npairs = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
		if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
		    npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			device_printf(dev, "Invalid max_virtqueue_pairs value: "
			    "%d. Multiqueue feature disabled.\n", npairs);
			features &= ~VIRTIO_NET_F_MQ;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		}
	}

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the virtio header. This requires up to 34 descriptors
		 * with MCLBYTES clusters. If we do not have indirect
		 * descriptors, LRO is disabled since the virtqueue will not
		 * contain very many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "Host LRO disabled since both mergeable buffers "
			    "and indirect descriptors were not negotiated\n");
			features &= ~VTNET_LRO_FEATURES;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}

	sc->vtnet_features = negotiated_features;
	sc->vtnet_negotiated_features = negotiated_features;

	return (virtio_finalize_features(dev));
}

static int
vtnet_setup_features(struct vtnet_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtnet_dev;

	error = vtnet_negotiate_features(sc);
	if (error)
		return (error);

	if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
		sc->vtnet_flags |= VTNET_FLAG_MODERN;
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
		sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
		sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, mtu));
	} else
		sc->vtnet_max_mtu = VTNET_MAX_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else if (vtnet_modern(sc)) {
		/* This is identical to the mergeable header. */
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
	} else
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
	else
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;

	/*
	 * Favor "hardware" LRO if negotiated, but support software LRO as
	 * a fallback; there is usually little benefit (or worse) with both.
773 */ 774 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 && 775 virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0) 776 sc->vtnet_flags |= VTNET_FLAG_SW_LRO; 777 778 if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || 779 virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || 780 virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) 781 sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX; 782 else 783 sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN; 784 785 sc->vtnet_req_vq_pairs = 1; 786 sc->vtnet_max_vq_pairs = 1; 787 788 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { 789 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; 790 791 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) 792 sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; 793 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) 794 sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; 795 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR)) 796 sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; 797 798 if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) { 799 sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev, 800 offsetof(struct virtio_net_config, 801 max_virtqueue_pairs)); 802 } 803 } 804 805 if (sc->vtnet_max_vq_pairs > 1) { 806 int req; 807 808 /* 809 * Limit the maximum number of requested queue pairs to the 810 * number of CPUs and the configured maximum. 811 */ 812 req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs); 813 if (req < 0) 814 req = 1; 815 if (req == 0) 816 req = mp_ncpus; 817 if (req > sc->vtnet_max_vq_pairs) 818 req = sc->vtnet_max_vq_pairs; 819 if (req > mp_ncpus) 820 req = mp_ncpus; 821 if (req > 1) { 822 sc->vtnet_req_vq_pairs = req; 823 sc->vtnet_flags |= VTNET_FLAG_MQ; 824 } 825 } 826 827 return (0); 828 } 829 830 static int 831 vtnet_init_rxq(struct vtnet_softc *sc, int id) 832 { 833 struct vtnet_rxq *rxq; 834 835 rxq = &sc->vtnet_rxqs[id]; 836 837 snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d", 838 device_get_nameunit(sc->vtnet_dev), id); 839 mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF); 840 841 rxq->vtnrx_sc = sc; 842 rxq->vtnrx_id = id; 843 844 rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT); 845 if (rxq->vtnrx_sg == NULL) 846 return (ENOMEM); 847 848 #if defined(INET) || defined(INET6) 849 if (vtnet_software_lro(sc)) { 850 if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp, 851 sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0) 852 return (ENOMEM); 853 } 854 #endif 855 856 NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); 857 rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT, 858 taskqueue_thread_enqueue, &rxq->vtnrx_tq); 859 860 return (rxq->vtnrx_tq == NULL ? 
static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
	struct vtnet_rxq *rxq;

	rxq = &sc->vtnet_rxqs[id];

	snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);

	rxq->vtnrx_sc = sc;
	rxq->vtnrx_id = id;

	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
	if (rxq->vtnrx_sg == NULL)
		return (ENOMEM);

#if defined(INET) || defined(INET6)
	if (vtnet_software_lro(sc)) {
		if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
		    sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
			return (ENOMEM);
	}
#endif

	NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);

	return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}

static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
	struct vtnet_txq *txq;

	txq = &sc->vtnet_txqs[id];

	snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);

	txq->vtntx_sc = sc;
	txq->vtntx_id = id;

	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
	if (txq->vtntx_sg == NULL)
		return (ENOMEM);

	if (!VTNET_ALTQ_ENABLED) {
		txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE,
		    M_DEVBUF, M_NOWAIT, &txq->vtntx_mtx);
		if (txq->vtntx_br == NULL)
			return (ENOMEM);

		TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
	}
	TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
	txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &txq->vtntx_tq);
	if (txq->vtntx_tq == NULL)
		return (ENOMEM);

	return (0);
}

static int
vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
{
	int i, npairs, error;

	npairs = sc->vtnet_max_vq_pairs;

	sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
		return (ENOMEM);

	for (i = 0; i < npairs; i++) {
		error = vtnet_init_rxq(sc, i);
		if (error)
			return (error);
		error = vtnet_init_txq(sc, i);
		if (error)
			return (error);
	}

	vtnet_set_rx_process_limit(sc);
	vtnet_setup_queue_sysctl(sc);

	return (0);
}

static void
vtnet_destroy_rxq(struct vtnet_rxq *rxq)
{

	rxq->vtnrx_sc = NULL;
	rxq->vtnrx_id = -1;

#if defined(INET) || defined(INET6)
	tcp_lro_free(&rxq->vtnrx_lro);
#endif

	if (rxq->vtnrx_sg != NULL) {
		sglist_free(rxq->vtnrx_sg);
		rxq->vtnrx_sg = NULL;
	}

	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
		mtx_destroy(&rxq->vtnrx_mtx);
}

static void
vtnet_destroy_txq(struct vtnet_txq *txq)
{

	txq->vtntx_sc = NULL;
	txq->vtntx_id = -1;

	if (txq->vtntx_sg != NULL) {
		sglist_free(txq->vtntx_sg);
		txq->vtntx_sg = NULL;
	}

	if (!VTNET_ALTQ_ENABLED) {
		if (txq->vtntx_br != NULL) {
			buf_ring_free(txq->vtntx_br, M_DEVBUF);
			txq->vtntx_br = NULL;
		}
	}

	if (mtx_initialized(&txq->vtntx_mtx) != 0)
		mtx_destroy(&txq->vtntx_mtx);
}

static void
vtnet_free_rxtx_queues(struct vtnet_softc *sc)
{
	int i;

	if (sc->vtnet_rxqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
		free(sc->vtnet_rxqs, M_DEVBUF);
		sc->vtnet_rxqs = NULL;
	}

	if (sc->vtnet_txqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_txq(&sc->vtnet_txqs[i]);
		free(sc->vtnet_txqs, M_DEVBUF);
		sc->vtnet_txqs = NULL;
	}
}

static int
vtnet_alloc_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_mac_filter == NULL)
			return (ENOMEM);
	}

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
		    VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_vlan_filter == NULL)
			return (ENOMEM);
	}

	return (0);
}

static void
vtnet_free_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_mac_filter != NULL) {
		free(sc->vtnet_mac_filter, M_DEVBUF);
		sc->vtnet_mac_filter = NULL;
	}

	if (sc->vtnet_vlan_filter != NULL) {
		free(sc->vtnet_vlan_filter, M_DEVBUF);
		sc->vtnet_vlan_filter = NULL;
	}
}

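/*
 * The virtqueue layout built by vtnet_alloc_virtqueues() below
 * interleaves the receive and transmit queues, with the control queue
 * (when negotiated) allocated last; e.g. with two queue pairs the order
 * is [rx0, tx0, rx1, tx1, ctrl].
 */
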
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info *info;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, idx, nvqs, error;

	dev = sc->vtnet_dev;

	nvqs = sc->vtnet_max_vq_pairs * 2;
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		nvqs++;

	info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
	if (info == NULL)
		return (ENOMEM);

	for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx + 1], sc->vtnet_tx_nsegs,
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
	}

	/* These queues will not be used so allocate the minimum resources. */
	for (; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx + 1], 0, NULL, txq,
		    &txq->vtntx_vq,
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
	}

	error = virtio_alloc_virtqueues(dev, nvqs, info);
	free(info, M_TEMP);

	return (error);
}

static void
vtnet_alloc_interface(struct vtnet_softc *sc)
{
	device_t dev;
	if_t ifp;

	dev = sc->vtnet_dev;

	ifp = if_alloc(IFT_ETHER);
	sc->vtnet_ifp = ifp;
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
}

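/*
 * vtnet_setup_interface() below wires up one of two transmit paths:
 * the multiqueue if_transmit/if_qflush pair backed by per-queue buf
 * rings, or, when ALTQ support is enabled, the legacy if_start path
 * with a single send queue sized to the first transmit virtqueue.
 */
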
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct pfil_head_args pa;
	if_t ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setbaudrate(ifp, IF_Gbps(10));
	if_setinitfn(ifp, vtnet_init);
	if_setioctlfn(ifp, vtnet_ioctl);
	if_setgetcounterfn(ifp, vtnet_get_counter);

	if (!VTNET_ALTQ_ENABLED) {
		if_settransmitfn(ifp, vtnet_txq_mq_start);
		if_setqflushfn(ifp, vtnet_qflush);
	} else {
		struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
		if_setstartfn(ifp, vtnet_start);
		if_setsendqlen(ifp, virtqueue_size(vq) - 1);
		if_setsendqready(ifp);
	}

	vtnet_get_macaddr(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
		if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);

	ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		int gso;

		if_setcapabilitiesbit(ifp, IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6, 0);

		gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;

		if (if_getcapabilities(ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
			int tso_maxlen;

			if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0);

			tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
			    vtnet_tso_maxlen);
			if_sethwtsomax(ifp, tso_maxlen -
			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
			if_sethwtsomaxsegcount(ifp, sc->vtnet_tx_nsegs - 1);
			if_sethwtsomaxsegsize(ifp, PAGE_SIZE);
		}
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
		if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
		if_setcapabilitiesbit(ifp, IFCAP_RXCSUM_IPV6, 0);

		if (vtnet_tunable_int(sc, "fixup_needs_csum",
		    vtnet_fixup_needs_csum) != 0)
			sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;

		/* Support either "hardware" or software LRO. */
		if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
	}

	if (if_getcapabilities(ifp) & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		if_setcapabilitiesbit(ifp,
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0);
	}

	if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
		if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);

	/*
	 * Capabilities after here are not enabled by default.
	 */
	if_setcapenable(ifp, if_getcapabilities(ifp));

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	ether_ifattach(ifp, sc->vtnet_hwaddr);

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	DEBUGNET_SET(ifp, vtnet);

	pa.pa_version = PFIL_VERSION;
	pa.pa_flags = PFIL_IN;
	pa.pa_type = PFIL_TYPE_ETHERNET;
	pa.pa_headname = if_name(ifp);
	sc->vtnet_pfil = pfil_head_register(&pa);

	return (0);
}

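/*
 * A worked example for vtnet_rx_cluster_size() below (sizes are
 * illustrative): on a modern device without mergeable buffers and an MTU
 * of 1500, framesz = 12 (virtio_net_hdr_v1) + 18 (ether_vlan_header) +
 * 1500 = 1530 bytes, which fits an MCLBYTES (2KB) cluster. An MTU of
 * 9000 yields ~9030 bytes and selects MJUM9BYTES, while anything larger
 * deliberately falls back to MCLBYTES to avoid 16KB clusters.
 */
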
1227 */ 1228 if (vtnet_modern(sc)) { 1229 MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1)); 1230 framesz = sizeof(struct virtio_net_hdr_v1); 1231 } else 1232 framesz = sizeof(struct vtnet_rx_header); 1233 framesz += sizeof(struct ether_vlan_header) + mtu; 1234 /* 1235 * Account for the offsetting we'll do elsewhere so we allocate the 1236 * right size for the mtu. 1237 */ 1238 if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) { 1239 framesz += VTNET_ETHER_ALIGN; 1240 } 1241 1242 if (framesz <= MCLBYTES) 1243 return (MCLBYTES); 1244 else if (framesz <= MJUMPAGESIZE) 1245 return (MJUMPAGESIZE); 1246 else if (framesz <= MJUM9BYTES) 1247 return (MJUM9BYTES); 1248 1249 /* Sane default; avoid 16KB clusters. */ 1250 return (MCLBYTES); 1251 } 1252 1253 static int 1254 vtnet_ioctl_mtu(struct vtnet_softc *sc, u_int mtu) 1255 { 1256 if_t ifp; 1257 int clustersz; 1258 1259 ifp = sc->vtnet_ifp; 1260 VTNET_CORE_LOCK_ASSERT(sc); 1261 1262 if (if_getmtu(ifp) == mtu) 1263 return (0); 1264 else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu) 1265 return (EINVAL); 1266 1267 if_setmtu(ifp, mtu); 1268 clustersz = vtnet_rx_cluster_size(sc, mtu); 1269 1270 if (clustersz != sc->vtnet_rx_clustersz && 1271 if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1272 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1273 vtnet_init_locked(sc, 0); 1274 } 1275 1276 return (0); 1277 } 1278 1279 static int 1280 vtnet_ioctl_ifflags(struct vtnet_softc *sc) 1281 { 1282 if_t ifp; 1283 int drv_running; 1284 1285 ifp = sc->vtnet_ifp; 1286 drv_running = (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; 1287 1288 VTNET_CORE_LOCK_ASSERT(sc); 1289 1290 if ((if_getflags(ifp) & IFF_UP) == 0) { 1291 if (drv_running) 1292 vtnet_stop(sc); 1293 goto out; 1294 } 1295 1296 if (!drv_running) { 1297 vtnet_init_locked(sc, 0); 1298 goto out; 1299 } 1300 1301 if ((if_getflags(ifp) ^ sc->vtnet_if_flags) & 1302 (IFF_PROMISC | IFF_ALLMULTI)) { 1303 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) 1304 vtnet_rx_filter(sc); 1305 else { 1306 /* 1307 * We don't support filtering out multicast, so 1308 * ALLMULTI is always set. 1309 */ 1310 if_setflagbits(ifp, IFF_ALLMULTI, 0); 1311 if_setflagbits(ifp, IFF_PROMISC, 0); 1312 } 1313 } 1314 1315 out: 1316 sc->vtnet_if_flags = if_getflags(ifp); 1317 return (0); 1318 } 1319 1320 static int 1321 vtnet_ioctl_multi(struct vtnet_softc *sc) 1322 { 1323 if_t ifp; 1324 1325 ifp = sc->vtnet_ifp; 1326 1327 VTNET_CORE_LOCK_ASSERT(sc); 1328 1329 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX && 1330 if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1331 vtnet_rx_filter_mac(sc); 1332 1333 return (0); 1334 } 1335 1336 static int 1337 vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr) 1338 { 1339 if_t ifp; 1340 int mask, reinit, update; 1341 1342 ifp = sc->vtnet_ifp; 1343 mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^ if_getcapenable(ifp); 1344 reinit = update = 0; 1345 1346 VTNET_CORE_LOCK_ASSERT(sc); 1347 1348 if (mask & IFCAP_TXCSUM) 1349 if_togglecapenable(ifp, IFCAP_TXCSUM); 1350 if (mask & IFCAP_TXCSUM_IPV6) 1351 if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6); 1352 if (mask & IFCAP_TSO4) 1353 if_togglecapenable(ifp, IFCAP_TSO4); 1354 if (mask & IFCAP_TSO6) 1355 if_togglecapenable(ifp, IFCAP_TSO6); 1356 1357 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) { 1358 /* 1359 * These Rx features require the negotiated features to 1360 * be updated. Avoid a full reinit if possible. 
1361 */ 1362 if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) 1363 update = 1; 1364 else 1365 reinit = 1; 1366 1367 /* BMV: Avoid needless renegotiation for just software LRO. */ 1368 if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) == 1369 IFCAP_LRO && vtnet_software_lro(sc)) 1370 reinit = update = 0; 1371 /* 1372 * VirtIO does not distinguish between receive checksum offload 1373 * for IPv4 and IPv6 packets, so treat them as a pair. 1374 */ 1375 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { 1376 if_togglecapenable(ifp, IFCAP_RXCSUM); 1377 if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6); 1378 } 1379 if (mask & IFCAP_LRO) 1380 if_togglecapenable(ifp, IFCAP_LRO); 1381 /* Both SW and HW TCP LRO require receive checksum offload. */ 1382 if ((if_getcapenable(ifp) & 1383 (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0) 1384 if_setcapenablebit(ifp, 0, IFCAP_LRO); 1385 } 1386 1387 if (mask & IFCAP_VLAN_HWFILTER) { 1388 /* These Rx features require renegotiation. */ 1389 reinit = 1; 1390 1391 if (mask & IFCAP_VLAN_HWFILTER) 1392 if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER); 1393 } 1394 1395 if (mask & IFCAP_VLAN_HWTSO) 1396 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 1397 if (mask & IFCAP_VLAN_HWTAGGING) 1398 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 1399 1400 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1401 if (reinit) { 1402 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1403 vtnet_init_locked(sc, 0); 1404 } else if (update) 1405 vtnet_update_rx_offloads(sc); 1406 } 1407 1408 return (0); 1409 } 1410 1411 static int 1412 vtnet_ioctl(if_t ifp, u_long cmd, caddr_t data) 1413 { 1414 struct vtnet_softc *sc; 1415 struct ifreq *ifr; 1416 int error; 1417 1418 sc = if_getsoftc(ifp); 1419 ifr = (struct ifreq *) data; 1420 error = 0; 1421 1422 switch (cmd) { 1423 case SIOCSIFMTU: 1424 VTNET_CORE_LOCK(sc); 1425 error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu); 1426 VTNET_CORE_UNLOCK(sc); 1427 break; 1428 1429 case SIOCSIFFLAGS: 1430 VTNET_CORE_LOCK(sc); 1431 error = vtnet_ioctl_ifflags(sc); 1432 VTNET_CORE_UNLOCK(sc); 1433 break; 1434 1435 case SIOCADDMULTI: 1436 case SIOCDELMULTI: 1437 VTNET_CORE_LOCK(sc); 1438 error = vtnet_ioctl_multi(sc); 1439 VTNET_CORE_UNLOCK(sc); 1440 break; 1441 1442 case SIOCSIFMEDIA: 1443 case SIOCGIFMEDIA: 1444 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); 1445 break; 1446 1447 case SIOCSIFCAP: 1448 VTNET_CORE_LOCK(sc); 1449 error = vtnet_ioctl_ifcap(sc, ifr); 1450 VTNET_CORE_UNLOCK(sc); 1451 VLAN_CAPABILITIES(ifp); 1452 break; 1453 1454 default: 1455 error = ether_ioctl(ifp, cmd, data); 1456 break; 1457 } 1458 1459 VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc); 1460 1461 return (error); 1462 } 1463 1464 static int 1465 vtnet_rxq_populate(struct vtnet_rxq *rxq) 1466 { 1467 struct virtqueue *vq; 1468 int nbufs, error; 1469 1470 #ifdef DEV_NETMAP 1471 error = vtnet_netmap_rxq_populate(rxq); 1472 if (error >= 0) 1473 return (error); 1474 #endif /* DEV_NETMAP */ 1475 1476 vq = rxq->vtnrx_vq; 1477 error = ENOSPC; 1478 1479 for (nbufs = 0; !virtqueue_full(vq); nbufs++) { 1480 error = vtnet_rxq_new_buf(rxq); 1481 if (error) 1482 break; 1483 } 1484 1485 if (nbufs > 0) { 1486 virtqueue_notify(vq); 1487 /* 1488 * EMSGSIZE signifies the virtqueue did not have enough 1489 * entries available to hold the last mbuf. This is not 1490 * an error. 
1491 */ 1492 if (error == EMSGSIZE) 1493 error = 0; 1494 } 1495 1496 return (error); 1497 } 1498 1499 static void 1500 vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq) 1501 { 1502 struct virtqueue *vq; 1503 struct mbuf *m; 1504 int last; 1505 #ifdef DEV_NETMAP 1506 struct netmap_kring *kring = netmap_kring_on(NA(rxq->vtnrx_sc->vtnet_ifp), 1507 rxq->vtnrx_id, NR_RX); 1508 #else /* !DEV_NETMAP */ 1509 void *kring = NULL; 1510 #endif /* !DEV_NETMAP */ 1511 1512 vq = rxq->vtnrx_vq; 1513 last = 0; 1514 1515 while ((m = virtqueue_drain(vq, &last)) != NULL) { 1516 if (kring == NULL) 1517 m_freem(m); 1518 } 1519 1520 KASSERT(virtqueue_empty(vq), 1521 ("%s: mbufs remaining in rx queue %p", __func__, rxq)); 1522 } 1523 1524 static struct mbuf * 1525 vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) 1526 { 1527 struct mbuf *m_head, *m_tail, *m; 1528 int i, size; 1529 1530 m_head = NULL; 1531 size = sc->vtnet_rx_clustersz; 1532 1533 KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1534 ("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs)); 1535 1536 for (i = 0; i < nbufs; i++) { 1537 m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size); 1538 if (m == NULL) { 1539 sc->vtnet_stats.mbuf_alloc_failed++; 1540 m_freem(m_head); 1541 return (NULL); 1542 } 1543 1544 m->m_len = size; 1545 /* 1546 * Need to offset the mbuf if the header we're going to add 1547 * will misalign. 1548 */ 1549 if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) { 1550 m_adj(m, VTNET_ETHER_ALIGN); 1551 } 1552 if (m_head != NULL) { 1553 m_tail->m_next = m; 1554 m_tail = m; 1555 } else 1556 m_head = m_tail = m; 1557 } 1558 1559 if (m_tailp != NULL) 1560 *m_tailp = m_tail; 1561 1562 return (m_head); 1563 } 1564 1565 /* 1566 * Slow path for when LRO without mergeable buffers is negotiated. 1567 */ 1568 static int 1569 vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0, 1570 int len0) 1571 { 1572 struct vtnet_softc *sc; 1573 struct mbuf *m, *m_prev, *m_new, *m_tail; 1574 int len, clustersz, nreplace, error; 1575 1576 sc = rxq->vtnrx_sc; 1577 clustersz = sc->vtnet_rx_clustersz; 1578 /* 1579 * Need to offset the mbuf if the header we're going to add will 1580 * misalign, account for that here. 1581 */ 1582 if (VTNET_ETHER_ALIGN != 0 && sc->vtnet_hdr_size % 4 == 0) 1583 clustersz -= VTNET_ETHER_ALIGN; 1584 1585 m_prev = NULL; 1586 m_tail = NULL; 1587 nreplace = 0; 1588 1589 m = m0; 1590 len = len0; 1591 1592 /* 1593 * Since these mbuf chains are so large, avoid allocating a complete 1594 * replacement when the received frame did not consume the entire 1595 * chain. Unused mbufs are moved to the tail of the replacement mbuf. 1596 */ 1597 while (len > 0) { 1598 if (m == NULL) { 1599 sc->vtnet_stats.rx_frame_too_large++; 1600 return (EMSGSIZE); 1601 } 1602 1603 /* 1604 * Every mbuf should have the expected cluster size since that 1605 * is also used to allocate the replacements. 
1606 */ 1607 KASSERT(m->m_len == clustersz, 1608 ("%s: mbuf size %d not expected cluster size %d", __func__, 1609 m->m_len, clustersz)); 1610 1611 m->m_len = MIN(m->m_len, len); 1612 len -= m->m_len; 1613 1614 m_prev = m; 1615 m = m->m_next; 1616 nreplace++; 1617 } 1618 1619 KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs, 1620 ("%s: invalid replacement mbuf count %d max %d", __func__, 1621 nreplace, sc->vtnet_rx_nmbufs)); 1622 1623 m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); 1624 if (m_new == NULL) { 1625 m_prev->m_len = clustersz; 1626 return (ENOBUFS); 1627 } 1628 1629 /* 1630 * Move any unused mbufs from the received mbuf chain onto the 1631 * end of the replacement chain. 1632 */ 1633 if (m_prev->m_next != NULL) { 1634 m_tail->m_next = m_prev->m_next; 1635 m_prev->m_next = NULL; 1636 } 1637 1638 error = vtnet_rxq_enqueue_buf(rxq, m_new); 1639 if (error) { 1640 /* 1641 * The replacement is suppose to be an copy of the one 1642 * dequeued so this is a very unexpected error. 1643 * 1644 * Restore the m0 chain to the original state if it was 1645 * modified so we can then discard it. 1646 */ 1647 if (m_tail->m_next != NULL) { 1648 m_prev->m_next = m_tail->m_next; 1649 m_tail->m_next = NULL; 1650 } 1651 m_prev->m_len = clustersz; 1652 sc->vtnet_stats.rx_enq_replacement_failed++; 1653 m_freem(m_new); 1654 } 1655 1656 return (error); 1657 } 1658 1659 static int 1660 vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len) 1661 { 1662 struct vtnet_softc *sc; 1663 struct mbuf *m_new; 1664 int error; 1665 1666 sc = rxq->vtnrx_sc; 1667 1668 if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) 1669 return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len)); 1670 1671 MPASS(m->m_next == NULL); 1672 if (m->m_len < len) 1673 return (EMSGSIZE); 1674 1675 m_new = vtnet_rx_alloc_buf(sc, 1, NULL); 1676 if (m_new == NULL) 1677 return (ENOBUFS); 1678 1679 error = vtnet_rxq_enqueue_buf(rxq, m_new); 1680 if (error) { 1681 sc->vtnet_stats.rx_enq_replacement_failed++; 1682 m_freem(m_new); 1683 } else 1684 m->m_len = len; 1685 1686 return (error); 1687 } 1688 1689 static int 1690 vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m) 1691 { 1692 struct vtnet_softc *sc; 1693 struct sglist *sg; 1694 int header_inlined, error; 1695 1696 sc = rxq->vtnrx_sc; 1697 sg = rxq->vtnrx_sg; 1698 1699 KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1700 ("%s: mbuf chain without LRO_NOMRG", __func__)); 1701 VTNET_RXQ_LOCK_ASSERT(rxq); 1702 1703 sglist_reset(sg); 1704 header_inlined = vtnet_modern(sc) || 1705 (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */ 1706 1707 /* 1708 * Note: The mbuf has been already adjusted when we allocate it if we 1709 * have to do strict alignment. 1710 */ 1711 if (header_inlined) 1712 error = sglist_append_mbuf(sg, m); 1713 else { 1714 struct vtnet_rx_header *rxhdr = 1715 mtod(m, struct vtnet_rx_header *); 1716 MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); 1717 1718 /* Append the header and remaining mbuf data. 
static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct sglist *sg;
	int header_inlined, error;

	sc = rxq->vtnrx_sc;
	sg = rxq->vtnrx_sg;

	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: mbuf chain without LRO_NOMRG", __func__));
	VTNET_RXQ_LOCK_ASSERT(rxq);

	sglist_reset(sg);
	header_inlined = vtnet_modern(sc) ||
	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */

	/*
	 * Note: The mbuf has already been adjusted when we allocated it if
	 * we have to do strict alignment.
	 */
	if (header_inlined)
		error = sglist_append_mbuf(sg, m);
	else {
		struct vtnet_rx_header *rxhdr =
		    mtod(m, struct vtnet_rx_header *);
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));

		/* Append the header and remaining mbuf data. */
		error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
		if (error)
			return (error);
		error = sglist_append(sg, &rxhdr[1],
		    m->m_len - sizeof(struct vtnet_rx_header));
		if (error)
			return (error);

		if (m->m_next != NULL)
			error = sglist_append_mbuf(sg, m->m_next);
	}

	if (error)
		return (error);

	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
}

static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
	struct vtnet_softc *sc;
	struct mbuf *m;
	int error;

	sc = rxq->vtnrx_sc;

	m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m);
	if (error)
		m_freem(m);

	return (error);
}

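/*
 * A worked example of the csum_start/csum_offset convention used by the
 * checksum helpers below (illustrative values): for TCP over IPv4 with
 * no VLAN tag, csum_start is 14 (Ethernet) + 20 (IP) = 34, the start of
 * the TCP header, and csum_offset is 16, the offset of the checksum
 * field within that header, so the checksum itself lives at byte 50 of
 * the frame.
 */
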
1818 */ 1819 csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start); 1820 *(uint16_t *)(mtodo(m, csum_off)) = csum; 1821 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1822 m->m_pkthdr.csum_data = 0xFFFF; 1823 1824 return (0); 1825 } 1826 1827 static void 1828 vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, int protocol) 1829 { 1830 KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP, 1831 ("%s: unsupported IP protocol %d", __func__, protocol)); 1832 1833 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1834 m->m_pkthdr.csum_data = 0xFFFF; 1835 } 1836 1837 static int 1838 vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, 1839 struct virtio_net_hdr *hdr) 1840 { 1841 const struct ether_header *eh; 1842 struct vtnet_softc *sc; 1843 int hoff, protocol; 1844 uint16_t etype; 1845 bool isipv6; 1846 1847 KASSERT(hdr->flags & 1848 (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID), 1849 ("%s: missing checksum offloading flag %x", __func__, hdr->flags)); 1850 1851 eh = mtod(m, const struct ether_header *); 1852 etype = ntohs(eh->ether_type); 1853 if (etype == ETHERTYPE_VLAN) { 1854 /* TODO BMV: Handle QinQ. */ 1855 const struct ether_vlan_header *evh = 1856 mtod(m, const struct ether_vlan_header *); 1857 etype = ntohs(evh->evl_proto); 1858 hoff = sizeof(struct ether_vlan_header); 1859 } else 1860 hoff = sizeof(struct ether_header); 1861 1862 sc = rxq->vtnrx_sc; 1863 1864 /* Check whether ethernet type is IP or IPv6, and get protocol. */ 1865 switch (etype) { 1866 #if defined(INET) 1867 case ETHERTYPE_IP: 1868 if (__predict_false(m->m_len < hoff + sizeof(struct ip))) { 1869 sc->vtnet_stats.rx_csum_inaccessible_ipproto++; 1870 return (1); 1871 } else { 1872 struct ip *ip = (struct ip *)(m->m_data + hoff); 1873 protocol = ip->ip_p; 1874 } 1875 isipv6 = false; 1876 break; 1877 #endif 1878 #if defined(INET6) 1879 case ETHERTYPE_IPV6: 1880 if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr)) 1881 || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) { 1882 sc->vtnet_stats.rx_csum_inaccessible_ipproto++; 1883 return (1); 1884 } 1885 isipv6 = true; 1886 break; 1887 #endif 1888 default: 1889 sc->vtnet_stats.rx_csum_bad_ethtype++; 1890 return (1); 1891 } 1892 1893 /* Check whether protocol is TCP or UDP. */ 1894 switch (protocol) { 1895 case IPPROTO_TCP: 1896 case IPPROTO_UDP: 1897 break; 1898 default: 1899 /* 1900 * FreeBSD does not support checksum offloading of this 1901 * protocol here. 1902 */ 1903 sc->vtnet_stats.rx_csum_bad_ipproto++; 1904 return (1); 1905 } 1906 1907 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1908 return (vtnet_rxq_csum_needs_csum(rxq, m, isipv6, protocol, 1909 hdr)); 1910 else /* VIRTIO_NET_HDR_F_DATA_VALID */ 1911 vtnet_rxq_csum_data_valid(rxq, m, protocol); 1912 1913 return (0); 1914 } 1915 #endif 1916 1917 static void 1918 vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs) 1919 { 1920 struct mbuf *m; 1921 1922 while (--nbufs > 0) { 1923 m = virtqueue_dequeue(rxq->vtnrx_vq, NULL); 1924 if (m == NULL) 1925 break; 1926 vtnet_rxq_discard_buf(rxq, m); 1927 } 1928 } 1929 1930 static void 1931 vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m) 1932 { 1933 int error __diagused; 1934 1935 /* 1936 * Requeue the discarded mbuf. This should always be successful 1937 * since it was just dequeued. 
1938 */ 1939 error = vtnet_rxq_enqueue_buf(rxq, m); 1940 KASSERT(error == 0, 1941 ("%s: cannot requeue discarded mbuf %d", __func__, error)); 1942 } 1943 1944 static int 1945 vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs) 1946 { 1947 struct vtnet_softc *sc; 1948 struct virtqueue *vq; 1949 struct mbuf *m_tail; 1950 1951 sc = rxq->vtnrx_sc; 1952 vq = rxq->vtnrx_vq; 1953 m_tail = m_head; 1954 1955 while (--nbufs > 0) { 1956 struct mbuf *m; 1957 uint32_t len; 1958 1959 m = virtqueue_dequeue(vq, &len); 1960 if (m == NULL) { 1961 rxq->vtnrx_stats.vrxs_ierrors++; 1962 goto fail; 1963 } 1964 1965 if (vtnet_rxq_new_buf(rxq) != 0) { 1966 rxq->vtnrx_stats.vrxs_iqdrops++; 1967 vtnet_rxq_discard_buf(rxq, m); 1968 if (nbufs > 1) 1969 vtnet_rxq_discard_merged_bufs(rxq, nbufs); 1970 goto fail; 1971 } 1972 1973 if (m->m_len < len) 1974 len = m->m_len; 1975 1976 m->m_len = len; 1977 m->m_flags &= ~M_PKTHDR; 1978 1979 m_head->m_pkthdr.len += len; 1980 m_tail->m_next = m; 1981 m_tail = m; 1982 } 1983 1984 return (0); 1985 1986 fail: 1987 sc->vtnet_stats.rx_mergeable_failed++; 1988 m_freem(m_head); 1989 1990 return (1); 1991 } 1992 1993 #if defined(INET) || defined(INET6) 1994 static int 1995 vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m) 1996 { 1997 struct lro_ctrl *lro; 1998 1999 lro = &rxq->vtnrx_lro; 2000 2001 if (lro->lro_mbuf_max != 0) { 2002 tcp_lro_queue_mbuf(lro, m); 2003 return (0); 2004 } 2005 2006 return (tcp_lro_rx(lro, m, 0)); 2007 } 2008 #endif 2009 2010 static void 2011 vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, 2012 struct virtio_net_hdr *hdr) 2013 { 2014 struct vtnet_softc *sc; 2015 if_t ifp; 2016 2017 sc = rxq->vtnrx_sc; 2018 ifp = sc->vtnet_ifp; 2019 2020 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) { 2021 struct ether_header *eh = mtod(m, struct ether_header *); 2022 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2023 vtnet_vlan_tag_remove(m); 2024 /* 2025 * With the 802.1Q header removed, update the 2026 * checksum starting location accordingly. 
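 * For example, a VLAN-tagged TCP/IPv4 frame arrives with csum_start
 * 38 (18-byte tagged Ethernet + 20-byte IPv4 header); once the 4-byte
 * tag (ETHER_VLAN_ENCAP_LEN) is stripped, csum_start must become 34.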
*/ 2028 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 2029 hdr->csum_start -= ETHER_VLAN_ENCAP_LEN; 2030 } 2031 } 2032 2033 m->m_pkthdr.flowid = rxq->vtnrx_id; 2034 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 2035 2036 if (hdr->flags & 2037 (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) { 2038 #if defined(INET) || defined(INET6) 2039 if (vtnet_rxq_csum(rxq, m, hdr) == 0) 2040 rxq->vtnrx_stats.vrxs_csum++; 2041 else 2042 rxq->vtnrx_stats.vrxs_csum_failed++; 2043 #else 2044 sc->vtnet_stats.rx_csum_bad_ethtype++; 2045 rxq->vtnrx_stats.vrxs_csum_failed++; 2046 #endif 2047 } 2048 2049 if (hdr->gso_size != 0) { 2050 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 2051 case VIRTIO_NET_HDR_GSO_TCPV4: 2052 case VIRTIO_NET_HDR_GSO_TCPV6: 2053 m->m_pkthdr.lro_nsegs = 2054 howmany(m->m_pkthdr.len, hdr->gso_size); 2055 rxq->vtnrx_stats.vrxs_host_lro++; 2056 break; 2057 } 2058 } 2059 2060 rxq->vtnrx_stats.vrxs_ipackets++; 2061 rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; 2062 2063 #if defined(INET) || defined(INET6) 2064 if (vtnet_software_lro(sc) && if_getcapenable(ifp) & IFCAP_LRO) { 2065 if (vtnet_lro_rx(rxq, m) == 0) 2066 return; 2067 } 2068 #endif 2069 2070 if_input(ifp, m); 2071 } 2072 2073 static int 2074 vtnet_rxq_eof(struct vtnet_rxq *rxq) 2075 { 2076 struct virtio_net_hdr lhdr, *hdr; 2077 struct vtnet_softc *sc; 2078 if_t ifp; 2079 struct virtqueue *vq; 2080 int deq, count; 2081 2082 sc = rxq->vtnrx_sc; 2083 vq = rxq->vtnrx_vq; 2084 ifp = sc->vtnet_ifp; 2085 deq = 0; 2086 count = sc->vtnet_rx_process_limit; 2087 2088 VTNET_RXQ_LOCK_ASSERT(rxq); 2089 2090 CURVNET_SET(if_getvnet(ifp)); 2091 while (count-- > 0) { 2092 struct mbuf *m; 2093 uint32_t len, nbufs, adjsz; 2094 2095 m = virtqueue_dequeue(vq, &len); 2096 if (m == NULL) 2097 break; 2098 deq++; 2099 2100 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { 2101 rxq->vtnrx_stats.vrxs_ierrors++; 2102 vtnet_rxq_discard_buf(rxq, m); 2103 continue; 2104 } 2105 2106 if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) { 2107 struct virtio_net_hdr_mrg_rxbuf *mhdr = 2108 mtod(m, struct virtio_net_hdr_mrg_rxbuf *); 2109 kmsan_mark(mhdr, sizeof(*mhdr), KMSAN_STATE_INITED); 2110 nbufs = vtnet_htog16(sc, mhdr->num_buffers); 2111 adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); 2112 } else if (vtnet_modern(sc)) { 2113 nbufs = 1; /* num_buffers is always 1 */ 2114 adjsz = sizeof(struct virtio_net_hdr_v1); 2115 } else { 2116 nbufs = 1; 2117 adjsz = sizeof(struct vtnet_rx_header); 2118 /* 2119 * Account for our gap between the header and start of 2120 * data to keep the segments separated. 2121 */ 2122 len += VTNET_RX_HEADER_PAD; 2123 } 2124 2125 if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { 2126 rxq->vtnrx_stats.vrxs_iqdrops++; 2127 vtnet_rxq_discard_buf(rxq, m); 2128 if (nbufs > 1) 2129 vtnet_rxq_discard_merged_bufs(rxq, nbufs); 2130 continue; 2131 } 2132 2133 m->m_pkthdr.len = len; 2134 m->m_pkthdr.rcvif = ifp; 2135 m->m_pkthdr.csum_flags = 0; 2136 2137 if (nbufs > 1) { 2138 /* Dequeue the rest of the chain. */ 2139 if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0) 2140 continue; 2141 } 2142 2143 kmsan_mark_mbuf(m, KMSAN_STATE_INITED); 2144 2145 /* 2146 * Save an endian-swapped version of the header prior to it 2147 * being stripped. The header is always at the start of the 2148 * mbuf data. num_buffers was already saved (and is not 2149 * needed), so use the standard header.
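 * The swap only matters when the device's header endianness differs
 * from the host's: modern devices always use little-endian header
 * fields, so vtnet_htog16() is a no-op on little-endian machines.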
2150 */ 2151 hdr = mtod(m, struct virtio_net_hdr *); 2152 lhdr.flags = hdr->flags; 2153 lhdr.gso_type = hdr->gso_type; 2154 lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len); 2155 lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size); 2156 lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start); 2157 lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset); 2158 m_adj(m, adjsz); 2159 2160 if (PFIL_HOOKED_IN(sc->vtnet_pfil)) { 2161 pfil_return_t pfil; 2162 2163 pfil = pfil_mbuf_in(sc->vtnet_pfil, &m, ifp, NULL); 2164 switch (pfil) { 2165 case PFIL_DROPPED: 2166 case PFIL_CONSUMED: 2167 continue; 2168 default: 2169 KASSERT(pfil == PFIL_PASS, 2170 ("Filter returned %d!", pfil)); 2171 } 2172 } 2173 2174 vtnet_rxq_input(rxq, m, &lhdr); 2175 } 2176 2177 if (deq > 0) { 2178 #if defined(INET) || defined(INET6) 2179 if (vtnet_software_lro(sc)) 2180 tcp_lro_flush_all(&rxq->vtnrx_lro); 2181 #endif 2182 virtqueue_notify(vq); 2183 } 2184 CURVNET_RESTORE(); 2185 2186 return (count > 0 ? 0 : EAGAIN); 2187 } 2188 2189 static void 2190 vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries) 2191 { 2192 struct vtnet_softc *sc; 2193 if_t ifp; 2194 u_int more; 2195 #ifdef DEV_NETMAP 2196 int nmirq; 2197 #endif /* DEV_NETMAP */ 2198 2199 sc = rxq->vtnrx_sc; 2200 ifp = sc->vtnet_ifp; 2201 2202 if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) { 2203 /* 2204 * Ignore this interrupt. Either this is a spurious interrupt 2205 * or multiqueue without per-VQ MSIX so every queue needs to 2206 * be polled (a brain dead configuration we could try harder 2207 * to avoid). 2208 */ 2209 vtnet_rxq_disable_intr(rxq); 2210 return; 2211 } 2212 2213 VTNET_RXQ_LOCK(rxq); 2214 2215 #ifdef DEV_NETMAP 2216 /* 2217 * We call netmap_rx_irq() under lock to prevent concurrent calls. 2218 * This is not necessary to serialize the access to the RX vq, but 2219 * rather to avoid races that may happen if this interface is 2220 * attached to a VALE switch, which would cause received packets 2221 * to stall in the RX queue (nm_kr_tryget() could find the kring 2222 * busy when called from netmap_bwrap_intr_notify()). 2223 */ 2224 nmirq = netmap_rx_irq(ifp, rxq->vtnrx_id, &more); 2225 if (nmirq != NM_IRQ_PASS) { 2226 VTNET_RXQ_UNLOCK(rxq); 2227 if (nmirq == NM_IRQ_RESCHED) { 2228 taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); 2229 } 2230 return; 2231 } 2232 #endif /* DEV_NETMAP */ 2233 2234 again: 2235 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 2236 VTNET_RXQ_UNLOCK(rxq); 2237 return; 2238 } 2239 2240 more = vtnet_rxq_eof(rxq); 2241 if (more || vtnet_rxq_enable_intr(rxq) != 0) { 2242 if (!more) 2243 vtnet_rxq_disable_intr(rxq); 2244 /* 2245 * This is an occasional condition or race (when !more), 2246 * so retry a few times before scheduling the taskqueue. 
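 * The race: the host can post more buffers between the final
 * virtqueue_dequeue() and re-enabling the interrupt, in which case
 * vtnet_rxq_enable_intr() returns nonzero and another pass is needed.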
2247 */ 2248 if (tries-- > 0) 2249 goto again; 2250 2251 rxq->vtnrx_stats.vrxs_rescheduled++; 2252 VTNET_RXQ_UNLOCK(rxq); 2253 taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); 2254 } else 2255 VTNET_RXQ_UNLOCK(rxq); 2256 } 2257 2258 static void 2259 vtnet_rx_vq_intr(void *xrxq) 2260 { 2261 struct vtnet_rxq *rxq; 2262 2263 rxq = xrxq; 2264 vtnet_rx_vq_process(rxq, VTNET_INTR_DISABLE_RETRIES); 2265 } 2266 2267 static void 2268 vtnet_rxq_tq_intr(void *xrxq, int pending __unused) 2269 { 2270 struct vtnet_rxq *rxq; 2271 2272 rxq = xrxq; 2273 vtnet_rx_vq_process(rxq, 0); 2274 } 2275 2276 static int 2277 vtnet_txq_intr_threshold(struct vtnet_txq *txq) 2278 { 2279 struct vtnet_softc *sc; 2280 int threshold; 2281 2282 sc = txq->vtntx_sc; 2283 2284 /* 2285 * The Tx interrupt is disabled until the queue free count falls 2286 * below our threshold. Completed frames are drained from the Tx 2287 * virtqueue before transmitting new frames and in the watchdog 2288 * callout, so the frequency of Tx interrupts is greatly reduced, 2289 * at the cost of not freeing mbufs as quickly as they otherwise 2290 * would be. 2291 */ 2292 threshold = virtqueue_size(txq->vtntx_vq) / 4; 2293 2294 /* 2295 * Without indirect descriptors, leave enough room for the most 2296 * segments we handle. 2297 */ 2298 if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && 2299 threshold < sc->vtnet_tx_nsegs) 2300 threshold = sc->vtnet_tx_nsegs; 2301 2302 return (threshold); 2303 } 2304 2305 static int 2306 vtnet_txq_below_threshold(struct vtnet_txq *txq) 2307 { 2308 struct virtqueue *vq; 2309 2310 vq = txq->vtntx_vq; 2311 2312 return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold); 2313 } 2314 2315 static int 2316 vtnet_txq_notify(struct vtnet_txq *txq) 2317 { 2318 struct virtqueue *vq; 2319 2320 vq = txq->vtntx_vq; 2321 2322 txq->vtntx_watchdog = VTNET_TX_TIMEOUT; 2323 virtqueue_notify(vq); 2324 2325 if (vtnet_txq_enable_intr(txq) == 0) 2326 return (0); 2327 2328 /* 2329 * Drain frames that were completed since last checked. If this 2330 * causes the queue to go above the threshold, the caller should 2331 * continue transmitting. 2332 */ 2333 if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) { 2334 virtqueue_disable_intr(vq); 2335 return (1); 2336 } 2337 2338 return (0); 2339 } 2340 2341 static void 2342 vtnet_txq_free_mbufs(struct vtnet_txq *txq) 2343 { 2344 struct virtqueue *vq; 2345 struct vtnet_tx_header *txhdr; 2346 int last; 2347 #ifdef DEV_NETMAP 2348 struct netmap_kring *kring = netmap_kring_on(NA(txq->vtntx_sc->vtnet_ifp), 2349 txq->vtntx_id, NR_TX); 2350 #else /* !DEV_NETMAP */ 2351 void *kring = NULL; 2352 #endif /* !DEV_NETMAP */ 2353 2354 vq = txq->vtntx_vq; 2355 last = 0; 2356 2357 while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { 2358 if (kring == NULL) { 2359 m_freem(txhdr->vth_mbuf); 2360 uma_zfree(vtnet_tx_header_zone, txhdr); 2361 } 2362 } 2363 2364 KASSERT(virtqueue_empty(vq), 2365 ("%s: mbufs remaining in tx queue %p", __func__, txq)); 2366 } 2367 2368 /* 2369 * BMV: This can go away once we finally have offsets in the mbuf header. 
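 * Until then, the function parses the frame itself: *etype returns
 * the Ethernet type, *proto the IP protocol, and *start the byte
 * offset of the L4 (TCP/UDP) header within the mbuf.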
2370 */ 2371 static int 2372 vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype, 2373 int *proto, int *start) 2374 { 2375 struct vtnet_softc *sc; 2376 struct ether_vlan_header *evh; 2377 #if defined(INET) || defined(INET6) 2378 int offset; 2379 #endif 2380 2381 sc = txq->vtntx_sc; 2382 2383 evh = mtod(m, struct ether_vlan_header *); 2384 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2385 /* BMV: We should handle nested VLAN tags too. */ 2386 *etype = ntohs(evh->evl_proto); 2387 #if defined(INET) || defined(INET6) 2388 offset = sizeof(struct ether_vlan_header); 2389 #endif 2390 } else { 2391 *etype = ntohs(evh->evl_encap_proto); 2392 #if defined(INET) || defined(INET6) 2393 offset = sizeof(struct ether_header); 2394 #endif 2395 } 2396 2397 switch (*etype) { 2398 #if defined(INET) 2399 case ETHERTYPE_IP: { 2400 struct ip *ip, iphdr; 2401 if (__predict_false(m->m_len < offset + sizeof(struct ip))) { 2402 m_copydata(m, offset, sizeof(struct ip), 2403 (caddr_t) &iphdr); 2404 ip = &iphdr; 2405 } else 2406 ip = (struct ip *)(m->m_data + offset); 2407 *proto = ip->ip_p; 2408 *start = offset + (ip->ip_hl << 2); 2409 break; 2410 } 2411 #endif 2412 #if defined(INET6) 2413 case ETHERTYPE_IPV6: 2414 *proto = -1; 2415 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); 2416 /* Assert the network stack sent us a valid packet. */ 2417 KASSERT(*start > offset, 2418 ("%s: mbuf %p start %d offset %d proto %d", __func__, m, 2419 *start, offset, *proto)); 2420 break; 2421 #endif 2422 default: 2423 sc->vtnet_stats.tx_csum_unknown_ethtype++; 2424 return (EINVAL); 2425 } 2426 2427 return (0); 2428 } 2429 2430 static int 2431 vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type, 2432 int offset, struct virtio_net_hdr *hdr) 2433 { 2434 static struct timeval lastecn; 2435 static int curecn; 2436 struct vtnet_softc *sc; 2437 struct tcphdr *tcp, tcphdr; 2438 2439 sc = txq->vtntx_sc; 2440 2441 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { 2442 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); 2443 tcp = &tcphdr; 2444 } else 2445 tcp = (struct tcphdr *)(m->m_data + offset); 2446 2447 hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2)); 2448 hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz); 2449 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : 2450 VIRTIO_NET_HDR_GSO_TCPV6; 2451 2452 if (__predict_false(tcp_get_flags(tcp) & TH_CWR)) { 2453 /* 2454 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In 2455 * FreeBSD, ECN support is not on a per-interface basis, 2456 * but globally via the net.inet.tcp.ecn.enable sysctl 2457 * knob. The default is off. 2458 */ 2459 if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { 2460 if (ppsratecheck(&lastecn, &curecn, 1)) 2461 if_printf(sc->vtnet_ifp, 2462 "TSO with ECN not negotiated with host\n"); 2463 return (ENOTSUP); 2464 } 2465 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 2466 } 2467 2468 txq->vtntx_stats.vtxs_tso++; 2469 2470 return (0); 2471 } 2472 2473 static struct mbuf * 2474 vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m, 2475 struct virtio_net_hdr *hdr) 2476 { 2477 struct vtnet_softc *sc; 2478 int flags, etype, csum_start, proto, error; 2479 2480 sc = txq->vtntx_sc; 2481 flags = m->m_pkthdr.csum_flags; 2482 2483 error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start); 2484 if (error) 2485 goto drop; 2486 2487 if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) { 2488 /* Sanity check the parsed mbuf matches the offload flags. 
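 * For example, a CSUM_TCP/CSUM_UDP request (VTNET_CSUM_OFFLOAD) is
 * only coherent for an ETHERTYPE_IP frame, and the IPv6 variants
 * only for ETHERTYPE_IPV6.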
*/ 2489 if (__predict_false((flags & VTNET_CSUM_OFFLOAD && 2490 etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6 2491 && etype != ETHERTYPE_IPV6))) { 2492 sc->vtnet_stats.tx_csum_proto_mismatch++; 2493 goto drop; 2494 } 2495 2496 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; 2497 hdr->csum_start = vtnet_gtoh16(sc, csum_start); 2498 hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data); 2499 txq->vtntx_stats.vtxs_csum++; 2500 } else if ((flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) && 2501 (proto == IPPROTO_TCP || proto == IPPROTO_UDP) && 2502 (m->m_pkthdr.csum_data == 0xFFFF)) { 2503 hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID; 2504 } 2505 2506 if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) { 2507 /* 2508 * Sanity check that the parsed mbuf IP protocol is TCP; 2509 * VirtIO TSO requires the checksum offloading above. 2510 */ 2511 if (__predict_false(proto != IPPROTO_TCP)) { 2512 sc->vtnet_stats.tx_tso_not_tcp++; 2513 goto drop; 2514 } else if (__predict_false((hdr->flags & 2515 VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) { 2516 sc->vtnet_stats.tx_tso_without_csum++; 2517 goto drop; 2518 } 2519 2520 error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr); 2521 if (error) 2522 goto drop; 2523 } 2524 2525 return (m); 2526 2527 drop: 2528 m_freem(m); 2529 return (NULL); 2530 } 2531 2532 static int 2533 vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head, 2534 struct vtnet_tx_header *txhdr) 2535 { 2536 struct vtnet_softc *sc; 2537 struct virtqueue *vq; 2538 struct sglist *sg; 2539 struct mbuf *m; 2540 int error; 2541 2542 sc = txq->vtntx_sc; 2543 vq = txq->vtntx_vq; 2544 sg = txq->vtntx_sg; 2545 m = *m_head; 2546 2547 sglist_reset(sg); 2548 error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); 2549 if (error != 0 || sg->sg_nseg != 1) { 2550 KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d", 2551 __func__, error, sg->sg_nseg)); 2552 goto fail; 2553 } 2554 2555 error = sglist_append_mbuf(sg, m); 2556 if (error) { 2557 m = m_defrag(m, M_NOWAIT); 2558 if (m == NULL) { 2559 sc->vtnet_stats.tx_defrag_failed++; 2560 goto fail; 2561 } 2562 2563 *m_head = m; 2564 sc->vtnet_stats.tx_defragged++; 2565 2566 error = sglist_append_mbuf(sg, m); 2567 if (error) 2568 goto fail; 2569 } 2570 2571 txhdr->vth_mbuf = m; 2572 error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0); 2573 2574 return (error); 2575 2576 fail: 2577 m_freem(*m_head); 2578 *m_head = NULL; 2579 2580 return (ENOBUFS); 2581 } 2582 2583 static int 2584 vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags) 2585 { 2586 struct vtnet_tx_header *txhdr; 2587 struct virtio_net_hdr *hdr; 2588 struct mbuf *m; 2589 int error; 2590 2591 m = *m_head; 2592 M_ASSERTPKTHDR(m); 2593 2594 txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO); 2595 if (txhdr == NULL) { 2596 m_freem(m); 2597 *m_head = NULL; 2598 return (ENOMEM); 2599 } 2600 2601 /* 2602 * Always use the non-mergeable header, regardless of whether mergeable 2603 * headers were negotiated, because num_buffers is always zero for 2604 * transmit. The vtnet_hdr_size is used to enqueue the right header size segment.
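 * This works because the mergeable layout (struct
 * virtio_net_hdr_mrg_rxbuf) is just struct virtio_net_hdr followed
 * by num_buffers, so the shorter header is a prefix of the longer one.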
2605 */ 2606 hdr = &txhdr->vth_uhdr.hdr; 2607 2608 if (m->m_flags & M_VLANTAG) { 2609 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 2610 if ((*m_head = m) == NULL) { 2611 error = ENOBUFS; 2612 goto fail; 2613 } 2614 m->m_flags &= ~M_VLANTAG; 2615 } 2616 2617 if (m->m_pkthdr.csum_flags & 2618 (VTNET_CSUM_ALL_OFFLOAD | CSUM_DATA_VALID)) { 2619 m = vtnet_txq_offload(txq, m, hdr); 2620 if ((*m_head = m) == NULL) { 2621 error = ENOBUFS; 2622 goto fail; 2623 } 2624 } 2625 2626 error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); 2627 fail: 2628 if (error) 2629 uma_zfree(vtnet_tx_header_zone, txhdr); 2630 2631 return (error); 2632 } 2633 2634 2635 static void 2636 vtnet_start_locked(struct vtnet_txq *txq, if_t ifp) 2637 { 2638 struct vtnet_softc *sc; 2639 struct virtqueue *vq; 2640 struct mbuf *m0; 2641 int tries, enq; 2642 2643 sc = txq->vtntx_sc; 2644 vq = txq->vtntx_vq; 2645 tries = 0; 2646 2647 VTNET_TXQ_LOCK_ASSERT(txq); 2648 2649 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || 2650 sc->vtnet_link_active == 0) 2651 return; 2652 2653 vtnet_txq_eof(txq); 2654 2655 again: 2656 enq = 0; 2657 2658 while (!if_sendq_empty(ifp)) { 2659 if (virtqueue_full(vq)) 2660 break; 2661 2662 m0 = if_dequeue(ifp); 2663 if (m0 == NULL) 2664 break; 2665 2666 if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) { 2667 if (m0 != NULL) 2668 if_sendq_prepend(ifp, m0); 2669 break; 2670 } 2671 2672 enq++; 2673 ETHER_BPF_MTAP(ifp, m0); 2674 } 2675 2676 if (enq > 0 && vtnet_txq_notify(txq) != 0) { 2677 if (tries++ < VTNET_NOTIFY_RETRIES) 2678 goto again; 2679 2680 txq->vtntx_stats.vtxs_rescheduled++; 2681 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); 2682 } 2683 } 2684 2685 static void 2686 vtnet_start(if_t ifp) 2687 { 2688 struct vtnet_softc *sc; 2689 struct vtnet_txq *txq; 2690 2691 sc = if_getsoftc(ifp); 2692 txq = &sc->vtnet_txqs[0]; 2693 2694 VTNET_TXQ_LOCK(txq); 2695 vtnet_start_locked(txq, ifp); 2696 VTNET_TXQ_UNLOCK(txq); 2697 } 2698 2699 2700 static int 2701 vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m) 2702 { 2703 struct vtnet_softc *sc; 2704 struct virtqueue *vq; 2705 struct buf_ring *br; 2706 if_t ifp; 2707 int enq, tries, error; 2708 2709 sc = txq->vtntx_sc; 2710 vq = txq->vtntx_vq; 2711 br = txq->vtntx_br; 2712 ifp = sc->vtnet_ifp; 2713 tries = 0; 2714 error = 0; 2715 2716 VTNET_TXQ_LOCK_ASSERT(txq); 2717 2718 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || 2719 sc->vtnet_link_active == 0) { 2720 if (m != NULL) 2721 error = drbr_enqueue(ifp, br, m); 2722 return (error); 2723 } 2724 2725 if (m != NULL) { 2726 error = drbr_enqueue(ifp, br, m); 2727 if (error) 2728 return (error); 2729 } 2730 2731 vtnet_txq_eof(txq); 2732 2733 again: 2734 enq = 0; 2735 2736 while ((m = drbr_peek(ifp, br)) != NULL) { 2737 if (virtqueue_full(vq)) { 2738 drbr_putback(ifp, br, m); 2739 break; 2740 } 2741 2742 if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) { 2743 if (m != NULL) 2744 drbr_putback(ifp, br, m); 2745 else 2746 drbr_advance(ifp, br); 2747 break; 2748 } 2749 drbr_advance(ifp, br); 2750 2751 enq++; 2752 ETHER_BPF_MTAP(ifp, m); 2753 } 2754 2755 if (enq > 0 && vtnet_txq_notify(txq) != 0) { 2756 if (tries++ < VTNET_NOTIFY_RETRIES) 2757 goto again; 2758 2759 txq->vtntx_stats.vtxs_rescheduled++; 2760 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); 2761 } 2762 2763 return (0); 2764 } 2765 2766 static int 2767 vtnet_txq_mq_start(if_t ifp, struct mbuf *m) 2768 { 2769 struct vtnet_softc *sc; 2770 struct vtnet_txq *txq; 2771 int i, npairs, error; 2772 2773 sc = if_getsoftc(ifp); 2774 npairs = 
sc->vtnet_act_vq_pairs; 2775 2776 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 2777 i = m->m_pkthdr.flowid % npairs; 2778 else 2779 i = curcpu % npairs; 2780 2781 txq = &sc->vtnet_txqs[i]; 2782 2783 if (VTNET_TXQ_TRYLOCK(txq) != 0) { 2784 error = vtnet_txq_mq_start_locked(txq, m); 2785 VTNET_TXQ_UNLOCK(txq); 2786 } else { 2787 error = drbr_enqueue(ifp, txq->vtntx_br, m); 2788 taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask); 2789 } 2790 2791 return (error); 2792 } 2793 2794 static void 2795 vtnet_txq_tq_deferred(void *xtxq, int pending __unused) 2796 { 2797 struct vtnet_softc *sc; 2798 struct vtnet_txq *txq; 2799 2800 txq = xtxq; 2801 sc = txq->vtntx_sc; 2802 2803 VTNET_TXQ_LOCK(txq); 2804 if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br)) 2805 vtnet_txq_mq_start_locked(txq, NULL); 2806 VTNET_TXQ_UNLOCK(txq); 2807 } 2808 2809 2810 static void 2811 vtnet_txq_start(struct vtnet_txq *txq) 2812 { 2813 struct vtnet_softc *sc; 2814 if_t ifp; 2815 2816 sc = txq->vtntx_sc; 2817 ifp = sc->vtnet_ifp; 2818 2819 if (!VTNET_ALTQ_ENABLED) { 2820 if (!drbr_empty(ifp, txq->vtntx_br)) 2821 vtnet_txq_mq_start_locked(txq, NULL); 2822 } else { 2823 if (!if_sendq_empty(ifp)) 2824 vtnet_start_locked(txq, ifp); 2825 2826 } 2827 } 2828 2829 static void 2830 vtnet_txq_tq_intr(void *xtxq, int pending __unused) 2831 { 2832 struct vtnet_softc *sc; 2833 struct vtnet_txq *txq; 2834 if_t ifp; 2835 2836 txq = xtxq; 2837 sc = txq->vtntx_sc; 2838 ifp = sc->vtnet_ifp; 2839 2840 VTNET_TXQ_LOCK(txq); 2841 2842 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 2843 VTNET_TXQ_UNLOCK(txq); 2844 return; 2845 } 2846 2847 vtnet_txq_eof(txq); 2848 vtnet_txq_start(txq); 2849 2850 VTNET_TXQ_UNLOCK(txq); 2851 } 2852 2853 static int 2854 vtnet_txq_eof(struct vtnet_txq *txq) 2855 { 2856 struct virtqueue *vq; 2857 struct vtnet_tx_header *txhdr; 2858 struct mbuf *m; 2859 int deq; 2860 2861 vq = txq->vtntx_vq; 2862 deq = 0; 2863 VTNET_TXQ_LOCK_ASSERT(txq); 2864 2865 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { 2866 m = txhdr->vth_mbuf; 2867 deq++; 2868 2869 txq->vtntx_stats.vtxs_opackets++; 2870 txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len; 2871 if (m->m_flags & M_MCAST) 2872 txq->vtntx_stats.vtxs_omcasts++; 2873 2874 m_freem(m); 2875 uma_zfree(vtnet_tx_header_zone, txhdr); 2876 } 2877 2878 if (virtqueue_empty(vq)) 2879 txq->vtntx_watchdog = 0; 2880 2881 return (deq); 2882 } 2883 2884 static void 2885 vtnet_tx_vq_intr(void *xtxq) 2886 { 2887 struct vtnet_softc *sc; 2888 struct vtnet_txq *txq; 2889 if_t ifp; 2890 2891 txq = xtxq; 2892 sc = txq->vtntx_sc; 2893 ifp = sc->vtnet_ifp; 2894 2895 if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) { 2896 /* 2897 * Ignore this interrupt. Either this is a spurious interrupt 2898 * or multiqueue without per-VQ MSIX so every queue needs to 2899 * be polled (a brain dead configuration we could try harder 2900 * to avoid). 
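 * This mirrors the Rx-side handling in vtnet_rx_vq_process().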
*/ 2902 vtnet_txq_disable_intr(txq); 2903 return; 2904 } 2905 2906 #ifdef DEV_NETMAP 2907 if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS) 2908 return; 2909 #endif /* DEV_NETMAP */ 2910 2911 VTNET_TXQ_LOCK(txq); 2912 2913 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 2914 VTNET_TXQ_UNLOCK(txq); 2915 return; 2916 } 2917 2918 vtnet_txq_eof(txq); 2919 vtnet_txq_start(txq); 2920 2921 VTNET_TXQ_UNLOCK(txq); 2922 } 2923 2924 static void 2925 vtnet_tx_start_all(struct vtnet_softc *sc) 2926 { 2927 struct vtnet_txq *txq; 2928 int i; 2929 2930 VTNET_CORE_LOCK_ASSERT(sc); 2931 2932 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 2933 txq = &sc->vtnet_txqs[i]; 2934 2935 VTNET_TXQ_LOCK(txq); 2936 vtnet_txq_start(txq); 2937 VTNET_TXQ_UNLOCK(txq); 2938 } 2939 } 2940 2941 static void 2942 vtnet_qflush(if_t ifp) 2943 { 2944 struct vtnet_softc *sc; 2945 struct vtnet_txq *txq; 2946 struct mbuf *m; 2947 int i; 2948 2949 sc = if_getsoftc(ifp); 2950 2951 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 2952 txq = &sc->vtnet_txqs[i]; 2953 2954 VTNET_TXQ_LOCK(txq); 2955 while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL) 2956 m_freem(m); 2957 VTNET_TXQ_UNLOCK(txq); 2958 } 2959 2960 if_qflush(ifp); 2961 } 2962 2963 static int 2964 vtnet_watchdog(struct vtnet_txq *txq) 2965 { 2966 if_t ifp; 2967 2968 ifp = txq->vtntx_sc->vtnet_ifp; 2969 2970 VTNET_TXQ_LOCK(txq); 2971 if (txq->vtntx_watchdog == 1) { 2972 /* 2973 * Only drain completed frames if the watchdog is about to 2974 * expire. If any frames were drained, there may be enough 2975 * free descriptors now available to transmit queued frames. 2976 * In that case, the timer will immediately be decremented 2977 * below, but the timeout is generous enough that this should 2978 * not be a problem. 2979 */ 2980 if (vtnet_txq_eof(txq) != 0) 2981 vtnet_txq_start(txq); 2982 } 2983 2984 if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) { 2985 VTNET_TXQ_UNLOCK(txq); 2986 return (0); 2987 } 2988 VTNET_TXQ_UNLOCK(txq); 2989 2990 if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id); 2991 return (1); 2992 } 2993 2994 static void 2995 vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc, 2996 struct vtnet_txq_stats *txacc) 2997 { 2998 2999 bzero(rxacc, sizeof(struct vtnet_rxq_stats)); 3000 bzero(txacc, sizeof(struct vtnet_txq_stats)); 3001 3002 for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) { 3003 struct vtnet_rxq_stats *rxst; 3004 struct vtnet_txq_stats *txst; 3005 3006 rxst = &sc->vtnet_rxqs[i].vtnrx_stats; 3007 rxacc->vrxs_ipackets += rxst->vrxs_ipackets; 3008 rxacc->vrxs_ibytes += rxst->vrxs_ibytes; 3009 rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops; rxacc->vrxs_ierrors += rxst->vrxs_ierrors; 3010 rxacc->vrxs_csum += rxst->vrxs_csum; 3011 rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed; 3012 rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled; 3013 3014 txst = &sc->vtnet_txqs[i].vtntx_stats; 3015 txacc->vtxs_opackets += txst->vtxs_opackets; 3016 txacc->vtxs_obytes += txst->vtxs_obytes; txacc->vtxs_omcasts += txst->vtxs_omcasts; 3017 txacc->vtxs_csum += txst->vtxs_csum; 3018 txacc->vtxs_tso += txst->vtxs_tso; 3019 txacc->vtxs_rescheduled += txst->vtxs_rescheduled; 3020 } 3021 } 3022 3023 static uint64_t 3024 vtnet_get_counter(if_t ifp, ift_counter cnt) 3025 { 3026 struct vtnet_softc *sc; 3027 struct vtnet_rxq_stats rxaccum; 3028 struct vtnet_txq_stats txaccum; 3029 3030 sc = if_getsoftc(ifp); 3031 vtnet_accum_stats(sc, &rxaccum, &txaccum); 3032 3033 switch (cnt) { 3034 case IFCOUNTER_IPACKETS: 3035 return (rxaccum.vrxs_ipackets); 3036 case IFCOUNTER_IQDROPS: 3037 return (rxaccum.vrxs_iqdrops); 3038 case 
IFCOUNTER_IERRORS: 3039 return (rxaccum.vrxs_ierrors); 3040 case IFCOUNTER_IBYTES: 3041 return (rxaccum.vrxs_ibytes); 3042 case IFCOUNTER_OPACKETS: 3043 return (txaccum.vtxs_opackets); 3044 case IFCOUNTER_OBYTES: 3045 return (txaccum.vtxs_obytes); 3046 case IFCOUNTER_OMCASTS: 3047 return (txaccum.vtxs_omcasts); 3048 default: 3049 return (if_get_counter_default(ifp, cnt)); 3050 } 3051 } 3052 3053 static void 3054 vtnet_tick(void *xsc) 3055 { 3056 struct vtnet_softc *sc; 3057 if_t ifp; 3058 int i, timedout; 3059 3060 sc = xsc; 3061 ifp = sc->vtnet_ifp; 3062 timedout = 0; 3063 3064 VTNET_CORE_LOCK_ASSERT(sc); 3065 3066 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) 3067 timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]); 3068 3069 if (timedout != 0) { 3070 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 3071 vtnet_init_locked(sc, 0); 3072 } else 3073 callout_schedule(&sc->vtnet_tick_ch, hz); 3074 } 3075 3076 static void 3077 vtnet_start_taskqueues(struct vtnet_softc *sc) 3078 { 3079 device_t dev; 3080 struct vtnet_rxq *rxq; 3081 struct vtnet_txq *txq; 3082 int i, error; 3083 3084 dev = sc->vtnet_dev; 3085 3086 /* 3087 * Errors here are very difficult to recover from - we cannot 3088 * easily fail because, if this is during boot, we will hang 3089 * when freeing any successfully started taskqueues because 3090 * the scheduler isn't up yet. 3091 * 3092 * Most drivers just ignore the return value - it only fails 3093 * with ENOMEM so an error is not likely. 3094 */ 3095 for (i = 0; i < sc->vtnet_req_vq_pairs; i++) { 3096 rxq = &sc->vtnet_rxqs[i]; 3097 error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, 3098 "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); 3099 if (error) { 3100 device_printf(dev, "failed to start rx taskq %d\n", 3101 rxq->vtnrx_id); 3102 } 3103 3104 txq = &sc->vtnet_txqs[i]; 3105 error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET, 3106 "%s txq %d", device_get_nameunit(dev), txq->vtntx_id); 3107 if (error) { 3108 device_printf(dev, "failed to start tx taskq %d\n", 3109 txq->vtntx_id); 3110 } 3111 } 3112 } 3113 3114 static void 3115 vtnet_free_taskqueues(struct vtnet_softc *sc) 3116 { 3117 struct vtnet_rxq *rxq; 3118 struct vtnet_txq *txq; 3119 int i; 3120 3121 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 3122 rxq = &sc->vtnet_rxqs[i]; 3123 if (rxq->vtnrx_tq != NULL) { 3124 taskqueue_free(rxq->vtnrx_tq); 3125 rxq->vtnrx_tq = NULL; 3126 } 3127 3128 txq = &sc->vtnet_txqs[i]; 3129 if (txq->vtntx_tq != NULL) { 3130 taskqueue_free(txq->vtntx_tq); 3131 txq->vtntx_tq = NULL; 3132 } 3133 } 3134 } 3135 3136 static void 3137 vtnet_drain_taskqueues(struct vtnet_softc *sc) 3138 { 3139 struct vtnet_rxq *rxq; 3140 struct vtnet_txq *txq; 3141 int i; 3142 3143 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 3144 rxq = &sc->vtnet_rxqs[i]; 3145 if (rxq->vtnrx_tq != NULL) 3146 taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); 3147 3148 txq = &sc->vtnet_txqs[i]; 3149 if (txq->vtntx_tq != NULL) { 3150 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask); 3151 if (!VTNET_ALTQ_ENABLED) 3152 taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask); 3153 } 3154 } 3155 } 3156 3157 static void 3158 vtnet_drain_rxtx_queues(struct vtnet_softc *sc) 3159 { 3160 struct vtnet_rxq *rxq; 3161 struct vtnet_txq *txq; 3162 int i; 3163 3164 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 3165 rxq = &sc->vtnet_rxqs[i]; 3166 vtnet_rxq_free_mbufs(rxq); 3167 3168 txq = &sc->vtnet_txqs[i]; 3169 vtnet_txq_free_mbufs(txq); 3170 } 3171 } 3172 3173 static void 3174 vtnet_stop_rendezvous(struct vtnet_softc *sc) 3175 { 
3176 struct vtnet_rxq *rxq; 3177 struct vtnet_txq *txq; 3178 int i; 3179 3180 VTNET_CORE_LOCK_ASSERT(sc); 3181 3182 /* 3183 * Lock and unlock the per-queue mutex so we know the stop 3184 * state is visible. Doing only the active queues should be 3185 * sufficient, but it does not cost much extra to do all the 3186 * queues. 3187 */ 3188 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { 3189 rxq = &sc->vtnet_rxqs[i]; 3190 VTNET_RXQ_LOCK(rxq); 3191 VTNET_RXQ_UNLOCK(rxq); 3192 3193 txq = &sc->vtnet_txqs[i]; 3194 VTNET_TXQ_LOCK(txq); 3195 VTNET_TXQ_UNLOCK(txq); 3196 } 3197 } 3198 3199 static void 3200 vtnet_stop(struct vtnet_softc *sc) 3201 { 3202 device_t dev; 3203 if_t ifp; 3204 3205 dev = sc->vtnet_dev; 3206 ifp = sc->vtnet_ifp; 3207 3208 VTNET_CORE_LOCK_ASSERT(sc); 3209 3210 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 3211 sc->vtnet_link_active = 0; 3212 callout_stop(&sc->vtnet_tick_ch); 3213 3214 /* Only advisory. */ 3215 vtnet_disable_interrupts(sc); 3216 3217 #ifdef DEV_NETMAP 3218 /* Stop any pending txsync/rxsync and disable them. */ 3219 netmap_disable_all_rings(ifp); 3220 #endif /* DEV_NETMAP */ 3221 3222 /* 3223 * Stop the host adapter. This resets it to the pre-initialized 3224 * state. It will not generate any interrupts until after it is 3225 * reinitialized. 3226 */ 3227 virtio_stop(dev); 3228 vtnet_stop_rendezvous(sc); 3229 3230 vtnet_drain_rxtx_queues(sc); 3231 sc->vtnet_act_vq_pairs = 1; 3232 } 3233 3234 static int 3235 vtnet_virtio_reinit(struct vtnet_softc *sc) 3236 { 3237 device_t dev; 3238 if_t ifp; 3239 uint64_t features; 3240 int error; 3241 3242 dev = sc->vtnet_dev; 3243 ifp = sc->vtnet_ifp; 3244 features = sc->vtnet_negotiated_features; 3245 3246 /* 3247 * Re-negotiate with the host, removing any disabled receive 3248 * features. Transmit features are disabled only on our side 3249 * via if_capenable and if_hwassist.
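 * For example, once both IFCAP_RXCSUM and IFCAP_RXCSUM_IPV6 have been
 * disabled, VIRTIO_NET_F_GUEST_CSUM (and the LRO features that depend
 * on it) are removed from the feature set below.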
3250 */ 3251 3252 if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0) 3253 features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES); 3254 3255 if ((if_getcapenable(ifp) & IFCAP_LRO) == 0) 3256 features &= ~VTNET_LRO_FEATURES; 3257 3258 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0) 3259 features &= ~VIRTIO_NET_F_CTRL_VLAN; 3260 3261 error = virtio_reinit(dev, features); 3262 if (error) { 3263 device_printf(dev, "virtio reinit error %d\n", error); 3264 return (error); 3265 } 3266 3267 sc->vtnet_features = features; 3268 virtio_reinit_complete(dev); 3269 3270 return (0); 3271 } 3272 3273 static void 3274 vtnet_init_rx_filters(struct vtnet_softc *sc) 3275 { 3276 if_t ifp; 3277 3278 ifp = sc->vtnet_ifp; 3279 3280 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 3281 vtnet_rx_filter(sc); 3282 vtnet_rx_filter_mac(sc); 3283 } 3284 3285 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) 3286 vtnet_rx_filter_vlan(sc); 3287 } 3288 3289 static int 3290 vtnet_init_rx_queues(struct vtnet_softc *sc) 3291 { 3292 device_t dev; 3293 if_t ifp; 3294 struct vtnet_rxq *rxq; 3295 int i, clustersz, error; 3296 3297 dev = sc->vtnet_dev; 3298 ifp = sc->vtnet_ifp; 3299 3300 clustersz = vtnet_rx_cluster_size(sc, if_getmtu(ifp)); 3301 sc->vtnet_rx_clustersz = clustersz; 3302 3303 if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) { 3304 sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) + 3305 VTNET_MAX_RX_SIZE, clustersz); 3306 KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, 3307 ("%s: too many rx mbufs %d for %d segments", __func__, 3308 sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); 3309 } else 3310 sc->vtnet_rx_nmbufs = 1; 3311 3312 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 3313 rxq = &sc->vtnet_rxqs[i]; 3314 3315 /* Hold the lock to satisfy asserts. 
*/ 3316 VTNET_RXQ_LOCK(rxq); 3317 error = vtnet_rxq_populate(rxq); 3318 VTNET_RXQ_UNLOCK(rxq); 3319 3320 if (error) { 3321 device_printf(dev, "cannot populate Rx queue %d\n", i); 3322 return (error); 3323 } 3324 } 3325 3326 return (0); 3327 } 3328 3329 static int 3330 vtnet_init_tx_queues(struct vtnet_softc *sc) 3331 { 3332 struct vtnet_txq *txq; 3333 int i; 3334 3335 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { 3336 txq = &sc->vtnet_txqs[i]; 3337 txq->vtntx_watchdog = 0; 3338 txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq); 3339 #ifdef DEV_NETMAP 3340 netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0); 3341 #endif /* DEV_NETMAP */ 3342 } 3343 3344 return (0); 3345 } 3346 3347 static int 3348 vtnet_init_rxtx_queues(struct vtnet_softc *sc) 3349 { 3350 int error; 3351 3352 error = vtnet_init_rx_queues(sc); 3353 if (error) 3354 return (error); 3355 3356 error = vtnet_init_tx_queues(sc); 3357 if (error) 3358 return (error); 3359 3360 return (0); 3361 } 3362 3363 static void 3364 vtnet_set_active_vq_pairs(struct vtnet_softc *sc) 3365 { 3366 device_t dev; 3367 int npairs; 3368 3369 dev = sc->vtnet_dev; 3370 3371 if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) { 3372 sc->vtnet_act_vq_pairs = 1; 3373 return; 3374 } 3375 3376 npairs = sc->vtnet_req_vq_pairs; 3377 3378 if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { 3379 device_printf(dev, "cannot set active queue pairs to %d, " 3380 "falling back to 1 queue pair\n", npairs); 3381 npairs = 1; 3382 } 3383 3384 sc->vtnet_act_vq_pairs = npairs; 3385 } 3386 3387 static void 3388 vtnet_update_rx_offloads(struct vtnet_softc *sc) 3389 { 3390 if_t ifp; 3391 uint64_t features; 3392 int error; 3393 3394 ifp = sc->vtnet_ifp; 3395 features = sc->vtnet_features; 3396 3397 VTNET_CORE_LOCK_ASSERT(sc); 3398 3399 if (if_getcapabilities(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { 3400 if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) 3401 features |= VIRTIO_NET_F_GUEST_CSUM; 3402 else 3403 features &= ~VIRTIO_NET_F_GUEST_CSUM; 3404 } 3405 3406 if (if_getcapabilities(ifp) & IFCAP_LRO && !vtnet_software_lro(sc)) { 3407 if (if_getcapenable(ifp) & IFCAP_LRO) 3408 features |= VTNET_LRO_FEATURES; 3409 else 3410 features &= ~VTNET_LRO_FEATURES; 3411 } 3412 3413 error = vtnet_ctrl_guest_offloads(sc, 3414 features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | 3415 VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN | 3416 VIRTIO_NET_F_GUEST_UFO)); 3417 if (error) { 3418 device_printf(sc->vtnet_dev, 3419 "%s: cannot update Rx features\n", __func__); 3420 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 3421 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 3422 vtnet_init_locked(sc, 0); 3423 } 3424 } else 3425 sc->vtnet_features = features; 3426 } 3427 3428 static int 3429 vtnet_reinit(struct vtnet_softc *sc) 3430 { 3431 if_t ifp; 3432 int error; 3433 3434 ifp = sc->vtnet_ifp; 3435 3436 bcopy(if_getlladdr(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); 3437 3438 error = vtnet_virtio_reinit(sc); 3439 if (error) 3440 return (error); 3441 3442 vtnet_set_macaddr(sc); 3443 vtnet_set_active_vq_pairs(sc); 3444 3445 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) 3446 vtnet_init_rx_filters(sc); 3447 3448 if_sethwassist(ifp, 0); 3449 if (if_getcapenable(ifp) & IFCAP_TXCSUM) 3450 if_sethwassistbits(ifp, VTNET_CSUM_OFFLOAD, 0); 3451 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) 3452 if_sethwassistbits(ifp, VTNET_CSUM_OFFLOAD_IPV6, 0); 3453 if (if_getcapenable(ifp) & IFCAP_TSO4) 3454 if_sethwassistbits(ifp, CSUM_IP_TSO, 0); 3455 if (if_getcapenable(ifp) & IFCAP_TSO6) 3456 if_sethwassistbits(ifp, 
CSUM_IP6_TSO, 0); 3457 3458 error = vtnet_init_rxtx_queues(sc); 3459 if (error) 3460 return (error); 3461 3462 return (0); 3463 } 3464 3465 static void 3466 vtnet_init_locked(struct vtnet_softc *sc, int init_mode) 3467 { 3468 if_t ifp; 3469 3470 ifp = sc->vtnet_ifp; 3471 3472 VTNET_CORE_LOCK_ASSERT(sc); 3473 3474 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 3475 return; 3476 3477 vtnet_stop(sc); 3478 3479 #ifdef DEV_NETMAP 3480 /* Once stopped we can update the netmap flags, if necessary. */ 3481 switch (init_mode) { 3482 case VTNET_INIT_NETMAP_ENTER: 3483 nm_set_native_flags(NA(ifp)); 3484 break; 3485 case VTNET_INIT_NETMAP_EXIT: 3486 nm_clear_native_flags(NA(ifp)); 3487 break; 3488 } 3489 #endif /* DEV_NETMAP */ 3490 3491 if (vtnet_reinit(sc) != 0) { 3492 vtnet_stop(sc); 3493 return; 3494 } 3495 3496 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 3497 vtnet_update_link_status(sc); 3498 vtnet_enable_interrupts(sc); 3499 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 3500 3501 #ifdef DEV_NETMAP 3502 /* Re-enable txsync/rxsync. */ 3503 netmap_enable_all_rings(ifp); 3504 #endif /* DEV_NETMAP */ 3505 } 3506 3507 static void 3508 vtnet_init(void *xsc) 3509 { 3510 struct vtnet_softc *sc; 3511 3512 sc = xsc; 3513 3514 VTNET_CORE_LOCK(sc); 3515 vtnet_init_locked(sc, 0); 3516 VTNET_CORE_UNLOCK(sc); 3517 } 3518 3519 static void 3520 vtnet_free_ctrl_vq(struct vtnet_softc *sc) 3521 { 3522 3523 /* 3524 * The control virtqueue is only polled and therefore it should 3525 * already be empty. 3526 */ 3527 KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), 3528 ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq)); 3529 } 3530 3531 static void 3532 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, 3533 struct sglist *sg, int readable, int writable) 3534 { 3535 struct virtqueue *vq; 3536 3537 vq = sc->vtnet_ctrl_vq; 3538 3539 MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ); 3540 VTNET_CORE_LOCK_ASSERT(sc); 3541 3542 if (!virtqueue_empty(vq)) 3543 return; 3544 3545 /* 3546 * Poll for the response, but the command is likely completed before 3547 * returning from the notify. 3548 */ 3549 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0) { 3550 virtqueue_notify(vq); 3551 virtqueue_poll(vq, NULL); 3552 } 3553 } 3554 3555 static int 3556 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) 3557 { 3558 struct sglist_seg segs[3]; 3559 struct sglist sg; 3560 struct { 3561 struct virtio_net_ctrl_hdr hdr __aligned(2); 3562 uint8_t pad1; 3563 uint8_t addr[ETHER_ADDR_LEN] __aligned(8); 3564 uint8_t pad2; 3565 uint8_t ack; 3566 } s; 3567 int error; 3568 3569 error = 0; 3570 MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC); 3571 3572 s.hdr.class = VIRTIO_NET_CTRL_MAC; 3573 s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; 3574 bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN); 3575 s.ack = VIRTIO_NET_ERR; 3576 3577 sglist_init(&sg, nitems(segs), segs); 3578 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 3579 error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN); 3580 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 3581 MPASS(error == 0 && sg.sg_nseg == nitems(segs)); 3582 3583 if (error == 0) 3584 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 3585 3586 return (s.ack == VIRTIO_NET_OK ? 
0 : EIO); 3587 } 3588 3589 static int 3590 vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads) 3591 { 3592 struct sglist_seg segs[3]; 3593 struct sglist sg; 3594 struct { 3595 struct virtio_net_ctrl_hdr hdr __aligned(2); 3596 uint8_t pad1; 3597 uint64_t offloads __aligned(8); 3598 uint8_t pad2; 3599 uint8_t ack; 3600 } s; 3601 int error; 3602 3603 error = 0; 3604 MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); 3605 3606 s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS; 3607 s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET; 3608 s.offloads = vtnet_gtoh64(sc, offloads); 3609 s.ack = VIRTIO_NET_ERR; 3610 3611 sglist_init(&sg, nitems(segs), segs); 3612 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 3613 error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t)); 3614 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 3615 MPASS(error == 0 && sg.sg_nseg == nitems(segs)); 3616 3617 if (error == 0) 3618 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 3619 3620 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 3621 } 3622 3623 static int 3624 vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs) 3625 { 3626 struct sglist_seg segs[3]; 3627 struct sglist sg; 3628 struct { 3629 struct virtio_net_ctrl_hdr hdr __aligned(2); 3630 uint8_t pad1; 3631 struct virtio_net_ctrl_mq mq __aligned(2); 3632 uint8_t pad2; 3633 uint8_t ack; 3634 } s; 3635 int error; 3636 3637 error = 0; 3638 MPASS(sc->vtnet_flags & VTNET_FLAG_MQ); 3639 3640 s.hdr.class = VIRTIO_NET_CTRL_MQ; 3641 s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; 3642 s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs); 3643 s.ack = VIRTIO_NET_ERR; 3644 3645 sglist_init(&sg, nitems(segs), segs); 3646 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 3647 error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); 3648 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 3649 MPASS(error == 0 && sg.sg_nseg == nitems(segs)); 3650 3651 if (error == 0) 3652 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 3653 3654 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 3655 } 3656 3657 static int 3658 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, bool on) 3659 { 3660 struct sglist_seg segs[3]; 3661 struct sglist sg; 3662 struct { 3663 struct virtio_net_ctrl_hdr hdr __aligned(2); 3664 uint8_t pad1; 3665 uint8_t onoff; 3666 uint8_t pad2; 3667 uint8_t ack; 3668 } s; 3669 int error; 3670 3671 error = 0; 3672 MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX); 3673 3674 s.hdr.class = VIRTIO_NET_CTRL_RX; 3675 s.hdr.cmd = cmd; 3676 s.onoff = on; 3677 s.ack = VIRTIO_NET_ERR; 3678 3679 sglist_init(&sg, nitems(segs), segs); 3680 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 3681 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); 3682 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 3683 MPASS(error == 0 && sg.sg_nseg == nitems(segs)); 3684 3685 if (error == 0) 3686 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 3687 3688 return (s.ack == VIRTIO_NET_OK ? 
0 : EIO); 3689 } 3690 3691 static int 3692 vtnet_set_promisc(struct vtnet_softc *sc, bool on) 3693 { 3694 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); 3695 } 3696 3697 static int 3698 vtnet_set_allmulti(struct vtnet_softc *sc, bool on) 3699 { 3700 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); 3701 } 3702 3703 static void 3704 vtnet_rx_filter(struct vtnet_softc *sc) 3705 { 3706 device_t dev; 3707 if_t ifp; 3708 3709 dev = sc->vtnet_dev; 3710 ifp = sc->vtnet_ifp; 3711 3712 VTNET_CORE_LOCK_ASSERT(sc); 3713 3714 if (vtnet_set_promisc(sc, if_getflags(ifp) & IFF_PROMISC) != 0) { 3715 device_printf(dev, "cannot %s promiscuous mode\n", 3716 if_getflags(ifp) & IFF_PROMISC ? "enable" : "disable"); 3717 } 3718 3719 if (vtnet_set_allmulti(sc, if_getflags(ifp) & IFF_ALLMULTI) != 0) { 3720 device_printf(dev, "cannot %s all-multicast mode\n", 3721 if_getflags(ifp) & IFF_ALLMULTI ? "enable" : "disable"); 3722 } 3723 } 3724 3725 static u_int 3726 vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt) 3727 { 3728 struct vtnet_softc *sc = arg; 3729 3730 if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0) 3731 return (0); 3732 3733 if (ucnt < VTNET_MAX_MAC_ENTRIES) 3734 bcopy(LLADDR(sdl), 3735 &sc->vtnet_mac_filter->vmf_unicast.macs[ucnt], 3736 ETHER_ADDR_LEN); 3737 3738 return (1); 3739 } 3740 3741 static u_int 3742 vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 3743 { 3744 struct vtnet_mac_filter *filter = arg; 3745 3746 if (mcnt < VTNET_MAX_MAC_ENTRIES) 3747 bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[mcnt], 3748 ETHER_ADDR_LEN); 3749 3750 return (1); 3751 } 3752 3753 static void 3754 vtnet_rx_filter_mac(struct vtnet_softc *sc) 3755 { 3756 struct virtio_net_ctrl_hdr hdr __aligned(2); 3757 struct vtnet_mac_filter *filter; 3758 struct sglist_seg segs[4]; 3759 struct sglist sg; 3760 if_t ifp; 3761 bool promisc, allmulti; 3762 u_int ucnt, mcnt; 3763 int error; 3764 uint8_t ack; 3765 3766 ifp = sc->vtnet_ifp; 3767 filter = sc->vtnet_mac_filter; 3768 error = 0; 3769 3770 MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX); 3771 VTNET_CORE_LOCK_ASSERT(sc); 3772 3773 /* Unicast MAC addresses: */ 3774 ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc); 3775 promisc = (ucnt > VTNET_MAX_MAC_ENTRIES); 3776 3777 if (promisc) { 3778 ucnt = 0; 3779 if_printf(ifp, "more than %d MAC addresses assigned, " 3780 "falling back to promiscuous mode\n", 3781 VTNET_MAX_MAC_ENTRIES); 3782 } 3783 3784 /* Multicast MAC addresses: */ 3785 mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter); 3786 allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES); 3787 3788 if (allmulti) { 3789 mcnt = 0; 3790 if_printf(ifp, "more than %d multicast MAC addresses " 3791 "assigned, falling back to all-multicast mode\n", 3792 VTNET_MAX_MAC_ENTRIES); 3793 } 3794 3795 if (promisc && allmulti) 3796 goto out; 3797 3798 filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt); 3799 filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt); 3800 3801 hdr.class = VIRTIO_NET_CTRL_MAC; 3802 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; 3803 ack = VIRTIO_NET_ERR; 3804 3805 sglist_init(&sg, nitems(segs), segs); 3806 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); 3807 error |= sglist_append(&sg, &filter->vmf_unicast, 3808 sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN); 3809 error |= sglist_append(&sg, &filter->vmf_multicast, 3810 sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN); 3811 error |= sglist_append(&sg, &ack, sizeof(uint8_t)); 3812 MPASS(error == 0 && sg.sg_nseg == nitems(segs)); 3813 
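/*
 * The scatter list now holds the header, the unicast table, the
 * multicast table and the ack byte; everything but the final ack
 * segment is device-readable, hence the (sg.sg_nseg - 1, 1) split
 * passed to vtnet_exec_ctrl_cmd() below.
 */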
3814 if (error == 0) 3815 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); 3816 if (ack != VIRTIO_NET_OK) 3817 if_printf(ifp, "error setting host MAC filter table\n"); 3818 3819 out: 3820 if (promisc && vtnet_set_promisc(sc, true) != 0) 3821 if_printf(ifp, "cannot enable promiscuous mode\n"); 3822 if (allmulti && vtnet_set_allmulti(sc, true) != 0) 3823 if_printf(ifp, "cannot enable all-multicast mode\n"); 3824 } 3825 3826 static int 3827 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 3828 { 3829 struct sglist_seg segs[3]; 3830 struct sglist sg; 3831 struct { 3832 struct virtio_net_ctrl_hdr hdr __aligned(2); 3833 uint8_t pad1; 3834 uint16_t tag __aligned(2); 3835 uint8_t pad2; 3836 uint8_t ack; 3837 } s; 3838 int error; 3839 3840 error = 0; 3841 MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER); 3842 3843 s.hdr.class = VIRTIO_NET_CTRL_VLAN; 3844 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; 3845 s.tag = vtnet_gtoh16(sc, tag); 3846 s.ack = VIRTIO_NET_ERR; 3847 3848 sglist_init(&sg, nitems(segs), segs); 3849 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 3850 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); 3851 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 3852 MPASS(error == 0 && sg.sg_nseg == nitems(segs)); 3853 3854 if (error == 0) 3855 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 3856 3857 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 3858 } 3859 3860 static void 3861 vtnet_rx_filter_vlan(struct vtnet_softc *sc) 3862 { 3863 int i, bit; 3864 uint32_t w; 3865 uint16_t tag; 3866 3867 MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER); 3868 VTNET_CORE_LOCK_ASSERT(sc); 3869 3870 /* Enable the filter for each configured VLAN. */ 3871 for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { 3872 w = sc->vtnet_vlan_filter[i]; 3873 3874 while ((bit = ffs(w) - 1) != -1) { 3875 w &= ~(1 << bit); 3876 tag = sizeof(w) * CHAR_BIT * i + bit; 3877 3878 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) { 3879 device_printf(sc->vtnet_dev, 3880 "cannot enable VLAN %d filter\n", tag); 3881 } 3882 } 3883 } 3884 } 3885 3886 static void 3887 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 3888 { 3889 if_t ifp; 3890 int idx, bit; 3891 3892 ifp = sc->vtnet_ifp; 3893 idx = (tag >> 5) & 0x7F; 3894 bit = tag & 0x1F; 3895 3896 if (tag == 0 || tag > 4095) 3897 return; 3898 3899 VTNET_CORE_LOCK(sc); 3900 3901 if (add) 3902 sc->vtnet_vlan_filter[idx] |= (1 << bit); 3903 else 3904 sc->vtnet_vlan_filter[idx] &= ~(1 << bit); 3905 3906 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER && 3907 if_getdrvflags(ifp) & IFF_DRV_RUNNING && 3908 vtnet_exec_vlan_filter(sc, add, tag) != 0) { 3909 device_printf(sc->vtnet_dev, 3910 "cannot %s VLAN %d %s the host filter table\n", 3911 add ? "add" : "remove", tag, add ? "to" : "from"); 3912 } 3913 3914 VTNET_CORE_UNLOCK(sc); 3915 } 3916 3917 static void 3918 vtnet_register_vlan(void *arg, if_t ifp, uint16_t tag) 3919 { 3920 3921 if (if_getsoftc(ifp) != arg) 3922 return; 3923 3924 vtnet_update_vlan_filter(arg, 1, tag); 3925 } 3926 3927 static void 3928 vtnet_unregister_vlan(void *arg, if_t ifp, uint16_t tag) 3929 { 3930 3931 if (if_getsoftc(ifp) != arg) 3932 return; 3933 3934 vtnet_update_vlan_filter(arg, 0, tag); 3935 } 3936 3937 static void 3938 vtnet_update_speed_duplex(struct vtnet_softc *sc) 3939 { 3940 if_t ifp; 3941 uint32_t speed; 3942 3943 ifp = sc->vtnet_ifp; 3944 3945 if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0) 3946 return; 3947 3948 /* BMV: Ignore duplex. 
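 * The device reports speed in Mbit/s; UINT32_MAX means the speed is
 * unknown, in which case the baudrate is left unchanged.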
*/ 3949 speed = virtio_read_dev_config_4(sc->vtnet_dev, 3950 offsetof(struct virtio_net_config, speed)); 3951 if (speed != UINT32_MAX) 3952 if_setbaudrate(ifp, IF_Mbps(speed)); 3953 } 3954 3955 static int 3956 vtnet_is_link_up(struct vtnet_softc *sc) 3957 { 3958 uint16_t status; 3959 3960 if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0) 3961 return (1); 3962 3963 status = virtio_read_dev_config_2(sc->vtnet_dev, 3964 offsetof(struct virtio_net_config, status)); 3965 3966 return ((status & VIRTIO_NET_S_LINK_UP) != 0); 3967 } 3968 3969 static void 3970 vtnet_update_link_status(struct vtnet_softc *sc) 3971 { 3972 if_t ifp; 3973 int link; 3974 3975 ifp = sc->vtnet_ifp; 3976 VTNET_CORE_LOCK_ASSERT(sc); 3977 link = vtnet_is_link_up(sc); 3978 3979 /* Notify if the link status has changed. */ 3980 if (link != 0 && sc->vtnet_link_active == 0) { 3981 vtnet_update_speed_duplex(sc); 3982 sc->vtnet_link_active = 1; 3983 if_link_state_change(ifp, LINK_STATE_UP); 3984 } else if (link == 0 && sc->vtnet_link_active != 0) { 3985 sc->vtnet_link_active = 0; 3986 if_link_state_change(ifp, LINK_STATE_DOWN); 3987 } 3988 } 3989 3990 static int 3991 vtnet_ifmedia_upd(if_t ifp __unused) 3992 { 3993 return (EOPNOTSUPP); 3994 } 3995 3996 static void 3997 vtnet_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 3998 { 3999 struct vtnet_softc *sc; 4000 4001 sc = if_getsoftc(ifp); 4002 4003 ifmr->ifm_status = IFM_AVALID; 4004 ifmr->ifm_active = IFM_ETHER; 4005 4006 VTNET_CORE_LOCK(sc); 4007 if (vtnet_is_link_up(sc) != 0) { 4008 ifmr->ifm_status |= IFM_ACTIVE; 4009 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 4010 } else 4011 ifmr->ifm_active |= IFM_NONE; 4012 VTNET_CORE_UNLOCK(sc); 4013 } 4014 4015 static void 4016 vtnet_get_macaddr(struct vtnet_softc *sc) 4017 { 4018 4019 if (sc->vtnet_flags & VTNET_FLAG_MAC) { 4020 virtio_read_device_config_array(sc->vtnet_dev, 4021 offsetof(struct virtio_net_config, mac), 4022 &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN); 4023 } else { 4024 /* Generate a random locally administered unicast address. */ 4025 sc->vtnet_hwaddr[0] = 0xB2; 4026 arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); 4027 } 4028 } 4029 4030 static void 4031 vtnet_set_macaddr(struct vtnet_softc *sc) 4032 { 4033 device_t dev; 4034 int error; 4035 4036 dev = sc->vtnet_dev; 4037 4038 if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { 4039 error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr); 4040 if (error) 4041 device_printf(dev, "unable to set MAC address\n"); 4042 return; 4043 } 4044 4045 /* MAC in config is read-only in modern VirtIO. */ 4046 if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) { 4047 for (int i = 0; i < ETHER_ADDR_LEN; i++) { 4048 virtio_write_dev_config_1(dev, 4049 offsetof(struct virtio_net_config, mac) + i, 4050 sc->vtnet_hwaddr[i]); 4051 } 4052 } 4053 } 4054 4055 static void 4056 vtnet_attached_set_macaddr(struct vtnet_softc *sc) 4057 { 4058 4059 /* Assign MAC address if it was generated. */ 4060 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) 4061 vtnet_set_macaddr(sc); 4062 } 4063 4064 static void 4065 vtnet_vlan_tag_remove(struct mbuf *m) 4066 { 4067 struct ether_vlan_header *evh; 4068 4069 evh = mtod(m, struct ether_vlan_header *); 4070 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); 4071 m->m_flags |= M_VLANTAG; 4072 4073 /* Strip the 802.1Q header. 
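 * The bcopy() slides the 12 bytes of destination and source MAC
 * addresses (ETHER_HDR_LEN - ETHER_TYPE_LEN) forward over the 4-byte
 * tag, and m_adj() then trims the now-redundant leading bytes.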
*/ 4074 bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, 4075 ETHER_HDR_LEN - ETHER_TYPE_LEN); 4076 m_adj(m, ETHER_VLAN_ENCAP_LEN); 4077 } 4078 4079 static void 4080 vtnet_set_rx_process_limit(struct vtnet_softc *sc) 4081 { 4082 int limit; 4083 4084 limit = vtnet_tunable_int(sc, "rx_process_limit", 4085 vtnet_rx_process_limit); 4086 if (limit < 0) 4087 limit = INT_MAX; 4088 sc->vtnet_rx_process_limit = limit; 4089 } 4090 4091 static void 4092 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, 4093 struct sysctl_oid_list *child, struct vtnet_rxq *rxq) 4094 { 4095 struct sysctl_oid *node; 4096 struct sysctl_oid_list *list; 4097 struct vtnet_rxq_stats *stats; 4098 char namebuf[16]; 4099 4100 snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id); 4101 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 4102 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue"); 4103 list = SYSCTL_CHILDREN(node); 4104 4105 stats = &rxq->vtnrx_stats; 4106 4107 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", 4108 CTLFLAG_RD | CTLFLAG_STATS, 4109 &stats->vrxs_ipackets, "Receive packets"); 4110 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", 4111 CTLFLAG_RD | CTLFLAG_STATS, 4112 &stats->vrxs_ibytes, "Receive bytes"); 4113 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", 4114 CTLFLAG_RD | CTLFLAG_STATS, 4115 &stats->vrxs_iqdrops, "Receive drops"); 4116 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", 4117 CTLFLAG_RD | CTLFLAG_STATS, 4118 &stats->vrxs_ierrors, "Receive errors"); 4119 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", 4120 CTLFLAG_RD | CTLFLAG_STATS, 4121 &stats->vrxs_csum, "Receive checksum offloaded"); 4122 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", 4123 CTLFLAG_RD | CTLFLAG_STATS, 4124 &stats->vrxs_csum_failed, "Receive checksum offload failed"); 4125 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", 4126 CTLFLAG_RD | CTLFLAG_STATS, 4127 &stats->vrxs_host_lro, "Receive host segmentation offloaded"); 4128 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", 4129 CTLFLAG_RD | CTLFLAG_STATS, 4130 &stats->vrxs_rescheduled, 4131 "Receive interrupt handler rescheduled"); 4132 } 4133 4134 static void 4135 vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx, 4136 struct sysctl_oid_list *child, struct vtnet_txq *txq) 4137 { 4138 struct sysctl_oid *node; 4139 struct sysctl_oid_list *list; 4140 struct vtnet_txq_stats *stats; 4141 char namebuf[16]; 4142 4143 snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id); 4144 node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 4145 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue"); 4146 list = SYSCTL_CHILDREN(node); 4147 4148 stats = &txq->vtntx_stats; 4149 4150 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", 4151 CTLFLAG_RD | CTLFLAG_STATS, 4152 &stats->vtxs_opackets, "Transmit packets"); 4153 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", 4154 CTLFLAG_RD | CTLFLAG_STATS, 4155 &stats->vtxs_obytes, "Transmit bytes"); 4156 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", 4157 CTLFLAG_RD | CTLFLAG_STATS, 4158 &stats->vtxs_omcasts, "Transmit multicasts"); 4159 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", 4160 CTLFLAG_RD | CTLFLAG_STATS, 4161 &stats->vtxs_csum, "Transmit checksum offloaded"); 4162 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", 4163 CTLFLAG_RD | CTLFLAG_STATS, 4164 &stats->vtxs_tso, "Transmit TCP segmentation offloaded"); 4165 SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", 4166 CTLFLAG_RD | CTLFLAG_STATS, 4167 &stats->vtxs_rescheduled, 4168 "Transmit interrupt handler rescheduled"); 4169 } 4170 4171 static void 4172 

static void
vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	int i;

	dev = sc->vtnet_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
	}
}

static int
vtnet_sysctl_rx_csum_failed(SYSCTL_HANDLER_ARGS)
{
	struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
	struct vtnet_statistics *stats = &sc->vtnet_stats;
	struct vtnet_rxq_stats *rxst;
	int i;

	stats->rx_csum_failed = 0;
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
		stats->rx_csum_failed += rxst->vrxs_csum_failed;
	}
	return (sysctl_handle_64(oidp, NULL, stats->rx_csum_failed, req));
}

static int
vtnet_sysctl_rx_csum_offloaded(SYSCTL_HANDLER_ARGS)
{
	struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
	struct vtnet_statistics *stats = &sc->vtnet_stats;
	struct vtnet_rxq_stats *rxst;
	int i;

	stats->rx_csum_offloaded = 0;
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
		stats->rx_csum_offloaded += rxst->vrxs_csum;
	}
	return (sysctl_handle_64(oidp, NULL, stats->rx_csum_offloaded, req));
}

static int
vtnet_sysctl_rx_task_rescheduled(SYSCTL_HANDLER_ARGS)
{
	struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
	struct vtnet_statistics *stats = &sc->vtnet_stats;
	struct vtnet_rxq_stats *rxst;
	int i;

	stats->rx_task_rescheduled = 0;
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
		stats->rx_task_rescheduled += rxst->vrxs_rescheduled;
	}
	return (sysctl_handle_64(oidp, NULL, stats->rx_task_rescheduled, req));
}

static int
vtnet_sysctl_tx_csum_offloaded(SYSCTL_HANDLER_ARGS)
{
	struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
	struct vtnet_statistics *stats = &sc->vtnet_stats;
	struct vtnet_txq_stats *txst;
	int i;

	stats->tx_csum_offloaded = 0;
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		txst = &sc->vtnet_txqs[i].vtntx_stats;
		stats->tx_csum_offloaded += txst->vtxs_csum;
	}
	return (sysctl_handle_64(oidp, NULL, stats->tx_csum_offloaded, req));
}

static int
vtnet_sysctl_tx_tso_offloaded(SYSCTL_HANDLER_ARGS)
{
	struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
	struct vtnet_statistics *stats = &sc->vtnet_stats;
	struct vtnet_txq_stats *txst;
	int i;

	stats->tx_tso_offloaded = 0;
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		txst = &sc->vtnet_txqs[i].vtntx_stats;
		stats->tx_tso_offloaded += txst->vtxs_tso;
	}
	return (sysctl_handle_64(oidp, NULL, stats->tx_tso_offloaded, req));
}

static int
vtnet_sysctl_tx_task_rescheduled(SYSCTL_HANDLER_ARGS)
{
	struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
	struct vtnet_statistics *stats = &sc->vtnet_stats;
	struct vtnet_txq_stats *txst;
	int i;

	stats->tx_task_rescheduled = 0;
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		txst = &sc->vtnet_txqs[i].vtntx_stats;
		stats->tx_task_rescheduled += txst->vtxs_rescheduled;
	}
	return (sysctl_handle_64(oidp, NULL, stats->tx_task_rescheduled, req));
}
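
/*
 * Each handler above recomputes its aggregate from the per-queue
 * counters on every read, so a query such as (unit 0 assumed):
 *
 *   # sysctl dev.vtnet.0.rx_csum_failed
 *
 * always reports the current sum across all queues rather than a
 * snapshot taken at attach time.
 */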

static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child, struct vtnet_softc *sc)
{
	struct vtnet_statistics *stats;
	struct vtnet_rxq_stats rxaccum;
	struct vtnet_txq_stats txaccum;

	vtnet_accum_stats(sc, &rxaccum, &txaccum);

	/* Seed the aggregates from the current per-queue counters. */
	stats = &sc->vtnet_stats;
	stats->rx_csum_offloaded = rxaccum.vrxs_csum;
	stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
	stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
	stats->tx_csum_offloaded = txaccum.vtxs_csum;
	stats->tx_tso_offloaded = txaccum.vtxs_tso;
	stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->mbuf_alloc_failed,
	    "Mbuf cluster allocation failures");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_frame_too_large,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_enq_replacement_failed,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_mergeable_failed,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ipproto,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_offset,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_inaccessible_ipproto",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_inaccessible_ipproto,
	    "Received checksum offloaded buffer with inaccessible IP protocol");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
	    sc, 0, vtnet_sysctl_rx_csum_failed, "QU",
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
	    sc, 0, vtnet_sysctl_rx_csum_offloaded, "QU",
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
	    sc, 0, vtnet_sysctl_rx_task_rescheduled, "QU",
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_unknown_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_proto_mismatch,
	    "Aborted transmit of checksum offloaded buffer because of "
	    "mismatched protocols");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_not_tcp,
	    "Aborted transmit of TSO buffer with non-TCP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_without_csum,
	    "Aborted transmit of TSO buffer without TCP checksum offload");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defragged,
	    "Transmit mbufs defragged");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
	    CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defrag_failed,
	    "Aborted transmit of buffer because defrag failed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
	    sc, 0, vtnet_sysctl_tx_csum_offloaded, "QU",
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
	    sc, 0, vtnet_sysctl_tx_tso_offloaded, "QU",
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
	    sc, 0, vtnet_sysctl_tx_task_rescheduled, "QU",
	    "Times the transmit interrupt task rescheduled itself");
}
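
/*
 * Note the split above: the SYSCTL_ADD_UQUAD entries export
 * free-running driver counters directly, while the SYSCTL_ADD_PROC
 * entries are re-summed from the per-queue statistics on every read
 * by the handlers defined earlier.
 */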

static void
vtnet_setup_sysctl(struct vtnet_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
	    "Maximum number of supported virtqueue pairs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
	    "Number of requested virtqueue pairs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
	    "Number of active virtqueue pairs");

	vtnet_setup_stat_sysctl(ctx, child, sc);
}

static void
vtnet_load_tunables(struct vtnet_softc *sc)
{

	sc->vtnet_lro_entry_count = vtnet_tunable_int(sc,
	    "lro_entry_count", vtnet_lro_entry_count);
	if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES)
		sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES;

	sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
	    "lro_mbufq_depth", vtnet_lro_mbufq_depth);
}
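
/*
 * The LRO knobs above, like "rx_process_limit" earlier, are fetched
 * per device by vtnet_tunable_int() below, so they may be set from
 * loader.conf(5), e.g. (unit 0, values chosen for illustration only):
 *
 *   hw.vtnet.0.lro_entry_count="128"
 *   hw.vtnet.0.rx_process_limit="512"
 */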

static int
vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
{

	return (virtqueue_enable_intr(rxq->vtnrx_vq));
}

static void
vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
{

	virtqueue_disable_intr(rxq->vtnrx_vq);
}

static int
vtnet_txq_enable_intr(struct vtnet_txq *txq)
{
	struct virtqueue *vq;

	vq = txq->vtntx_vq;

	if (vtnet_txq_below_threshold(txq) != 0)
		return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));

	/*
	 * The free count is above our threshold. Keep the Tx interrupt
	 * disabled until the queue is fuller.
	 */
	return (0);
}

static void
vtnet_txq_disable_intr(struct vtnet_txq *txq)
{

	virtqueue_disable_intr(txq->vtntx_vq);
}

static void
vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
{
	struct vtnet_rxq *rxq;
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		rxq = &sc->vtnet_rxqs[i];
		/*
		 * Reschedule the handler to process any buffers that
		 * arrived while the interrupt was disabled.
		 */
		if (vtnet_rxq_enable_intr(rxq) != 0)
			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
	}
}

static void
vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
		vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
}

static void
vtnet_enable_interrupts(struct vtnet_softc *sc)
{

	vtnet_enable_rx_interrupts(sc);
	vtnet_enable_tx_interrupts(sc);
}

static void
vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
{
	int i;

	/* Disable on every queue, not just the active pairs. */
	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
}

static void
vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
{
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
}

static void
vtnet_disable_interrupts(struct vtnet_softc *sc)
{

	vtnet_disable_rx_interrupts(sc);
	vtnet_disable_tx_interrupts(sc);
}

static int
vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}

#ifdef DEBUGNET
static void
vtnet_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	struct vtnet_softc *sc;

	sc = if_getsoftc(ifp);

	VTNET_CORE_LOCK(sc);
	*nrxr = sc->vtnet_req_vq_pairs;
	*ncl = DEBUGNET_MAX_IN_FLIGHT;
	*clsize = sc->vtnet_rx_clustersz;
	VTNET_CORE_UNLOCK(sc);
}
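
/*
 * Debugnet transmits and receives by polling the driver directly,
 * e.g. for netdump(4) from a panicked kernel, rather than relying on
 * the interrupt-driven path used during normal operation.
 */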
4547 */ 4548 sc = if_getsoftc(ifp); 4549 switch (event) { 4550 case DEBUGNET_START: 4551 sw_lro_enabled = (sc->vtnet_flags & VTNET_FLAG_SW_LRO) != 0; 4552 if (sw_lro_enabled) 4553 sc->vtnet_flags &= ~VTNET_FLAG_SW_LRO; 4554 break; 4555 case DEBUGNET_END: 4556 if (sw_lro_enabled) 4557 sc->vtnet_flags |= VTNET_FLAG_SW_LRO; 4558 break; 4559 } 4560 } 4561 4562 static int 4563 vtnet_debugnet_transmit(if_t ifp, struct mbuf *m) 4564 { 4565 struct vtnet_softc *sc; 4566 struct vtnet_txq *txq; 4567 int error; 4568 4569 sc = if_getsoftc(ifp); 4570 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 4571 IFF_DRV_RUNNING) 4572 return (EBUSY); 4573 4574 txq = &sc->vtnet_txqs[0]; 4575 error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE); 4576 if (error == 0) 4577 (void)vtnet_txq_notify(txq); 4578 return (error); 4579 } 4580 4581 static int 4582 vtnet_debugnet_poll(if_t ifp, int count) 4583 { 4584 struct vtnet_softc *sc; 4585 int i; 4586 4587 sc = if_getsoftc(ifp); 4588 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 4589 IFF_DRV_RUNNING) 4590 return (EBUSY); 4591 4592 (void)vtnet_txq_eof(&sc->vtnet_txqs[0]); 4593 for (i = 0; i < sc->vtnet_act_vq_pairs; i++) 4594 (void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]); 4595 return (0); 4596 } 4597 #endif /* DEBUGNET */ 4598