/*
 * Copyright (c) 2013 Chris Torek <torek @ torek net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

/*
 * viona - VirtIO-Net, Accelerated
 *
 * The purpose of viona is to provide high performance virtio-net devices to
 * bhyve guests. It does so by sitting directly atop MAC, skipping all of the
 * DLS/DLD stack.
 *
 * --------------------
 * General Architecture
 * --------------------
 *
 * A single viona instance is comprised of a "link" handle and two "rings".
 * After opening the viona device, it must be associated with a MAC network
 * interface and a bhyve (vmm) instance to form its link resource. This is
 * done with the VNA_IOC_CREATE ioctl, where the datalink ID and vmm fd are
 * passed in to perform the initialization. With the MAC client opened, and a
 * driver handle to the vmm instance established, the device is ready to be
 * configured by the guest.
 *
 * The userspace portion of bhyve, which interfaces with the PCI device
 * emulation framework, is meant to stay out of the datapath if at all
 * possible. Configuration changes made via PCI are mapped to actions which
 * will steer the operation of the in-kernel logic.
 *
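 * For illustration, the userspace side of link creation might look roughly
 * like the hedged sketch below, where 'linkid' is the datalink ID of the
 * MAC interface and 'vmfd' is an open descriptor for the bhyve (vmm)
 * instance. The device path and error handling are assumptions made for
 * the sake of the example; the vioc_create_t field names are those
 * consumed by viona_ioc_create() below.
 *
 *	int ctl = open("/dev/viona", O_RDWR);
 *	vioc_create_t vc = { .c_linkid = linkid, .c_vmfd = vmfd };
 *
 *	if (ctl < 0 || ioctl(ctl, VNA_IOC_CREATE, &vc) != 0)
 *		err(EXIT_FAILURE, "failed to create viona link");
 *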
 *
 * -----------
 * Ring Basics
 * -----------
 *
 * Each viona link has two viona_vring_t entities, RX and TX, for handling data
 * transfers to and from the guest. They represent an interface to the
 * standard virtio ring structures. When initialized and active, each ring is
 * backed by a kernel worker thread (parented to the bhyve process for the
 * instance) which handles ring events. The RX worker has the simple task of
 * watching for ring shutdown conditions. The TX worker does that in addition
 * to processing all requests to transmit data. Data destined for the guest is
 * delivered directly by MAC to viona_rx() when the ring is active.
 *
 *
 * -----------
 * Ring States
 * -----------
 *
 * The viona_vring_t instances follow a simple path through the possible state
 * values represented in viona_vring_t`vr_state:
 *
 *        +<--------------------------------------------+
 *        |                                             |
 *        V                                             ^
 *  +-----------+  This is the initial state when a link is created or
 *  | VRS_RESET |  when the ring has been explicitly reset.
 *  +-----------+
 *        |                                             ^
 *        |---* ioctl(VNA_IOC_RING_INIT) issued         |
 *        |                                             |
 *        |                                             ^
 *        V
 *  +-----------+  The ring parameters (size, guest physical addresses)
 *  | VRS_SETUP |  have been set and start-up of the ring worker thread
 *  +-----------+  has begun.
 *        |                                             ^
 *        |                                             |
 *        |---* ring worker thread begins execution     |
 *        |                                             |
 *        +-------------------------------------------->+
 *        |         |                                   ^
 *        |         |
 *        |         * If ring shutdown is requested (by ioctl or impending
 *        |           bhyve process death) while the worker thread is
 *        |           starting, the worker will transition the ring to
 *        |           VRS_RESET and exit.
 *        |                                             ^
 *        |                                             |
 *        |<-------------------------------------------<+
 *        |         |                                   |
 *        |         |                                   ^
 *        |         * If the ring is requested to pause (but not stop)
 *        |           from the VRS_RUN state, it will return to the
 *        |           VRS_INIT state.
 *        |
 *        |                                             ^
 *        |                                             |
 *        |                                             ^
 *        V
 *  +-----------+  The worker thread associated with the ring has started
 *  | VRS_INIT  |  executing. It has allocated any extra resources needed
 *  +-----------+  for the ring to operate.
 *        |                                             ^
 *        |                                             |
 *        +-------------------------------------------->+
 *        |         |                                   ^
 *        |         |
 *        |         * If ring shutdown is requested while the worker is
 *        |           waiting in VRS_INIT, it will free any extra resources
 *        |           and transition to VRS_RESET.
 *        |                                             ^
 *        |                                             |
 *        |---* ioctl(VNA_IOC_RING_KICK) issued         |
 *        |                                             ^
 *        V
 *  +-----------+  The worker thread associated with the ring is executing
 *  | VRS_RUN   |  workload specific to that ring.
 *  +-----------+
 *        |                                             ^
 *        |---* ioctl(VNA_IOC_RING_RESET) issued        |
 *        |     (or bhyve process begins exit)          ^
 *        |                                             |
 *  +-----------+  The worker thread associated with the ring is in the
 *  | VRS_STOP  |  process of exiting. All outstanding TX and RX
 *  +-----------+  requests are allowed to complete, but new requests
 *        |        must be ignored.
 *        |                                             ^
 *        |                                             |
 *        +-------------------------------------------->+
 *
 * While the worker thread is not running, changes to vr_state are only made by
 * viona_ioc_ring_init() under vr_lock. There, it initializes the ring, starts
 * the worker, and sets the ring state to VRS_SETUP. Once the worker thread
 * has been started, only it may perform ring state transitions (still under
 * the protection of vr_lock), when requested by outside consumers via
 * vr_state_flags or when the containing bhyve process initiates an exit.
 *
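 * As a hedged sketch of the common lifecycle, a userspace consumer holding
 * a created link descriptor 'vfd' might walk a ring through these states
 * like so (the queue size and guest-physical address are illustrative, and
 * error handling is elided):
 *
 *	vioc_ring_init_t ri = {
 *		.ri_index = VIONA_VQ_TX,
 *		.ri_qsize = 1024,
 *		.ri_qaddr = ring_gpa,
 *	};
 *
 *	(void) ioctl(vfd, VNA_IOC_RING_INIT, &ri);
 *		(VRS_RESET -> VRS_SETUP, then VRS_INIT once the worker runs)
 *	(void) ioctl(vfd, VNA_IOC_RING_KICK, VIONA_VQ_TX);
 *		(VRS_INIT -> VRS_RUN)
 *	...
 *	(void) ioctl(vfd, VNA_IOC_RING_RESET, VIONA_VQ_TX);
 *		(back to VRS_RESET, via VRS_STOP)
 *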
 *
 * ----------------------------
 * Transmission mblk_t Handling
 * ----------------------------
 *
 * For incoming frames destined for a bhyve guest, the data must first land in
 * a host OS buffer from the physical NIC before it is copied into the awaiting
 * guest buffer(s). Outbound frames transmitted by the guest are not bound by
 * this limitation and can avoid extra copying before the buffers are accessed
 * directly by the NIC. When a guest designates buffers to be transmitted,
 * viona translates the guest-physical addresses contained in the ring
 * descriptors to host-virtual addresses via viona_hold_page(). That pointer is
 * wrapped in an mblk_t using a preallocated viona_desb_t for the desballoc().
 * Doing so increments vr_xfer_outstanding, preventing the ring from being
 * reset (allowing the link to drop its vmm handle to the guest) until all
 * transmit mblks referencing guest memory have been processed. Allocation of
 * the viona_desb_t entries is done during the VRS_INIT stage of the ring
 * worker thread. The ring size informs that allocation, as the number of
 * concurrent transmissions is limited by the number of descriptors in the
 * ring. This minimizes allocation in the transmit hot-path by acquiring those
 * fixed-size resources during initialization.
 *
 * This optimization depends on the underlying NIC driver freeing the mblks in
 * a timely manner after they have been transmitted by the hardware. Some
 * drivers have been found to flush TX descriptors only when new transmissions
 * are initiated. This means there is no upper bound on the time needed for an
 * mblk to be flushed, which can stall bhyve guests from shutting down, since
 * their memory must be free of viona TX references prior to clean-up.
 *
 * This expectation of deterministic mblk_t processing is likely the reason
 * behind the notable exception to the zero-copy TX path: systems with 'bnxe'
 * loaded will copy transmit data into fresh buffers rather than passing up
 * zero-copy mblks. It is a hold-over from the original viona sources provided
 * by Pluribus and its continued necessity has not been confirmed.
 *
 *
 * ----------------------------
 * Ring Notification Fast-paths
 * ----------------------------
 *
 * Device operation for viona requires that notifications flow to and from the
 * guest to indicate certain ring conditions. In order to minimize latency and
 * processing overhead, the notification procedures are kept in-kernel whenever
 * possible.
 *
 * Guest-to-host notifications, when new available descriptors have been placed
 * in the ring, are posted via the 'queue notify' address in the virtio BAR.
 * The vmm_drv_ioport_hook() interface was added to bhyve, allowing viona to
 * install a callback hook on an ioport address. Guest exits for accesses to
 * viona-hooked ioport addresses will result in direct calls to notify the
 * appropriate ring worker without a trip to userland.
 *
 * Host-to-guest notifications in the form of interrupts enjoy similar
 * acceleration. Each viona ring can be configured to send MSI notifications
 * to the guest as virtio conditions dictate. This in-kernel interrupt
 * configuration is kept synchronized through viona ioctls which are utilized
 * during writes to the associated PCI config registers or MSI-X BAR.
 *
 * For guests which do not utilize MSI-X, viona falls back to the slow path
 * for interrupts: the userspace VMM will poll(2) the viona handle, receiving
 * notification when ring events necessitate the assertion of an interrupt.
 *
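 * Both fast-paths are configured from userspace as the guest programs the
 * virtio device. A hedged sketch of that configuration (the register values
 * are illustrative; the structure fields are those consumed by
 * viona_ioc_ring_set_msi() below):
 *
 *	(void) ioctl(vfd, VNA_IOC_SET_NOTIFY_IOP, notify_ioport);
 *
 *	vioc_ring_msi_t rm = {
 *		.rm_index = VIONA_VQ_RX,
 *		.rm_addr = msi_addr,
 *		.rm_msg = msi_msg,
 *	};
 *	(void) ioctl(vfd, VNA_IOC_RING_SET_MSI, &rm);
 *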
 *
 * ---------------
 * Nethook Support
 * ---------------
 *
 * Viona provides four nethook events that consumers (e.g. ipf) can hook into
 * to intercept packets as they go up or down the stack. Unfortunately,
 * the nethook framework does not understand raw packets, so we can only
 * generate events (in, out) for IPv4 and IPv6 packets. At driver attach,
 * we register callbacks with the neti (netinfo) module that will be invoked
 * for each netstack already present, as well as for any additional netstack
 * instances created as the system operates. These callbacks will
 * register/unregister the hooks with the nethook framework for each
 * netstack instance. This registration occurs prior to creating any
 * viona instances for a given netstack, and the unregistration for a
 * netstack occurs only after all viona instances for that netstack have
 * been deleted.
 */

#include <sys/conf.h>
#include <sys/file.h>
#include <sys/stat.h>

#include <sys/dlpi.h>

#include "viona_impl.h"


#define	VIONA_NAME		"Virtio Network Accelerator"
#define	VIONA_CTL_MINOR		0
#define	VIONA_CLI_NAME		"viona"		/* MAC client name */


/*
 * Host capabilities.
 */
#define	VIONA_S_HOSTCAPS	(	\
	VIRTIO_NET_F_GUEST_CSUM |	\
	VIRTIO_NET_F_MAC |		\
	VIRTIO_NET_F_GUEST_TSO4 |	\
	VIRTIO_NET_F_MRG_RXBUF |	\
	VIRTIO_NET_F_STATUS |		\
	VIRTIO_F_RING_NOTIFY_ON_EMPTY |	\
	VIRTIO_F_RING_INDIRECT_DESC)

/* MAC_CAPAB_HCKSUM specifics of interest */
#define	VIONA_CAP_HCKSUM_INTEREST	\
	(HCKSUM_INET_PARTIAL |		\
	HCKSUM_INET_FULL_V4 |		\
	HCKSUM_INET_FULL_V6)
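
/*
 * Feature negotiation is performed through VNA_IOC_GET_FEATURES and
 * VNA_IOC_SET_FEATURES (see viona_ioctl() below). A hedged sketch of the
 * userspace side, where 'guest_feat' is assumed to hold the bits
 * acknowledged by the guest during virtio negotiation:
 *
 *	int feat;
 *
 *	(void) ioctl(vfd, VNA_IOC_GET_FEATURES, &feat);
 *	feat &= guest_feat;
 *	(void) ioctl(vfd, VNA_IOC_SET_FEATURES, &feat);
 *
 * The SET path re-masks the value against the host capabilities and clears
 * TSO bits whose checksum-offload prerequisites were not negotiated.
 */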

static void		*viona_state;
static dev_info_t	*viona_dip;
static id_space_t	*viona_minors;


static int viona_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result);
static int viona_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int viona_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int viona_open(dev_t *devp, int flag, int otype, cred_t *credp);
static int viona_close(dev_t dev, int flag, int otype, cred_t *credp);
static int viona_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval);
static int viona_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp);

static int viona_ioc_create(viona_soft_state_t *, void *, int, cred_t *);
static int viona_ioc_delete(viona_soft_state_t *, boolean_t);

static int viona_ioc_set_notify_ioport(viona_link_t *, uint16_t);
static int viona_ioc_ring_init(viona_link_t *, void *, int);
static int viona_ioc_ring_set_state(viona_link_t *, void *, int);
static int viona_ioc_ring_get_state(viona_link_t *, void *, int);
static int viona_ioc_ring_reset(viona_link_t *, uint_t);
static int viona_ioc_ring_kick(viona_link_t *, uint_t);
static int viona_ioc_ring_pause(viona_link_t *, uint_t);
static int viona_ioc_ring_set_msi(viona_link_t *, void *, int);
static int viona_ioc_ring_intr_clear(viona_link_t *, uint_t);
static int viona_ioc_intr_poll(viona_link_t *, void *, int, int *);

static struct cb_ops viona_cb_ops = {
	viona_open,		/* cb_open */
	viona_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	viona_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	viona_chpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* cb_str */
	D_MP | D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops viona_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	viona_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	viona_attach,		/* devo_attach */
	viona_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&viona_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_needed	/* devo_quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	VIONA_NAME,
	&viona_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&viona_state, sizeof (viona_soft_state_t), 0);
	if (ret != 0) {
		return (ret);
	}

	viona_minors = id_space_create("viona_minors",
	    VIONA_CTL_MINOR + 1, UINT16_MAX);
	viona_rx_init();
	mutex_init(&viona_force_copy_lock, NULL, MUTEX_DRIVER, NULL);

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		ddi_soft_state_fini(&viona_state);
		id_space_destroy(viona_minors);
		viona_rx_fini();
		mutex_destroy(&viona_force_copy_lock);
	}

	return (ret);
}

int
_fini(void)
{
	int ret;

	ret = mod_remove(&modlinkage);
	if (ret != 0) {
		return (ret);
	}

	ddi_soft_state_fini(&viona_state);
	id_space_destroy(viona_minors);
	viona_rx_fini();
	mutex_destroy(&viona_force_copy_lock);

	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
static int
viona_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int error;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)viona_dip;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
		break;
	}
	return (error);
}

static int
viona_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(dip, "viona", S_IFCHR, VIONA_CTL_MINOR,
	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	viona_neti_attach();

	viona_dip = dip;
	ddi_report_dev(viona_dip);

	return (DDI_SUCCESS);
}

static int
viona_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dev_info_t *old_dip = viona_dip;

	if (cmd != DDI_DETACH) {
		return (DDI_FAILURE);
	}

	VERIFY(old_dip != NULL);

	viona_neti_detach();
	viona_dip = NULL;
	ddi_remove_minor_node(old_dip, NULL);

	return (DDI_SUCCESS);
}
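
/*
 * Each open of the control minor node clones a fresh instance: a minor
 * number is allocated, per-open soft state is created for it, and the
 * returned dev_t is rewritten to reference the new minor. All subsequent
 * operations on that descriptor act upon its private viona_soft_state_t.
 */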

static int
viona_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	int minor;
	viona_soft_state_t *ss;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}
#if 0
	/*
	 * XXX-mg: drv_priv() is wrong, but I'm not sure what is right.
	 * Should the check be at open() or ioctl()?
	 */
	if (drv_priv(credp) != 0) {
		return (EPERM);
	}
#endif
	if (getminor(*devp) != VIONA_CTL_MINOR) {
		return (ENXIO);
	}

	minor = id_alloc_nosleep(viona_minors);
	if (minor == -1) {
		/* All minors are busy */
		return (EBUSY);
	}
	if (ddi_soft_state_zalloc(viona_state, minor) != DDI_SUCCESS) {
		id_free(viona_minors, minor);
		return (ENOMEM);
	}

	ss = ddi_get_soft_state(viona_state, minor);
	mutex_init(&ss->ss_lock, NULL, MUTEX_DEFAULT, NULL);
	*devp = makedevice(getmajor(*devp), minor);

	return (0);
}

static int
viona_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	int minor;
	viona_soft_state_t *ss;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	minor = getminor(dev);

	ss = ddi_get_soft_state(viona_state, minor);
	if (ss == NULL) {
		return (ENXIO);
	}

	VERIFY0(viona_ioc_delete(ss, B_TRUE));
	VERIFY(!list_link_active(&ss->ss_node));
	ddi_soft_state_free(viona_state, minor);
	id_free(viona_minors, minor);

	return (0);
}
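
/*
 * Ioctl dispatch: link creation/deletion and the version query are serviced
 * without an established link. Every other command requires a live link
 * (not mid-destruction, with its vmm hold intact) and runs under ss_lock.
 */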

static int
viona_ioctl(dev_t dev, int cmd, intptr_t data, int md, cred_t *cr, int *rv)
{
	viona_soft_state_t *ss;
	void *dptr = (void *)data;
	int err = 0, val;
	viona_link_t *link;

	ss = ddi_get_soft_state(viona_state, getminor(dev));
	if (ss == NULL) {
		return (ENXIO);
	}

	switch (cmd) {
	case VNA_IOC_CREATE:
		return (viona_ioc_create(ss, dptr, md, cr));
	case VNA_IOC_DELETE:
		return (viona_ioc_delete(ss, B_FALSE));
	case VNA_IOC_VERSION:
		*rv = VIONA_CURRENT_INTERFACE_VERSION;
		return (0);
	default:
		break;
	}

	mutex_enter(&ss->ss_lock);
	if ((link = ss->ss_link) == NULL || link->l_destroyed ||
	    vmm_drv_release_reqd(link->l_vm_hold)) {
		mutex_exit(&ss->ss_lock);
		return (ENXIO);
	}

	switch (cmd) {
	case VNA_IOC_GET_FEATURES:
		val = VIONA_S_HOSTCAPS | link->l_features_hw;
		if (ddi_copyout(&val, dptr, sizeof (val), md) != 0) {
			err = EFAULT;
		}
		break;
	case VNA_IOC_SET_FEATURES:
		if (ddi_copyin(dptr, &val, sizeof (val), md) != 0) {
			err = EFAULT;
			break;
		}
		val &= (VIONA_S_HOSTCAPS | link->l_features_hw);

		if ((val & VIRTIO_NET_F_CSUM) == 0)
			val &= ~VIRTIO_NET_F_HOST_TSO4;

		if ((val & VIRTIO_NET_F_GUEST_CSUM) == 0)
			val &= ~VIRTIO_NET_F_GUEST_TSO4;

		link->l_features = val;
		break;
	case VNA_IOC_RING_INIT:
		err = viona_ioc_ring_init(link, dptr, md);
		break;
	case VNA_IOC_RING_RESET:
		err = viona_ioc_ring_reset(link, (uint_t)data);
		break;
	case VNA_IOC_RING_KICK:
		err = viona_ioc_ring_kick(link, (uint_t)data);
		break;
	case VNA_IOC_RING_SET_MSI:
		err = viona_ioc_ring_set_msi(link, dptr, md);
		break;
	case VNA_IOC_RING_INTR_CLR:
		err = viona_ioc_ring_intr_clear(link, (uint_t)data);
		break;
	case VNA_IOC_RING_SET_STATE:
		err = viona_ioc_ring_set_state(link, dptr, md);
		break;
	case VNA_IOC_RING_GET_STATE:
		err = viona_ioc_ring_get_state(link, dptr, md);
		break;
	case VNA_IOC_RING_PAUSE:
		err = viona_ioc_ring_pause(link, (uint_t)data);
		break;

	case VNA_IOC_INTR_POLL:
		err = viona_ioc_intr_poll(link, dptr, md, rv);
		break;
	case VNA_IOC_SET_NOTIFY_IOP:
		if (data < 0 || data > UINT16_MAX) {
			err = EINVAL;
			break;
		}
		err = viona_ioc_set_notify_ioport(link, (uint16_t)data);
		break;
	default:
		err = ENOTTY;
		break;
	}

	mutex_exit(&ss->ss_lock);
	return (err);
}

static int
viona_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	viona_soft_state_t *ss;
	viona_link_t *link;

	ss = ddi_get_soft_state(viona_state, getminor(dev));
	if (ss == NULL) {
		return (ENXIO);
	}

	mutex_enter(&ss->ss_lock);
	if ((link = ss->ss_link) == NULL || link->l_destroyed) {
		mutex_exit(&ss->ss_lock);
		return (ENXIO);
	}

	*reventsp = 0;
	if ((events & POLLRDBAND) != 0) {
		for (uint_t i = 0; i < VIONA_VQ_MAX; i++) {
			if (link->l_vrings[i].vr_intr_enabled != 0) {
				*reventsp |= POLLRDBAND;
				break;
			}
		}
	}
	if ((*reventsp == 0 && !anyyet) || (events & POLLET)) {
		*phpp = &link->l_pollhead;
	}
	mutex_exit(&ss->ss_lock);

	return (0);
}

static void
viona_get_mac_capab(viona_link_t *link)
{
	mac_handle_t mh = link->l_mh;
	uint32_t cap = 0;
	mac_capab_lso_t lso_cap;

	link->l_features_hw = 0;
	if (mac_capab_get(mh, MAC_CAPAB_HCKSUM, &cap)) {
		/*
		 * Only report HW checksum ability if the underlying MAC
		 * resource is capable of populating the L4 header.
		 */
		if ((cap & VIONA_CAP_HCKSUM_INTEREST) != 0) {
			link->l_features_hw |= VIRTIO_NET_F_CSUM;
		}
		link->l_cap_csum = cap;
	}

	if ((link->l_features_hw & VIRTIO_NET_F_CSUM) &&
	    mac_capab_get(mh, MAC_CAPAB_LSO, &lso_cap)) {
		/*
		 * Virtio doesn't allow for negotiating a maximum LSO
		 * packet size. We have to assume that the guest may
		 * send a maximum length IP packet. Make sure the
		 * underlying MAC can handle an LSO of this size.
		 */
		if ((lso_cap.lso_flags & LSO_TX_BASIC_TCP_IPV4) &&
		    lso_cap.lso_basic_tcp_ipv4.lso_max >= IP_MAXPACKET)
			link->l_features_hw |= VIRTIO_NET_F_HOST_TSO4;
	}
}

static int
viona_ioc_create(viona_soft_state_t *ss, void *dptr, int md, cred_t *cr)
{
	vioc_create_t kvc;
	viona_link_t *link = NULL;
	char cli_name[MAXNAMELEN];
	int err = 0;
	file_t *fp;
	vmm_hold_t *hold = NULL;
	viona_neti_t *nip = NULL;
	zoneid_t zid;

	ASSERT(MUTEX_NOT_HELD(&ss->ss_lock));

	if (ddi_copyin(dptr, &kvc, sizeof (kvc), md) != 0) {
		return (EFAULT);
	}

	zid = crgetzoneid(cr);
	nip = viona_neti_lookup_by_zid(zid);
	if (nip == NULL) {
		return (EIO);
	}

	if (!nip->vni_nethook.vnh_hooked) {
		viona_neti_rele(nip);
		return (EIO);
	}

	mutex_enter(&ss->ss_lock);
	if (ss->ss_link != NULL) {
		mutex_exit(&ss->ss_lock);
		viona_neti_rele(nip);
		return (EEXIST);
	}

	if ((fp = getf(kvc.c_vmfd)) == NULL) {
		err = EBADF;
		goto bail;
	}
	err = vmm_drv_hold(fp, cr, &hold);
	releasef(kvc.c_vmfd);
	if (err != 0) {
		goto bail;
	}

	link = kmem_zalloc(sizeof (viona_link_t), KM_SLEEP);
	link->l_linkid = kvc.c_linkid;
	link->l_vm_hold = hold;

	err = mac_open_by_linkid(link->l_linkid, &link->l_mh);
	if (err != 0) {
		goto bail;
	}

	viona_get_mac_capab(link);

	(void) snprintf(cli_name, sizeof (cli_name), "%s-%d", VIONA_CLI_NAME,
	    link->l_linkid);
	err = mac_client_open(link->l_mh, &link->l_mch, cli_name, 0);
	if (err != 0) {
		goto bail;
	}

	viona_ring_alloc(link, &link->l_vrings[VIONA_VQ_RX]);
	viona_ring_alloc(link, &link->l_vrings[VIONA_VQ_TX]);

	if ((err = viona_rx_set(link)) != 0) {
		viona_ring_free(&link->l_vrings[VIONA_VQ_RX]);
		viona_ring_free(&link->l_vrings[VIONA_VQ_TX]);
		goto bail;
	}

	link->l_neti = nip;
	ss->ss_link = link;
	mutex_exit(&ss->ss_lock);

	mutex_enter(&nip->vni_lock);
	list_insert_tail(&nip->vni_dev_list, ss);
	mutex_exit(&nip->vni_lock);

	return (0);

bail:
	if (link != NULL) {
		if (link->l_mch != NULL) {
			mac_client_close(link->l_mch, 0);
		}
		if (link->l_mh != NULL) {
			mac_close(link->l_mh);
		}
		kmem_free(link, sizeof (viona_link_t));
	}
	if (hold != NULL) {
		vmm_drv_rele(hold);
	}
	viona_neti_rele(nip);

	mutex_exit(&ss->ss_lock);
	return (err);
}

static int
viona_ioc_delete(viona_soft_state_t *ss, boolean_t on_close)
{
	viona_link_t *link;
	viona_neti_t *nip = NULL;

	mutex_enter(&ss->ss_lock);
	if ((link = ss->ss_link) == NULL) {
		/* Link destruction already complete */
		mutex_exit(&ss->ss_lock);
		return (0);
	}

	if (link->l_destroyed) {
		/*
		 * Link destruction has been started by another thread, but has
		 * not completed. This condition should be impossible to
		 * encounter when performing the on-close destroy of the link,
		 * since racing ioctl accessors must necessarily be absent.
		 */
		VERIFY(!on_close);
		mutex_exit(&ss->ss_lock);
		return (EAGAIN);
	}
	/*
	 * The link deletion cannot fail after this point, continuing until its
	 * successful completion is reached.
	 */
	link->l_destroyed = B_TRUE;

	/*
	 * Tear down the IO port hook so it cannot be used to kick any of the
	 * rings which are about to be reset and stopped.
	 */
	VERIFY0(viona_ioc_set_notify_ioport(link, 0));
	mutex_exit(&ss->ss_lock);

	/*
	 * Return the rings to their reset state, ignoring any possible
	 * interruptions from signals.
	 */
	VERIFY0(viona_ring_reset(&link->l_vrings[VIONA_VQ_RX], B_FALSE));
	VERIFY0(viona_ring_reset(&link->l_vrings[VIONA_VQ_TX], B_FALSE));

	mutex_enter(&ss->ss_lock);
	if (link->l_mch != NULL) {
		/* Unhook the receive callbacks and close out the client */
		viona_rx_clear(link);
		mac_client_close(link->l_mch, 0);
	}
	if (link->l_mh != NULL) {
		mac_close(link->l_mh);
	}
	if (link->l_vm_hold != NULL) {
		vmm_drv_rele(link->l_vm_hold);
		link->l_vm_hold = NULL;
	}

	nip = link->l_neti;
	link->l_neti = NULL;

	viona_ring_free(&link->l_vrings[VIONA_VQ_RX]);
	viona_ring_free(&link->l_vrings[VIONA_VQ_TX]);
	pollhead_clean(&link->l_pollhead);
	ss->ss_link = NULL;
	mutex_exit(&ss->ss_lock);

	mutex_enter(&nip->vni_lock);
	list_remove(&nip->vni_dev_list, ss);
	mutex_exit(&nip->vni_lock);

	viona_neti_rele(nip);

	kmem_free(link, sizeof (viona_link_t));
	return (0);
}

static int
viona_ioc_ring_init(viona_link_t *link, void *udata, int md)
{
	vioc_ring_init_t kri;
	int err;

	if (ddi_copyin(udata, &kri, sizeof (kri), md) != 0) {
		return (EFAULT);
	}
	const struct viona_ring_params params = {
		.vrp_pa = kri.ri_qaddr,
		.vrp_size = kri.ri_qsize,
		.vrp_avail_idx = 0,
		.vrp_used_idx = 0,
	};

	err = viona_ring_init(link, kri.ri_index, &params);

	return (err);
}

static int
viona_ioc_ring_set_state(viona_link_t *link, void *udata, int md)
{
	vioc_ring_state_t krs;
	int err;

	if (ddi_copyin(udata, &krs, sizeof (krs), md) != 0) {
		return (EFAULT);
	}
	const struct viona_ring_params params = {
		.vrp_pa = krs.vrs_qaddr,
		.vrp_size = krs.vrs_qsize,
		.vrp_avail_idx = krs.vrs_avail_idx,
		.vrp_used_idx = krs.vrs_used_idx,
	};

	err = viona_ring_init(link, krs.vrs_index, &params);

	return (err);
}

static int
viona_ioc_ring_get_state(viona_link_t *link, void *udata, int md)
{
	vioc_ring_state_t krs;

	if (ddi_copyin(udata, &krs, sizeof (krs), md) != 0) {
		return (EFAULT);
	}

	struct viona_ring_params params;
	int err = viona_ring_get_state(link, krs.vrs_index, &params);
	if (err != 0) {
		return (err);
	}
	krs.vrs_qsize = params.vrp_size;
	krs.vrs_qaddr = params.vrp_pa;
	krs.vrs_avail_idx = params.vrp_avail_idx;
	krs.vrs_used_idx = params.vrp_used_idx;

	if (ddi_copyout(&krs, udata, sizeof (krs), md) != 0) {
		return (EFAULT);
	}
	return (0);
}
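
/*
 * Together with VNA_IOC_RING_PAUSE, the get/set-state ioctls allow a
 * consumer to quiesce a ring and capture (or later restore) its position,
 * e.g. across a save/restore of the instance. A hedged sketch of the
 * capture side (error handling elided):
 *
 *	(void) ioctl(vfd, VNA_IOC_RING_PAUSE, idx);
 *
 *	vioc_ring_state_t rs = { .vrs_index = idx };
 *	(void) ioctl(vfd, VNA_IOC_RING_GET_STATE, &rs);
 *
 * After the pause drains the ring back to VRS_INIT, rs.vrs_avail_idx and
 * rs.vrs_used_idx reflect the quiesced ring and can be replayed later via
 * VNA_IOC_RING_SET_STATE.
 */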

static int
viona_ioc_ring_reset(viona_link_t *link, uint_t idx)
{
	viona_vring_t *ring;

	if (idx >= VIONA_VQ_MAX) {
		return (EINVAL);
	}
	ring = &link->l_vrings[idx];

	return (viona_ring_reset(ring, B_TRUE));
}

static int
viona_ioc_ring_kick(viona_link_t *link, uint_t idx)
{
	viona_vring_t *ring;
	int err;

	if (idx >= VIONA_VQ_MAX) {
		return (EINVAL);
	}
	ring = &link->l_vrings[idx];

	mutex_enter(&ring->vr_lock);
	switch (ring->vr_state) {
	case VRS_SETUP:
		/*
		 * An early kick to a ring which is starting its worker thread
		 * is fine. Once that thread is active, it will process the
		 * start-up request immediately.
		 */
		/* FALLTHROUGH */
	case VRS_INIT:
		ring->vr_state_flags |= VRSF_REQ_START;
		/* FALLTHROUGH */
	case VRS_RUN:
		cv_broadcast(&ring->vr_cv);
		err = 0;
		break;
	default:
		err = EBUSY;
		break;
	}
	mutex_exit(&ring->vr_lock);

	return (err);
}

static int
viona_ioc_ring_pause(viona_link_t *link, uint_t idx)
{
	if (idx >= VIONA_VQ_MAX) {
		return (EINVAL);
	}

	viona_vring_t *ring = &link->l_vrings[idx];
	return (viona_ring_pause(ring));
}

static int
viona_ioc_ring_set_msi(viona_link_t *link, void *data, int md)
{
	vioc_ring_msi_t vrm;
	viona_vring_t *ring;

	if (ddi_copyin(data, &vrm, sizeof (vrm), md) != 0) {
		return (EFAULT);
	}
	if (vrm.rm_index >= VIONA_VQ_MAX) {
		return (EINVAL);
	}

	ring = &link->l_vrings[vrm.rm_index];
	mutex_enter(&ring->vr_lock);
	ring->vr_msi_addr = vrm.rm_addr;
	ring->vr_msi_msg = vrm.rm_msg;
	mutex_exit(&ring->vr_lock);

	return (0);
}

static int
viona_notify_iop(void *arg, bool in, uint16_t port, uint8_t bytes,
    uint32_t *val)
{
	viona_link_t *link = (viona_link_t *)arg;

	/*
	 * If the request is a read (in/ins), or directed at a port other than
	 * the one we expect to be registered on, ignore it.
	 */
	if (in || port != link->l_notify_ioport) {
		return (ESRCH);
	}

	/* Let userspace handle notifications for rings other than RX/TX. */
	const uint16_t vq = *val;
	if (vq >= VIONA_VQ_MAX) {
		return (ESRCH);
	}

	viona_vring_t *ring = &link->l_vrings[vq];
	int res = 0;

	mutex_enter(&ring->vr_lock);
	if (ring->vr_state == VRS_RUN) {
		cv_broadcast(&ring->vr_cv);
	} else {
		res = ESRCH;
	}
	mutex_exit(&ring->vr_lock);

	return (res);
}

static int
viona_ioc_set_notify_ioport(viona_link_t *link, uint16_t ioport)
{
	int err = 0;

	if (link->l_notify_ioport != 0) {
		vmm_drv_ioport_unhook(link->l_vm_hold, &link->l_notify_cookie);
		link->l_notify_ioport = 0;
	}

	if (ioport != 0) {
		err = vmm_drv_ioport_hook(link->l_vm_hold, ioport,
		    viona_notify_iop, (void *)link, &link->l_notify_cookie);
		if (err == 0) {
			link->l_notify_ioport = ioport;
		}
	}
	return (err);
}

static int
viona_ioc_ring_intr_clear(viona_link_t *link, uint_t idx)
{
	if (idx >= VIONA_VQ_MAX) {
		return (EINVAL);
	}

	link->l_vrings[idx].vr_intr_enabled = 0;
	return (0);
}

static int
viona_ioc_intr_poll(viona_link_t *link, void *udata, int md, int *rv)
{
	uint_t cnt = 0;
	vioc_intr_poll_t vip;

	for (uint_t i = 0; i < VIONA_VQ_MAX; i++) {
		uint_t val = link->l_vrings[i].vr_intr_enabled;

		vip.vip_status[i] = val;
		if (val != 0) {
			cnt++;
		}
	}

	if (ddi_copyout(&vip, udata, sizeof (vip), md) != 0) {
		return (EFAULT);
	}
	*rv = (int)cnt;
	return (0);
}
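
/*
 * The ioctl above pairs with viona_chpoll() to form the non-MSI interrupt
 * slow path described at the top of this file. A hedged sketch of the
 * userspace loop (assertion of the interrupt into the guest is elided):
 *
 *	struct pollfd pfd = { .fd = vfd, .events = POLLRDBAND };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		vioc_intr_poll_t vip;
 *
 *		if (ioctl(vfd, VNA_IOC_INTR_POLL, &vip) != 0)
 *			break;
 *		for (uint_t i = 0; i < VIONA_VQ_MAX; i++) {
 *			if (vip.vip_status[i] != 0) {
 *				... assert the guest's legacy interrupt ...
 *				(void) ioctl(vfd, VNA_IOC_RING_INTR_CLR, i);
 *			}
 *		}
 *	}
 */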