/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2019 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/cpuset.h>

#include <x86/specialreg.h>
#include <x86/apicreg.h>

#include <machine/clock.h>

#include <machine/vmm.h>
#include <sys/vmm_kernel.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"

#include "vlapic.h"
#include "vlapic_priv.h"
#include "vioapic.h"


/*
 * The 4 high bits of a given interrupt vector represent its priority. The same
 * is true for the contents of the TPR when it is used to calculate the ultimate
 * PPR of an APIC - the 4 high bits hold the priority.
 */
#define	PRIO(x)			((x) & 0xf0)

#define	VLAPIC_VERSION		(16)

/*
 * The 'vlapic->timer_lock' is used to provide mutual exclusion between the
 * vlapic_callout_handler() and vcpu accesses to:
 * - timer_cur_freq, timer_period, timer_fire_when
 * - timer LVT register
 */
#define	VLAPIC_TIMER_LOCK(vlapic)	mutex_enter(&((vlapic)->timer_lock))
#define	VLAPIC_TIMER_UNLOCK(vlapic)	mutex_exit(&((vlapic)->timer_lock))
#define	VLAPIC_TIMER_LOCKED(vlapic)	MUTEX_HELD(&((vlapic)->timer_lock))

/*
 * APIC timer frequency:
 * - arbitrary but chosen to be in the ballpark of contemporary hardware.
 * - power-of-two to avoid loss of precision when calculating times
 */
#define	VLAPIC_BUS_FREQ		(128 * 1024 * 1024)
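
/*
 * For illustration: with the power-on default divide-by-2 setting in the
 * DCR (APIC_TDCR_2), the guest-visible timer runs at 64 MiHz, so a
 * programmed initial count of 0x4000000 corresponds to a period of exactly
 * one second.
 */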

#define	APICBASE_ADDR_MASK	0xfffffffffffff000UL

#define	APIC_VALID_MASK_ESR	(APIC_ESR_SEND_CS_ERROR | \
		APIC_ESR_RECEIVE_CS_ERROR | APIC_ESR_SEND_ACCEPT | \
		APIC_ESR_RECEIVE_ACCEPT | APIC_ESR_SEND_ILLEGAL_VECTOR | \
		APIC_ESR_RECEIVE_ILLEGAL_VECTOR | APIC_ESR_ILLEGAL_REGISTER)

static void vlapic_set_error(struct vlapic *, uint32_t, bool);
static void vlapic_callout_handler(void *arg);

static __inline bool
vlapic_x2mode(const struct vlapic *vlapic)
{
	return ((vlapic->msr_apicbase & APICBASE_X2APIC) != 0);
}

static __inline bool
vlapic_hw_disabled(const struct vlapic *vlapic)
{
	return ((vlapic->msr_apicbase & APICBASE_ENABLED) == 0);
}

static __inline bool
vlapic_sw_disabled(const struct vlapic *vlapic)
{
	const struct LAPIC *lapic = vlapic->apic_page;

	return ((lapic->svr & APIC_SVR_ENABLE) == 0);
}

static __inline bool
vlapic_enabled(const struct vlapic *vlapic)
{
	return (!vlapic_hw_disabled(vlapic) && !vlapic_sw_disabled(vlapic));
}

static __inline uint32_t
vlapic_get_id(const struct vlapic *vlapic)
{
	if (vlapic_x2mode(vlapic))
		return (vlapic->vcpuid);
	else
		return (vlapic->vcpuid << 24);
}
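
/*
 * In x2APIC mode the logical destination register is derived from the APIC
 * ID rather than being writable: the low 16 bits hold a one-hot logical ID
 * (1 << apicid[3:0]) and the high 16 bits hold the cluster (apicid >> 4).
 * For example, an APIC ID of 0x21 yields an LDR of 0x00020002.
 */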

static uint32_t
x2apic_ldr(const struct vlapic *vlapic)
{
	int apicid;
	uint32_t ldr;

	apicid = vlapic_get_id(vlapic);
	ldr = 1 << (apicid & 0xf);
	ldr |= (apicid & 0xffff0) << 12;
	return (ldr);
}

void
vlapic_dfr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;
	if (vlapic_x2mode(vlapic)) {
		/* Ignore write to DFR in x2APIC mode */
		lapic->dfr = 0;
		return;
	}

	lapic->dfr &= APIC_DFR_MODEL_MASK;
	lapic->dfr |= APIC_DFR_RESERVED;
}

void
vlapic_ldr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;

	/* LDR is read-only in x2apic mode */
	if (vlapic_x2mode(vlapic)) {
		/* Ignore write to LDR in x2APIC mode */
		lapic->ldr = x2apic_ldr(vlapic);
	} else {
		lapic->ldr &= ~APIC_LDR_RESERVED;
	}
}

void
vlapic_id_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	/*
	 * We don't allow the ID register to be modified, so reset it back to
	 * its default value.
	 */
	lapic = vlapic->apic_page;
	lapic->id = vlapic_get_id(vlapic);
}

static int
vlapic_timer_divisor(uint32_t dcr)
{
	switch (dcr & 0xB) {
	case APIC_TDCR_1:
		return (1);
	case APIC_TDCR_2:
		return (2);
	case APIC_TDCR_4:
		return (4);
	case APIC_TDCR_8:
		return (8);
	case APIC_TDCR_16:
		return (16);
	case APIC_TDCR_32:
		return (32);
	case APIC_TDCR_64:
		return (64);
	case APIC_TDCR_128:
		return (128);
	default:
		panic("vlapic_timer_divisor: invalid dcr 0x%08x", dcr);
	}
}

#if 0
static inline void
vlapic_dump_lvt(uint32_t offset, uint32_t *lvt)
{
	printf("Offset %x: lvt %08x (V:%02x DS:%x M:%x)\n", offset,
	    *lvt, *lvt & APIC_LVTT_VECTOR, *lvt & APIC_LVTT_DS,
	    *lvt & APIC_LVTT_M);
}
#endif

static uint32_t
vlapic_get_ccr(struct vlapic *vlapic)
{
	struct LAPIC *lapic;
	uint32_t ccr;

	ccr = 0;
	lapic = vlapic->apic_page;

	VLAPIC_TIMER_LOCK(vlapic);
	if (callout_active(&vlapic->callout)) {
		/*
		 * If the timer is scheduled to expire in the future then
		 * compute the value of 'ccr' based on the remaining time.
		 */
		const hrtime_t now = gethrtime();
		if (vlapic->timer_fire_when > now) {
			ccr += hrt_freq_count(vlapic->timer_fire_when - now,
			    vlapic->timer_cur_freq);
		}
	}

	/*
	 * Clamp CCR value to that programmed in ICR - its theoretical maximum.
	 * Normal operation should never result in this being necessary.  Only
	 * strange circumstances due to state importation as part of instance
	 * save/restore or live-migration require such wariness.
	 */
	if (ccr > lapic->icr_timer) {
		ccr = lapic->icr_timer;
		vlapic->stats.vs_clamp_ccr++;
	}
	VLAPIC_TIMER_UNLOCK(vlapic);
	return (ccr);
}

static void
vlapic_update_divider(struct vlapic *vlapic)
{
	struct LAPIC *lapic = vlapic->apic_page;

	ASSERT(VLAPIC_TIMER_LOCKED(vlapic));

	vlapic->timer_cur_freq =
	    VLAPIC_BUS_FREQ / vlapic_timer_divisor(lapic->dcr_timer);
	vlapic->timer_period =
	    hrt_freq_interval(vlapic->timer_cur_freq, lapic->icr_timer);
}

void
vlapic_dcr_write_handler(struct vlapic *vlapic)
{
	/*
	 * Update the timer frequency and the timer period.
	 *
	 * XXX changes to the frequency divider will not take effect until
	 * the timer is reloaded.
	 */
	VLAPIC_TIMER_LOCK(vlapic);
	vlapic_update_divider(vlapic);
	VLAPIC_TIMER_UNLOCK(vlapic);
}

void
vlapic_esr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;
	lapic->esr = vlapic->esr_pending;
	vlapic->esr_pending = 0;
}
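
/*
 * The 256 possible vectors are tracked in eight 32-bit registers in each of
 * the IRR, ISR, and TMR.  Those registers sit 16 bytes apart in the APIC
 * page, hence the stride of 4 uint32_t entries used when indexing below:
 * vector V lives in word (V / 32) * 4 at bit position (V % 32).
 */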

vcpu_notify_t
vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
{
	struct LAPIC *lapic;
	uint32_t *irrptr, *tmrptr, mask, tmr;
	int idx;

	KASSERT(vector >= 0 && vector < 256, ("invalid vector %d", vector));

	lapic = vlapic->apic_page;
	if (!(lapic->svr & APIC_SVR_ENABLE)) {
		/* Ignore interrupt on software-disabled APIC */
		return (VCPU_NOTIFY_NONE);
	}

	if (vector < 16) {
		vlapic_set_error(vlapic, APIC_ESR_RECEIVE_ILLEGAL_VECTOR,
		    false);

		/*
		 * If the error LVT is configured to interrupt the vCPU, it
		 * will have delivered a notification through that mechanism.
		 */
		return (VCPU_NOTIFY_NONE);
	}

	if (vlapic->ops.set_intr_ready) {
		return ((*vlapic->ops.set_intr_ready)(vlapic, vector, level));
	}

	idx = (vector / 32) * 4;
	mask = 1 << (vector % 32);
	tmrptr = &lapic->tmr0;
	irrptr = &lapic->irr0;

	/*
	 * Update TMR for requested vector, if necessary.
	 * This must be done prior to asserting the bit in IRR so that the
	 * proper TMR state is always visible before the to-be-queued interrupt
	 * can be injected.
	 */
	tmr = atomic_load_acq_32(&tmrptr[idx]);
	if ((tmr & mask) != (level ? mask : 0)) {
		if (level) {
			atomic_set_int(&tmrptr[idx], mask);
		} else {
			atomic_clear_int(&tmrptr[idx], mask);
		}
	}

	/* Now set the bit in IRR */
	atomic_set_int(&irrptr[idx], mask);

	return (VCPU_NOTIFY_EXIT);
}

static __inline uint32_t *
vlapic_get_lvtptr(struct vlapic *vlapic, uint32_t offset)
{
	struct LAPIC *lapic = vlapic->apic_page;
	int i;

	switch (offset) {
	case APIC_OFFSET_CMCI_LVT:
		return (&lapic->lvt_cmci);
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		i = (offset - APIC_OFFSET_TIMER_LVT) >> 2;
		return ((&lapic->lvt_timer) + i);
	default:
		panic("vlapic_get_lvt: invalid LVT\n");
	}
}

static __inline int
lvt_off_to_idx(uint32_t offset)
{
	int index;

	switch (offset) {
	case APIC_OFFSET_CMCI_LVT:
		index = APIC_LVT_CMCI;
		break;
	case APIC_OFFSET_TIMER_LVT:
		index = APIC_LVT_TIMER;
		break;
	case APIC_OFFSET_THERM_LVT:
		index = APIC_LVT_THERMAL;
		break;
	case APIC_OFFSET_PERF_LVT:
		index = APIC_LVT_PMC;
		break;
	case APIC_OFFSET_LINT0_LVT:
		index = APIC_LVT_LINT0;
		break;
	case APIC_OFFSET_LINT1_LVT:
		index = APIC_LVT_LINT1;
		break;
	case APIC_OFFSET_ERROR_LVT:
		index = APIC_LVT_ERROR;
		break;
	default:
		index = -1;
		break;
	}
	KASSERT(index >= 0 && index <= VLAPIC_MAXLVT_INDEX, ("lvt_off_to_idx: "
	    "invalid lvt index %d for offset %x", index, offset));

	return (index);
}

static __inline uint32_t
vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
{
	int idx;
	uint32_t val;

	idx = lvt_off_to_idx(offset);
	val = atomic_load_acq_32(&vlapic->lvt_last[idx]);
	return (val);
}

void
vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
{
	uint32_t *lvtptr, mask, val;
	struct LAPIC *lapic;
	int idx;

	lapic = vlapic->apic_page;
	lvtptr = vlapic_get_lvtptr(vlapic, offset);
	val = *lvtptr;
	idx = lvt_off_to_idx(offset);

	if (!(lapic->svr & APIC_SVR_ENABLE))
		val |= APIC_LVT_M;
	mask = APIC_LVT_M | APIC_LVT_DS | APIC_LVT_VECTOR;
	switch (offset) {
	case APIC_OFFSET_TIMER_LVT:
		mask |= APIC_LVTT_TM;
		break;
	case APIC_OFFSET_ERROR_LVT:
		break;
	case APIC_OFFSET_LINT0_LVT:
	case APIC_OFFSET_LINT1_LVT:
		mask |= APIC_LVT_TM | APIC_LVT_RIRR | APIC_LVT_IIPP;
		/* FALLTHROUGH */
	default:
		mask |= APIC_LVT_DM;
		break;
	}
	val &= mask;
	*lvtptr = val;
	atomic_store_rel_32(&vlapic->lvt_last[idx], val);
}
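
/*
 * Note that cleansed LVT contents are mirrored into 'lvt_last' with a
 * releasing store above, pairing with the acquiring loads in
 * vlapic_get_lvt() and vlapic_fire_lvt() so that lock-free consumers
 * always observe a fully filtered register value.
 */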

static void
vlapic_refresh_lvts(struct vlapic *vlapic)
{
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_CMCI_LVT);
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_TIMER_LVT);
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_THERM_LVT);
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_PERF_LVT);
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_LINT0_LVT);
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_LINT1_LVT);
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_ERROR_LVT);
}

static void
vlapic_mask_lvts(struct vlapic *vlapic)
{
	struct LAPIC *lapic = vlapic->apic_page;

	lapic->lvt_cmci |= APIC_LVT_M;
	lapic->lvt_timer |= APIC_LVT_M;
	lapic->lvt_thermal |= APIC_LVT_M;
	lapic->lvt_pcint |= APIC_LVT_M;
	lapic->lvt_lint0 |= APIC_LVT_M;
	lapic->lvt_lint1 |= APIC_LVT_M;
	lapic->lvt_error |= APIC_LVT_M;
	vlapic_refresh_lvts(vlapic);
}

static int
vlapic_fire_lvt(struct vlapic *vlapic, uint_t lvt)
{
	uint32_t mode, reg, vec;
	vcpu_notify_t notify;

	reg = atomic_load_acq_32(&vlapic->lvt_last[lvt]);

	if (reg & APIC_LVT_M)
		return (0);
	vec = reg & APIC_LVT_VECTOR;
	mode = reg & APIC_LVT_DM;

	switch (mode) {
	case APIC_LVT_DM_FIXED:
		if (vec < 16) {
			vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR,
			    lvt == APIC_LVT_ERROR);
			return (0);
		}
		notify = vlapic_set_intr_ready(vlapic, vec, false);
		vcpu_notify_event_type(vlapic->vm, vlapic->vcpuid, notify);
		break;
	case APIC_LVT_DM_NMI:
		(void) vm_inject_nmi(vlapic->vm, vlapic->vcpuid);
		break;
	case APIC_LVT_DM_EXTINT:
		(void) vm_inject_extint(vlapic->vm, vlapic->vcpuid);
		break;
	default:
		/* Other modes are ignored */
		return (0);
	}
	return (1);
}

static uint_t
vlapic_active_isr(struct vlapic *vlapic)
{
	int i;
	uint32_t *isrp;

	isrp = &vlapic->apic_page->isr7;

	for (i = 7; i >= 0; i--, isrp -= 4) {
		uint32_t reg = *isrp;

		if (reg != 0) {
			uint_t vec = (i * 32) + bsrl(reg);

			if (vec < 16) {
				/*
				 * Truncate the illegal low vectors to a value
				 * of 0, indicating that no active ISR was
				 * found.
				 */
				return (0);
			}
			return (vec);
		}
	}

	return (0);
}

/*
 * After events which might arbitrarily change the value of PPR, such as a TPR
 * write or an EOI, calculate that new PPR value and store it in the APIC page.
 */
static void
vlapic_update_ppr(struct vlapic *vlapic)
{
	int isrvec, tpr, ppr;

	isrvec = vlapic_active_isr(vlapic);
	tpr = vlapic->apic_page->tpr;

	/*
	 * Algorithm adopted from section "Interrupt, Task and Processor
	 * Priority" in Intel Architecture Manual Vol 3a.
	 */
	if (PRIO(tpr) >= PRIO(isrvec)) {
		ppr = tpr;
	} else {
		ppr = PRIO(isrvec);
	}

	vlapic->apic_page->ppr = ppr;
}
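
/*
 * Worked example: with TPR == 0x40 and vector 0x35 in-service, PRIO(0x40)
 * >= PRIO(0x35) and the PPR takes the TPR value of 0x40; were vector 0x85
 * the highest in-service vector instead, the PPR would be raised to
 * PRIO(0x85) == 0x80.
 */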

/*
 * When a vector is asserted in ISR as in-service, the PPR must be raised to
 * the priority of that vector, as the vCPU would have been at a lower priority
 * in order for the vector to be accepted.
 */
static void
vlapic_raise_ppr(struct vlapic *vlapic, int vec)
{
	struct LAPIC *lapic = vlapic->apic_page;
	int ppr;

	ppr = PRIO(vec);

	lapic->ppr = ppr;
}

void
vlapic_sync_tpr(struct vlapic *vlapic)
{
	vlapic_update_ppr(vlapic);
}

static VMM_STAT(VLAPIC_GRATUITOUS_EOI, "EOI without any in-service interrupt");

static void
vlapic_process_eoi(struct vlapic *vlapic)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *isrptr, *tmrptr;
	int i;
	uint_t idx, bitpos, vector;

	isrptr = &lapic->isr0;
	tmrptr = &lapic->tmr0;

	for (i = 7; i >= 0; i--) {
		idx = i * 4;
		if (isrptr[idx] != 0) {
			bitpos = bsrl(isrptr[idx]);
			vector = i * 32 + bitpos;

			isrptr[idx] &= ~(1 << bitpos);
			vlapic_update_ppr(vlapic);
			if ((tmrptr[idx] & (1 << bitpos)) != 0) {
				vioapic_process_eoi(vlapic->vm, vlapic->vcpuid,
				    vector);
			}
			return;
		}
	}
	vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_GRATUITOUS_EOI, 1);
}

static __inline int
vlapic_get_lvt_field(uint32_t lvt, uint32_t mask)
{
	return (lvt & mask);
}

static __inline int
vlapic_periodic_timer(struct vlapic *vlapic)
{
	uint32_t lvt;

	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);

	return (vlapic_get_lvt_field(lvt, APIC_LVTT_TM_PERIODIC));
}

static VMM_STAT(VLAPIC_INTR_ERROR, "error interrupts generated by vlapic");

static void
vlapic_set_error(struct vlapic *vlapic, uint32_t mask, bool lvt_error)
{
	vlapic->esr_pending |= mask;

	/*
	 * Avoid infinite recursion if the error LVT itself is configured with
	 * an illegal vector.
	 */
	if (lvt_error)
		return;

	if (vlapic_fire_lvt(vlapic, APIC_LVT_ERROR)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_ERROR, 1);
	}
}

static VMM_STAT(VLAPIC_INTR_TIMER, "timer interrupts generated by vlapic");

static void
vlapic_fire_timer(struct vlapic *vlapic)
{
	ASSERT(VLAPIC_TIMER_LOCKED(vlapic));

	if (vlapic_fire_lvt(vlapic, APIC_LVT_TIMER)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_TIMER, 1);
	}
}

static VMM_STAT(VLAPIC_INTR_CMC,
    "corrected machine check interrupts generated by vlapic");

void
vlapic_fire_cmci(struct vlapic *vlapic)
{
	if (vlapic_fire_lvt(vlapic, APIC_LVT_CMCI)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_CMC, 1);
	}
}

static VMM_STAT_ARRAY(LVTS_TRIGGERRED, VLAPIC_MAXLVT_INDEX + 1,
    "lvts triggered");
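
/*
 * Trigger one of the LVT inputs by index (APIC_LVT_LINT0, APIC_LVT_TIMER,
 * etc) rather than by register offset.  When the APIC is hardware-disabled,
 * the LINT[1:0] inputs fall back to their legacy ExtINT/NMI roles, as
 * handled below.
 */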
int
vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
{
	if (!vlapic_enabled(vlapic)) {
		/*
		 * When the local APIC is global/hardware disabled,
		 * LINT[1:0] pins are configured as INTR and NMI pins,
		 * respectively.
		 */
		switch (vector) {
		case APIC_LVT_LINT0:
			(void) vm_inject_extint(vlapic->vm,
			    vlapic->vcpuid);
			break;
		case APIC_LVT_LINT1:
			(void) vm_inject_nmi(vlapic->vm,
			    vlapic->vcpuid);
			break;
		default:
			break;
		}
		return (0);
	}

	switch (vector) {
	case APIC_LVT_LINT0:
	case APIC_LVT_LINT1:
	case APIC_LVT_TIMER:
	case APIC_LVT_ERROR:
	case APIC_LVT_PMC:
	case APIC_LVT_THERMAL:
	case APIC_LVT_CMCI:
		if (vlapic_fire_lvt(vlapic, vector)) {
			vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
			    LVTS_TRIGGERRED, vector, 1);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

static void
vlapic_callout_reset(struct vlapic *vlapic)
{
	callout_reset_hrtime(&vlapic->callout, vlapic->timer_fire_when,
	    vlapic_callout_handler, vlapic, C_ABSOLUTE);
}

static void
vlapic_callout_handler(void *arg)
{
	struct vlapic *vlapic = arg;

	VLAPIC_TIMER_LOCK(vlapic);
	if (callout_pending(&vlapic->callout))	/* callout was reset */
		goto done;

	if (!callout_active(&vlapic->callout))	/* callout was stopped */
		goto done;

	callout_deactivate(&vlapic->callout);

	vlapic_fire_timer(vlapic);

	if (vlapic_periodic_timer(vlapic)) {
		/*
		 * Compute the delta between when the timer was supposed to
		 * fire and the present time.  We can depend on the fact that
		 * cyclics (which underlie these callouts) will never be called
		 * early.
		 */
		const hrtime_t now = gethrtime();
		const hrtime_t delta = now - vlapic->timer_fire_when;
		if (delta >= vlapic->timer_period) {
			/*
			 * If we are so behind that we have missed an entire
			 * timer period, reset the time base rather than
			 * attempting to catch up.
			 */
			vlapic->timer_fire_when = now + vlapic->timer_period;
		} else {
			vlapic->timer_fire_when += vlapic->timer_period;
		}
		vlapic_callout_reset(vlapic);
	} else {
		/*
		 * Clear the target time so that logic can distinguish a timer
		 * which has fired (where the value is zero) from one which is
		 * held pending due to the instance being paused (where the
		 * value is non-zero, but the callout is not pending).
		 */
		vlapic->timer_fire_when = 0;
	}
done:
	VLAPIC_TIMER_UNLOCK(vlapic);
}
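
/*
 * A write of the initial count (re)arms the timer: a value of N schedules
 * the callout roughly N / timer_cur_freq seconds in the future, while a
 * value of 0 stops the timer outright, mirroring hardware behavior.
 */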
void
vlapic_icrtmr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic = vlapic->apic_page;

	VLAPIC_TIMER_LOCK(vlapic);
	vlapic->timer_period = hrt_freq_interval(vlapic->timer_cur_freq,
	    lapic->icr_timer);
	if (vlapic->timer_period != 0) {
		vlapic->timer_fire_when = gethrtime() + vlapic->timer_period;
		vlapic_callout_reset(vlapic);
	} else {
		vlapic->timer_fire_when = 0;
		callout_stop(&vlapic->callout);
	}
	VLAPIC_TIMER_UNLOCK(vlapic);
}

/*
 * This function populates 'dmask' with the set of vcpus that match the
 * addressing specified by the (dest, phys, lowprio) tuple.
 *
 * 'x2apic_dest' specifies whether 'dest' is interpreted as an x2APIC (32-bit)
 * or xAPIC (8-bit) destination field.
 */
void
vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
    bool lowprio, bool x2apic_dest)
{
	struct vlapic *vlapic;
	uint32_t dfr, ldr, ldest, cluster;
	uint32_t mda_flat_ldest, mda_cluster_ldest, mda_ldest, mda_cluster_id;
	cpuset_t amask;
	int vcpuid;

	if ((x2apic_dest && dest == 0xffffffff) ||
	    (!x2apic_dest && dest == 0xff)) {
		/*
		 * Broadcast in both logical and physical modes.
		 */
		*dmask = vm_active_cpus(vm);
		return;
	}

	if (phys) {
		/*
		 * Physical mode: destination is the APIC ID.
		 */
		CPU_ZERO(dmask);
		vcpuid = vm_apicid2vcpuid(vm, dest);
		amask = vm_active_cpus(vm);
		if (vcpuid < vm_get_maxcpus(vm) && CPU_ISSET(vcpuid, &amask))
			CPU_SET(vcpuid, dmask);
	} else {
		/*
		 * In the "Flat Model" the MDA is interpreted as an 8-bit wide
		 * bitmask.  This model is only available in the xAPIC mode.
		 */
		mda_flat_ldest = dest & 0xff;

		/*
		 * In the "Cluster Model" the MDA is used to identify a
		 * specific cluster and a set of APICs in that cluster.
		 */
		if (x2apic_dest) {
			mda_cluster_id = dest >> 16;
			mda_cluster_ldest = dest & 0xffff;
		} else {
			mda_cluster_id = (dest >> 4) & 0xf;
			mda_cluster_ldest = dest & 0xf;
		}
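
		/*
		 * For illustration: an xAPIC MDA of 0x2c addresses cluster 2,
		 * logical members 0b1100 (the third and fourth APICs in that
		 * cluster).
		 */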

		/*
		 * Logical mode: match each APIC that has a bit set
		 * in its LDR that matches a bit in the ldest.
		 */
		CPU_ZERO(dmask);
		amask = vm_active_cpus(vm);
		while ((vcpuid = CPU_FFS(&amask)) != 0) {
			vcpuid--;
			CPU_CLR(vcpuid, &amask);

			vlapic = vm_lapic(vm, vcpuid);
			dfr = vlapic->apic_page->dfr;
			ldr = vlapic->apic_page->ldr;

			if ((dfr & APIC_DFR_MODEL_MASK) ==
			    APIC_DFR_MODEL_FLAT) {
				ldest = ldr >> 24;
				mda_ldest = mda_flat_ldest;
			} else if ((dfr & APIC_DFR_MODEL_MASK) ==
			    APIC_DFR_MODEL_CLUSTER) {
				if (vlapic_x2mode(vlapic)) {
					cluster = ldr >> 16;
					ldest = ldr & 0xffff;
				} else {
					cluster = ldr >> 28;
					ldest = (ldr >> 24) & 0xf;
				}
				if (cluster != mda_cluster_id)
					continue;
				mda_ldest = mda_cluster_ldest;
			} else {
				/*
				 * Guest has configured a bad logical
				 * model for this vcpu - skip it.
				 */
				continue;
			}

			if ((mda_ldest & ldest) != 0) {
				CPU_SET(vcpuid, dmask);
				if (lowprio)
					break;
			}
		}
	}
}

static VMM_STAT(VLAPIC_IPI_SEND, "ipis sent from vcpu");
static VMM_STAT(VLAPIC_IPI_RECV, "ipis received by vcpu");

static void
vlapic_set_tpr(struct vlapic *vlapic, uint8_t val)
{
	struct LAPIC *lapic = vlapic->apic_page;

	if (lapic->tpr != val) {
		lapic->tpr = val;
		vlapic_update_ppr(vlapic);
	}
}
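
/*
 * CR8 is simply the high nibble of the TPR: a guest "mov $0x9, %cr8" is
 * equivalent to writing 0x90 to the TPR register.
 */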
void
vlapic_set_cr8(struct vlapic *vlapic, uint64_t val)
{
	uint8_t tpr;

	if (val & ~0xf) {
		vm_inject_gp(vlapic->vm, vlapic->vcpuid);
		return;
	}

	tpr = val << 4;
	vlapic_set_tpr(vlapic, tpr);
}

uint64_t
vlapic_get_cr8(const struct vlapic *vlapic)
{
	const struct LAPIC *lapic = vlapic->apic_page;

	return (lapic->tpr >> 4);
}

void
vlapic_icrlo_write_handler(struct vlapic *vlapic)
{
	int i;
	cpuset_t dmask;
	uint64_t icrval;
	uint32_t dest, vec, mode, dsh;
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;
	lapic->icr_lo &= ~APIC_DELSTAT_PEND;
	icrval = ((uint64_t)lapic->icr_hi << 32) | lapic->icr_lo;

	if (vlapic_x2mode(vlapic))
		dest = icrval >> 32;
	else
		dest = icrval >> (32 + 24);
	vec = icrval & APIC_VECTOR_MASK;
	mode = icrval & APIC_DELMODE_MASK;
	dsh = icrval & APIC_DEST_MASK;

	if (mode == APIC_DELMODE_FIXED && vec < 16) {
		vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR, false);
		return;
	}
	if (mode == APIC_DELMODE_INIT &&
	    (icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT) {
		/* No work required to deassert INIT */
		return;
	}
	if ((mode == APIC_DELMODE_STARTUP || mode == APIC_DELMODE_INIT) &&
	    !(dsh == APIC_DEST_DESTFLD || dsh == APIC_DEST_ALLESELF)) {
		/*
		 * While Intel makes no mention of restrictions for destination
		 * shorthand when sending INIT or SIPI, AMD requires either a
		 * specific destination or all-excluding self.  Common use seems
		 * to be restricted to those two cases.  Until handling is in
		 * place to halt a guest which makes such a frivolous request,
		 * we will ignore them.
		 */
		return;
	}

	switch (dsh) {
	case APIC_DEST_DESTFLD:
		vlapic_calcdest(vlapic->vm, &dmask, dest,
		    (icrval & APIC_DESTMODE_LOG) == 0, false,
		    vlapic_x2mode(vlapic));
		break;
	case APIC_DEST_SELF:
		CPU_SETOF(vlapic->vcpuid, &dmask);
		break;
	case APIC_DEST_ALLISELF:
		dmask = vm_active_cpus(vlapic->vm);
		break;
	case APIC_DEST_ALLESELF:
		dmask = vm_active_cpus(vlapic->vm);
		CPU_CLR(vlapic->vcpuid, &dmask);
		break;
	default:
		/*
		 * All possible delivery shorthands are covered above.
		 * We should never end up here.
		 */
		panic("unknown delivery shorthand: %x", dsh);
	}

	while ((i = CPU_FFS(&dmask)) != 0) {
		i--;
		CPU_CLR(i, &dmask);
		switch (mode) {
		case APIC_DELMODE_FIXED:
			(void) lapic_intr_edge(vlapic->vm, i, vec);
			vmm_stat_incr(vlapic->vm, vlapic->vcpuid,
			    VLAPIC_IPI_SEND, 1);
			vmm_stat_incr(vlapic->vm, i,
			    VLAPIC_IPI_RECV, 1);
			break;
		case APIC_DELMODE_NMI:
			(void) vm_inject_nmi(vlapic->vm, i);
			break;
		case APIC_DELMODE_INIT:
			(void) vm_inject_init(vlapic->vm, i);
			break;
		case APIC_DELMODE_STARTUP:
			(void) vm_inject_sipi(vlapic->vm, i, vec);
			break;
		case APIC_DELMODE_LOWPRIO:
		case APIC_DELMODE_SMI:
		default:
			/* Unhandled IPI modes (for now) */
			break;
		}
	}
}

void
vlapic_self_ipi_handler(struct vlapic *vlapic, uint32_t val)
{
	const int vec = val & 0xff;

	/* self-IPI is only exposed via x2APIC */
	ASSERT(vlapic_x2mode(vlapic));

	(void) lapic_intr_edge(vlapic->vm, vlapic->vcpuid, vec);
	vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_IPI_SEND, 1);
	vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_IPI_RECV, 1);
}
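
/*
 * vlapic_pending_intr() and vlapic_intr_accepted() are used as a pair on
 * the VM-entry path: the former reports the highest-priority vector in IRR
 * which beats the current PPR, and the latter moves that vector from IRR
 * to ISR once its injection is assured.
 */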
int
vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
{
	struct LAPIC *lapic = vlapic->apic_page;
	int idx, i, bitpos, vector;
	uint32_t *irrptr, val;

	if (vlapic->ops.sync_state) {
		(*vlapic->ops.sync_state)(vlapic);
	}

	irrptr = &lapic->irr0;

	for (i = 7; i >= 0; i--) {
		idx = i * 4;
		val = atomic_load_acq_int(&irrptr[idx]);
		bitpos = fls(val);
		if (bitpos != 0) {
			vector = i * 32 + (bitpos - 1);
			if (PRIO(vector) > PRIO(lapic->ppr)) {
				if (vecptr != NULL)
					*vecptr = vector;
				return (1);
			} else
				break;
		}
	}
	return (0);
}

void
vlapic_intr_accepted(struct vlapic *vlapic, int vector)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *irrptr, *isrptr;
	int idx;

	KASSERT(vector >= 16 && vector < 256, ("invalid vector %d", vector));

	if (vlapic->ops.intr_accepted)
		return ((*vlapic->ops.intr_accepted)(vlapic, vector));

	/*
	 * Clear the ready bit for the vector being accepted in IRR and set
	 * the vector as in-service in ISR.
	 */
	idx = (vector / 32) * 4;

	irrptr = &lapic->irr0;
	atomic_clear_int(&irrptr[idx], 1 << (vector % 32));

	isrptr = &lapic->isr0;
	isrptr[idx] |= 1 << (vector % 32);

	/*
	 * The only way a fresh vector could be accepted into ISR is if it was
	 * of a higher priority than the current PPR.  With that vector now
	 * in-service, the PPR must be raised.
	 */
	vlapic_raise_ppr(vlapic, vector);
}

void
vlapic_svr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;
	uint32_t old, new, changed;

	lapic = vlapic->apic_page;

	new = lapic->svr;
	old = vlapic->svr_last;
	vlapic->svr_last = new;

	changed = old ^ new;
	if ((changed & APIC_SVR_ENABLE) != 0) {
		if ((new & APIC_SVR_ENABLE) == 0) {
			/*
			 * The apic is now disabled so stop the apic timer
			 * and mask all the LVT entries.
			 */
			VLAPIC_TIMER_LOCK(vlapic);
			callout_stop(&vlapic->callout);
			VLAPIC_TIMER_UNLOCK(vlapic);
			vlapic_mask_lvts(vlapic);
		} else {
			/*
			 * The apic is now enabled so restart the apic timer
			 * if it is configured in periodic mode.
			 */
			if (vlapic_periodic_timer(vlapic))
				vlapic_icrtmr_write_handler(vlapic);
		}
	}
}
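
/*
 * The accessors below operate on xAPIC register offsets within the APIC
 * page; x2APIC MSR accesses are translated onto these same offsets by
 * vlapic_msr_to_regoff() further below.
 */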
static bool
vlapic_read(struct vlapic *vlapic, uint16_t offset, uint32_t *outp)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *reg;
	int i;

	ASSERT3U(offset & 0x3, ==, 0);
	ASSERT3U(offset, <, PAGESIZE);
	ASSERT3P(outp, !=, NULL);

	uint32_t data = 0;
	switch (offset) {
	case APIC_OFFSET_ID:
		data = lapic->id;
		break;
	case APIC_OFFSET_VER:
		data = lapic->version;
		break;
	case APIC_OFFSET_TPR:
		data = lapic->tpr;
		break;
	case APIC_OFFSET_APR:
		data = lapic->apr;
		break;
	case APIC_OFFSET_PPR:
		data = lapic->ppr;
		break;
	case APIC_OFFSET_LDR:
		data = lapic->ldr;
		break;
	case APIC_OFFSET_DFR:
		data = lapic->dfr;
		break;
	case APIC_OFFSET_SVR:
		data = lapic->svr;
		break;
	case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		i = (offset - APIC_OFFSET_ISR0) >> 2;
		reg = &lapic->isr0;
		data = *(reg + i);
		break;
	case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		i = (offset - APIC_OFFSET_TMR0) >> 2;
		reg = &lapic->tmr0;
		data = *(reg + i);
		break;
	case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
		i = (offset - APIC_OFFSET_IRR0) >> 2;
		reg = &lapic->irr0;
		data = atomic_load_acq_int(reg + i);
		break;
	case APIC_OFFSET_ESR:
		data = lapic->esr;
		break;
	case APIC_OFFSET_ICR_LOW:
		data = lapic->icr_lo;
		break;
	case APIC_OFFSET_ICR_HI:
		data = lapic->icr_hi;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		data = vlapic_get_lvt(vlapic, offset);
#ifdef INVARIANTS
		reg = vlapic_get_lvtptr(vlapic, offset);
		ASSERT3U(data, ==, *reg);
#endif
		break;
	case APIC_OFFSET_TIMER_ICR:
		data = lapic->icr_timer;
		break;
	case APIC_OFFSET_TIMER_CCR:
		data = vlapic_get_ccr(vlapic);
		break;
	case APIC_OFFSET_TIMER_DCR:
		data = lapic->dcr_timer;
		break;
	case APIC_OFFSET_RRR:
		data = 0;
		break;

	case APIC_OFFSET_SELF_IPI:
	case APIC_OFFSET_EOI:
		/* Write-only register */
		*outp = 0;
		return (false);

	default:
		/* Invalid register */
		*outp = 0;
		return (false);
	}

	*outp = data;
	return (true);
}
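
/*
 * As with vlapic_read(), a false return from vlapic_write() indicates an
 * access which does not map onto a writable register: the MMIO path
 * silently discards such writes, while the MSR path raises #GP.
 */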
static bool
vlapic_write(struct vlapic *vlapic, uint16_t offset, uint32_t data)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *regptr;

	ASSERT3U(offset & 0xf, ==, 0);
	ASSERT3U(offset, <, PAGESIZE);

	switch (offset) {
	case APIC_OFFSET_ID:
		lapic->id = data;
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_TPR:
		vlapic_set_tpr(vlapic, data & 0xff);
		break;
	case APIC_OFFSET_EOI:
		vlapic_process_eoi(vlapic);
		break;
	case APIC_OFFSET_LDR:
		lapic->ldr = data;
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		lapic->dfr = data;
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		lapic->svr = data;
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		lapic->icr_lo = data;
		vlapic_icrlo_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_HI:
		lapic->icr_hi = data;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		regptr = vlapic_get_lvtptr(vlapic, offset);
		*regptr = data;
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		lapic->icr_timer = data;
		vlapic_icrtmr_write_handler(vlapic);
		break;

	case APIC_OFFSET_TIMER_DCR:
		lapic->dcr_timer = data;
		vlapic_dcr_write_handler(vlapic);
		break;

	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;

	case APIC_OFFSET_SELF_IPI:
		if (vlapic_x2mode(vlapic))
			vlapic_self_ipi_handler(vlapic, data);
		break;

	case APIC_OFFSET_VER:
	case APIC_OFFSET_APR:
	case APIC_OFFSET_PPR:
	case APIC_OFFSET_RRR:
	case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
	case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
	case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
	case APIC_OFFSET_TIMER_CCR:
		/* Read-only register */
		return (false);

	default:
		/* Invalid register */
		return (false);
	}

	return (true);
}

void
vlapic_reset(struct vlapic *vlapic)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *isrptr, *tmrptr, *irrptr;

	/* Reset any timer-related state first */
	VLAPIC_TIMER_LOCK(vlapic);
	callout_stop(&vlapic->callout);
	lapic->icr_timer = 0;
	lapic->ccr_timer = 0;
	lapic->dcr_timer = 0;
	vlapic_update_divider(vlapic);
	VLAPIC_TIMER_UNLOCK(vlapic);

	/*
	 * Sync any APIC acceleration (APICv/AVIC) state into the APIC page so
	 * it is not leftover after the reset.  This is performed after the
	 * APIC timer has been stopped, in case it happened to fire just prior
	 * to being deactivated.
	 */
	if (vlapic->ops.sync_state) {
		(*vlapic->ops.sync_state)(vlapic);
	}

	vlapic->msr_apicbase = DEFAULT_APIC_BASE | APICBASE_ENABLED;
	if (vlapic->vcpuid == 0)
		vlapic->msr_apicbase |= APICBASE_BSP;

	lapic->id = vlapic_get_id(vlapic);
	lapic->version = VLAPIC_VERSION;
	lapic->version |= (VLAPIC_MAXLVT_INDEX << MAXLVTSHIFT);

	lapic->tpr = 0;
	lapic->apr = 0;
	lapic->ppr = 0;

	lapic->eoi = 0;
	lapic->ldr = 0;
	lapic->dfr = 0xffffffff;
	lapic->svr = APIC_SVR_VECTOR;
	vlapic->svr_last = lapic->svr;

	isrptr = &lapic->isr0;
	tmrptr = &lapic->tmr0;
	irrptr = &lapic->irr0;
	for (uint_t i = 0; i < 8; i++) {
		atomic_store_rel_int(&isrptr[i * 4], 0);
		atomic_store_rel_int(&tmrptr[i * 4], 0);
		atomic_store_rel_int(&irrptr[i * 4], 0);
	}

	lapic->esr = 0;
	vlapic->esr_pending = 0;
	lapic->icr_lo = 0;
	lapic->icr_hi = 0;

	lapic->lvt_cmci = 0;
	lapic->lvt_timer = 0;
	lapic->lvt_thermal = 0;
	lapic->lvt_pcint = 0;
	lapic->lvt_lint0 = 0;
	lapic->lvt_lint1 = 0;
	lapic->lvt_error = 0;
	vlapic_mask_lvts(vlapic);
}

void
vlapic_init(struct vlapic *vlapic)
{
	KASSERT(vlapic->vm != NULL, ("vlapic_init: vm is not initialized"));
	KASSERT(vlapic->vcpuid >= 0 &&
	    vlapic->vcpuid < vm_get_maxcpus(vlapic->vm),
	    ("vlapic_init: vcpuid is not initialized"));
	KASSERT(vlapic->apic_page != NULL, ("vlapic_init: apic_page is not "
	    "initialized"));

	/*
	 * If the vlapic is configured in x2apic mode then it will be
	 * accessed in the critical section via the MSR emulation code.
	 *
	 * Therefore the timer mutex must be a spinlock because blockable
	 * mutexes cannot be acquired in a critical section.
	 */
	mutex_init(&vlapic->timer_lock, NULL, MUTEX_ADAPTIVE, NULL);
	callout_init(&vlapic->callout, 1);

	vlapic_reset(vlapic);
}

void
vlapic_cleanup(struct vlapic *vlapic)
{
	callout_drain(&vlapic->callout);
	mutex_destroy(&vlapic->timer_lock);
}

int
vlapic_mmio_read(struct vlapic *vlapic, uint64_t gpa, uint64_t *valp,
    uint_t size)
{
	ASSERT3U(gpa, >=, DEFAULT_APIC_BASE);
	ASSERT3U(gpa, <, DEFAULT_APIC_BASE + PAGE_SIZE);

	/* Ignore MMIO accesses when in x2APIC mode or hardware disabled */
	if (vlapic_x2mode(vlapic) || vlapic_hw_disabled(vlapic)) {
		*valp = UINT64_MAX;
		return (0);
	}

	const uint16_t off = gpa - DEFAULT_APIC_BASE;
	uint32_t raw = 0;
	(void) vlapic_read(vlapic, off & ~0xf, &raw);

	/* Shift and mask reads which are small and/or unaligned */
	const uint8_t align = off & 0xf;
	if (align < 4) {
		*valp = (uint64_t)raw << (align * 8);
	} else {
		*valp = 0;
	}

	return (0);
}
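
/*
 * As with reads above, MMIO writes are ignored while in x2APIC mode or
 * while the APIC is hardware-disabled.  Only 32-bit stores to 16-byte
 * aligned register offsets are honored, in keeping with how the xAPIC
 * register window is meant to be accessed.
 */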
int
vlapic_mmio_write(struct vlapic *vlapic, uint64_t gpa, uint64_t val,
    uint_t size)
{
	ASSERT3U(gpa, >=, DEFAULT_APIC_BASE);
	ASSERT3U(gpa, <, DEFAULT_APIC_BASE + PAGE_SIZE);

	/* Ignore MMIO accesses when in x2APIC mode or hardware disabled */
	if (vlapic_x2mode(vlapic) || vlapic_hw_disabled(vlapic)) {
		return (0);
	}

	const uint16_t off = gpa - DEFAULT_APIC_BASE;
	/* Ignore writes which are not 32-bits wide and 16-byte aligned */
	if ((off & 0xf) != 0 || size != 4) {
		return (0);
	}

	(void) vlapic_write(vlapic, off, (uint32_t)val);
	return (0);
}

/* Should attempts to change the APIC base address be rejected with a #GP? */
int vlapic_gp_on_addr_change = 1;

static vm_msr_result_t
vlapic_set_apicbase(struct vlapic *vlapic, uint64_t val)
{
	const uint64_t diff = vlapic->msr_apicbase ^ val;

	/*
	 * Until the LAPIC emulation for switching between xAPIC and x2APIC
	 * modes is more polished, those mode bits remain off-limits to guest
	 * modification.
	 */
	const uint64_t reserved_bits = APICBASE_RESERVED | APICBASE_X2APIC |
	    APICBASE_BSP;
	if ((diff & reserved_bits) != 0) {
		return (VMR_GP);
	}

	/* We do not presently allow the LAPIC access address to be modified */
	if ((diff & APICBASE_ADDR_MASK) != 0) {
		/*
		 * Explicitly rebuffing such requests with a #GP is the most
		 * straightforward way to handle the situation, but certain
		 * consumers (such as the KVM unit tests) may balk at the
		 * otherwise unexpected exception.
		 */
		if (vlapic_gp_on_addr_change) {
			return (VMR_GP);
		}

		/* If silence is required, just ignore the address change */
		val = (val & ~APICBASE_ADDR_MASK) | DEFAULT_APIC_BASE;
	}

	vlapic->msr_apicbase = val;
	return (VMR_OK);
}

static __inline uint16_t
vlapic_msr_to_regoff(uint32_t msr)
{
	ASSERT3U(msr, >=, MSR_APIC_000);
	ASSERT3U(msr, <, (MSR_APIC_000 + 0x100));

	return ((msr - MSR_APIC_000) << 4);
}
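
/*
 * For example, the x2APIC ICR is MSR 0x830: (0x830 - MSR_APIC_000) << 4
 * yields register offset 0x300 (APIC_OFFSET_ICR_LOW).  The rdmsr/wrmsr
 * handlers below special-case that offset to stitch the 64-bit ICR value
 * together from icr_hi and icr_lo.
 */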
bool
vlapic_owned_msr(uint32_t msr)
{
	if (msr == MSR_APICBASE) {
		return (true);
	}
	if (msr >= MSR_APIC_000 &&
	    msr < (MSR_APIC_000 + 0x100)) {
		return (true);
	}
	return (false);
}

vm_msr_result_t
vlapic_rdmsr(struct vlapic *vlapic, uint32_t msr, uint64_t *valp)
{
	ASSERT(vlapic_owned_msr(msr));
	ASSERT3P(valp, !=, NULL);

	if (msr == MSR_APICBASE) {
		*valp = vlapic->msr_apicbase;
		return (VMR_OK);
	}

	/* #GP for x2APIC MSR accesses in xAPIC mode */
	if (!vlapic_x2mode(vlapic)) {
		return (VMR_GP);
	}

	uint64_t out = 0;
	const uint16_t reg = vlapic_msr_to_regoff(msr);
	switch (reg) {
	case APIC_OFFSET_ICR_LOW: {
		/* Read from ICR register gets entire (64-bit) value */
		uint32_t low = 0, high = 0;
		bool valid;

		valid = vlapic_read(vlapic, APIC_OFFSET_ICR_HI, &high);
		VERIFY(valid);
		valid = vlapic_read(vlapic, APIC_OFFSET_ICR_LOW, &low);
		VERIFY(valid);

		*valp = ((uint64_t)high << 32) | low;
		return (VMR_OK);
	}
	case APIC_OFFSET_ICR_HI:
		/* Already covered by ICR_LOW */
		return (VMR_GP);
	default:
		break;
	}
	if (!vlapic_read(vlapic, reg, (uint32_t *)&out)) {
		return (VMR_GP);
	}
	*valp = out;
	return (VMR_OK);
}

vm_msr_result_t
vlapic_wrmsr(struct vlapic *vlapic, uint32_t msr, uint64_t val)
{
	ASSERT(vlapic_owned_msr(msr));

	if (msr == MSR_APICBASE) {
		return (vlapic_set_apicbase(vlapic, val));
	}

	/* #GP for x2APIC MSR accesses in xAPIC mode */
	if (!vlapic_x2mode(vlapic)) {
		return (VMR_GP);
	}

	const uint16_t reg = vlapic_msr_to_regoff(msr);
	switch (reg) {
	case APIC_OFFSET_ICR_LOW: {
		/* Write to ICR register sets entire (64-bit) value */
		bool valid;

		valid = vlapic_write(vlapic, APIC_OFFSET_ICR_HI, val >> 32);
		VERIFY(valid);
		valid = vlapic_write(vlapic, APIC_OFFSET_ICR_LOW, val);
		VERIFY(valid);
		return (VMR_OK);
	}
	case APIC_OFFSET_ICR_HI:
		/* Already covered by ICR_LOW */
		return (VMR_GP);
	case APIC_OFFSET_ESR:
		/* Only 0 may be written from x2APIC mode */
		if (val != 0) {
			return (VMR_GP);
		}
		break;
	default:
		break;
	}
	if (!vlapic_write(vlapic, reg, val)) {
		return (VMR_GP);
	}
	return (VMR_OK);
}

void
vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	struct vlapic *vlapic;
	struct LAPIC *lapic;

	vlapic = vm_lapic(vm, vcpuid);

	if (state == X2APIC_DISABLED)
		vlapic->msr_apicbase &= ~APICBASE_X2APIC;
	else
		vlapic->msr_apicbase |= APICBASE_X2APIC;

	/*
	 * Reset the local APIC registers whose values are mode-dependent.
	 *
	 * XXX this works because the APIC mode can be changed only at vcpu
	 * initialization time.
	 */
	lapic = vlapic->apic_page;
	lapic->id = vlapic_get_id(vlapic);
	if (vlapic_x2mode(vlapic)) {
		lapic->ldr = x2apic_ldr(vlapic);
		lapic->dfr = 0;
	} else {
		lapic->ldr = 0;
		lapic->dfr = 0xffffffff;
	}

	if (state == X2APIC_ENABLED) {
		if (vlapic->ops.enable_x2apic_mode)
			(*vlapic->ops.enable_x2apic_mode)(vlapic);
	}
}

void
vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
    int delmode, int vec)
{
	bool lowprio;
	int vcpuid;
	cpuset_t dmask;

	if (delmode != IOART_DELFIXED &&
	    delmode != IOART_DELLOPRI &&
	    delmode != IOART_DELEXINT) {
		/* Invalid delivery mode */
		return;
	}
	lowprio = (delmode == IOART_DELLOPRI);

	/*
	 * We don't provide any virtual interrupt redirection hardware so
	 * all interrupts originating from the ioapic or MSI specify the
	 * 'dest' in the legacy xAPIC format.
	 */
	vlapic_calcdest(vm, &dmask, dest, phys, lowprio, false);

	while ((vcpuid = CPU_FFS(&dmask)) != 0) {
		vcpuid--;
		CPU_CLR(vcpuid, &dmask);
		if (delmode == IOART_DELEXINT) {
			(void) vm_inject_extint(vm, vcpuid);
		} else {
			(void) lapic_set_intr(vm, vcpuid, vec, level);
		}
	}
}

void
vlapic_post_intr(struct vlapic *vlapic, int hostcpu)
{
	/*
	 * Post an interrupt to the vcpu currently running on 'hostcpu'.
	 *
	 * This is done by leveraging features like Posted Interrupts (Intel)
	 * or the Doorbell MSR (AMD AVIC) that avoid a VM exit.
	 *
	 * If neither of these features is available then fall back to
	 * sending an IPI to 'hostcpu'.
	 */
	if (vlapic->ops.post_intr)
		(*vlapic->ops.post_intr)(vlapic, hostcpu);
	else
		poke_cpu(hostcpu);
}

void
vlapic_localize_resources(struct vlapic *vlapic)
{
	vmm_glue_callout_localize(&vlapic->callout);
}

void
vlapic_pause(struct vlapic *vlapic)
{
	VLAPIC_TIMER_LOCK(vlapic);
	callout_stop(&vlapic->callout);
	VLAPIC_TIMER_UNLOCK(vlapic);
}

void
vlapic_resume(struct vlapic *vlapic)
{
	VLAPIC_TIMER_LOCK(vlapic);
	if (vlapic->timer_fire_when != 0) {
		vlapic_callout_reset(vlapic);
	}
	VLAPIC_TIMER_UNLOCK(vlapic);
}
static int
vlapic_data_read(void *datap, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_LAPIC);
	VERIFY3U(req->vdr_version, ==, 1);
	VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_lapic_v1));

	struct vlapic *vlapic = datap;
	struct vdi_lapic_v1 *out = req->vdr_data;

	VLAPIC_TIMER_LOCK(vlapic);

	if (vlapic->ops.sync_state) {
		(*vlapic->ops.sync_state)(vlapic);
	}

	out->vl_msr_apicbase = vlapic->msr_apicbase;
	out->vl_esr_pending = vlapic->esr_pending;
	if (vlapic->timer_fire_when != 0) {
		out->vl_timer_target =
		    vm_normalize_hrtime(vlapic->vm, vlapic->timer_fire_when);
	} else {
		out->vl_timer_target = 0;
	}

	const struct LAPIC *lapic = vlapic->apic_page;
	struct vdi_lapic_page_v1 *out_page = &out->vl_lapic;

	/*
	 * While this might appear, at first glance, to be missing some fields,
	 * they are intentionally omitted:
	 * - PPR: its contents are always generated at runtime
	 * - EOI: write-only, and contents are ignored after handling
	 * - RRD: (aka RRR) read-only and always 0
	 * - CCR: calculated from underlying timer data
	 */
	out_page->vlp_id = lapic->id;
	out_page->vlp_version = lapic->version;
	out_page->vlp_tpr = lapic->tpr;
	out_page->vlp_apr = lapic->apr;
	out_page->vlp_ldr = lapic->ldr;
	out_page->vlp_dfr = lapic->dfr;
	out_page->vlp_svr = lapic->svr;
	out_page->vlp_esr = lapic->esr;
	out_page->vlp_icr = ((uint64_t)lapic->icr_hi << 32) | lapic->icr_lo;
	out_page->vlp_icr_timer = lapic->icr_timer;
	out_page->vlp_dcr_timer = lapic->dcr_timer;

	out_page->vlp_lvt_cmci = lapic->lvt_cmci;
	out_page->vlp_lvt_timer = lapic->lvt_timer;
	out_page->vlp_lvt_thermal = lapic->lvt_thermal;
	out_page->vlp_lvt_pcint = lapic->lvt_pcint;
	out_page->vlp_lvt_lint0 = lapic->lvt_lint0;
	out_page->vlp_lvt_lint1 = lapic->lvt_lint1;
	out_page->vlp_lvt_error = lapic->lvt_error;

	const uint32_t *isrptr = &lapic->isr0;
	const uint32_t *tmrptr = &lapic->tmr0;
	const uint32_t *irrptr = &lapic->irr0;
	for (uint_t i = 0; i < 8; i++) {
		out_page->vlp_isr[i] = isrptr[i * 4];
		out_page->vlp_tmr[i] = tmrptr[i * 4];
		out_page->vlp_irr[i] = irrptr[i * 4];
	}
	VLAPIC_TIMER_UNLOCK(vlapic);

	return (0);
}

static uint8_t
popc8(uint8_t val)
{
	uint8_t cnt;

	for (cnt = 0; val != 0; val &= (val - 1)) {
		cnt++;
	}
	return (cnt);
}

/*
 * Descriptions for the various failures which can occur when validating
 * to-be-written vlapic state.
 */
enum vlapic_validation_error {
	VVE_OK,
	VVE_BAD_ID,
	VVE_BAD_VERSION,
	VVE_BAD_MSR_BASE,
	VVE_BAD_ESR,
	VVE_BAD_TPR,
	VVE_LOW_VECTOR,
	VVE_ISR_PRIORITY,
};
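
/*
 * Validate an incoming VDC_LAPIC payload in full before committing any of
 * it, so a rejected import (EINVAL) leaves existing vlapic state untouched.
 */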
static enum vlapic_validation_error
vlapic_data_validate(const struct vlapic *vlapic, const vmm_data_req_t *req)
{
	ASSERT(req->vdr_version == 1 &&
	    req->vdr_len >= sizeof (struct vdi_lapic_v1));
	const struct vdi_lapic_v1 *src = req->vdr_data;

	if ((src->vl_esr_pending & ~APIC_VALID_MASK_ESR) != 0 ||
	    (src->vl_lapic.vlp_esr & ~APIC_VALID_MASK_ESR) != 0) {
		return (VVE_BAD_ESR);
	}

	/* Use the same restrictions as the wrmsr accessor for now */
	const uint64_t apicbase_reserved = APICBASE_RESERVED | APICBASE_X2APIC |
	    APICBASE_BSP;
	const uint64_t diff = src->vl_msr_apicbase ^ vlapic->msr_apicbase;
	if ((diff & apicbase_reserved) != 0) {
		return (VVE_BAD_MSR_BASE);
	}

	const struct vdi_lapic_page_v1 *page = &src->vl_lapic;
	/*
	 * Demand that the ID match for now.  This can be further updated when
	 * some of the x2apic handling is improved.
	 */
	if (page->vlp_id != vlapic_get_id(vlapic)) {
		return (VVE_BAD_ID);
	}

	if (page->vlp_version != vlapic->apic_page->version) {
		return (VVE_BAD_VERSION);
	}

	if (page->vlp_tpr > 0xff) {
		return (VVE_BAD_TPR);
	}

	/* Vectors 0-15 are not expected to be handled by the lapic */
	if ((page->vlp_isr[0] & 0xffff) != 0 ||
	    (page->vlp_irr[0] & 0xffff) != 0 ||
	    (page->vlp_tmr[0] & 0xffff) != 0) {
		return (VVE_LOW_VECTOR);
	}

	/* Only one interrupt should be in-service for each priority level */
	for (uint_t i = 0; i < 8; i++) {
		if (popc8((uint8_t)page->vlp_isr[i]) > 1 ||
		    popc8((uint8_t)(page->vlp_isr[i] >> 8)) > 1 ||
		    popc8((uint8_t)(page->vlp_isr[i] >> 16)) > 1 ||
		    popc8((uint8_t)(page->vlp_isr[i] >> 24)) > 1) {
			return (VVE_ISR_PRIORITY);
		}
	}

	return (VVE_OK);
}
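
/*
 * Apply a validated payload.  Note the ordering below: the divider is
 * recomputed, and LDR/DFR and the LVTs are run back through their write
 * handlers, so that derived state (timer_cur_freq, lvt_last) matches the
 * imported register contents.
 */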
static int
vlapic_data_write(void *datap, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_LAPIC);
	VERIFY3U(req->vdr_version, ==, 1);
	VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_lapic_v1));

	struct vlapic *vlapic = datap;
	if (vlapic_data_validate(vlapic, req) != VVE_OK) {
		return (EINVAL);
	}
	const struct vdi_lapic_v1 *src = req->vdr_data;
	const struct vdi_lapic_page_v1 *page = &src->vl_lapic;
	struct LAPIC *lapic = vlapic->apic_page;

	VLAPIC_TIMER_LOCK(vlapic);

	/* Already ensured by vlapic_data_validate() */
	VERIFY3U(page->vlp_version, ==, lapic->version);

	vlapic->msr_apicbase = src->vl_msr_apicbase;
	vlapic->esr_pending = src->vl_esr_pending;

	lapic->tpr = page->vlp_tpr;
	lapic->apr = page->vlp_apr;
	lapic->ldr = page->vlp_ldr;
	lapic->dfr = page->vlp_dfr;
	lapic->svr = page->vlp_svr;
	lapic->esr = page->vlp_esr;
	lapic->icr_lo = (uint32_t)page->vlp_icr;
	lapic->icr_hi = (uint32_t)(page->vlp_icr >> 32);

	lapic->icr_timer = page->vlp_icr_timer;
	lapic->dcr_timer = page->vlp_dcr_timer;
	vlapic_update_divider(vlapic);

	/* cleanse LDR/DFR */
	vlapic_ldr_write_handler(vlapic);
	vlapic_dfr_write_handler(vlapic);

	lapic->lvt_cmci = page->vlp_lvt_cmci;
	lapic->lvt_timer = page->vlp_lvt_timer;
	lapic->lvt_thermal = page->vlp_lvt_thermal;
	lapic->lvt_pcint = page->vlp_lvt_pcint;
	lapic->lvt_lint0 = page->vlp_lvt_lint0;
	lapic->lvt_lint1 = page->vlp_lvt_lint1;
	lapic->lvt_error = page->vlp_lvt_error;
	/* cleanse LVTs */
	vlapic_refresh_lvts(vlapic);

	uint32_t *isrptr = &lapic->isr0;
	uint32_t *tmrptr = &lapic->tmr0;
	uint32_t *irrptr = &lapic->irr0;
	for (uint_t i = 0; i < 8; i++) {
		isrptr[i * 4] = page->vlp_isr[i];
		tmrptr[i * 4] = page->vlp_tmr[i];
		irrptr[i * 4] = page->vlp_irr[i];
	}

	if (src->vl_timer_target != 0) {
		vlapic->timer_fire_when =
		    vm_denormalize_hrtime(vlapic->vm, src->vl_timer_target);

		/*
		 * Check to see if timer expiration would result in computed
		 * CCR values in excess of what is configured in ICR/DCR.
		 */
		const hrtime_t now = gethrtime();
		if (vlapic->timer_fire_when > now) {
			const uint32_t ccr = hrt_freq_count(
			    vlapic->timer_fire_when - now,
			    vlapic->timer_cur_freq);

			/*
			 * Until we have a richer event/logging system
			 * available, just note such an overage as a stat.
			 */
			if (ccr > lapic->icr_timer) {
				vlapic->stats.vs_import_timer_overage++;
			}
		}

		if (!vm_is_paused(vlapic->vm)) {
			vlapic_callout_reset(vlapic);
		}
	} else {
		vlapic->timer_fire_when = 0;
	}

	if (vlapic->ops.sync_state) {
		(*vlapic->ops.sync_state)(vlapic);
	}
	VLAPIC_TIMER_UNLOCK(vlapic);

	return (0);
}

static const vmm_data_version_entry_t lapic_v1 = {
	.vdve_class = VDC_LAPIC,
	.vdve_version = 1,
	.vdve_len_expect = sizeof (struct vdi_lapic_v1),
	.vdve_readf = vlapic_data_read,
	.vdve_writef = vlapic_data_write,
};
VMM_DATA_VERSION(lapic_v1);