/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <x86/specialreg.h>
#include <x86/apicreg.h>

#include <machine/clock.h>
#include <machine/smp.h>

#include <machine/vmm.h>

#include "vmm_ipi.h"
#include "vmm_lapic.h"
#include "vmm_ktr.h"
#include "vmm_stat.h"

#include "vlapic.h"
#include "vlapic_priv.h"
#include "vioapic.h"
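
/*
 * The 4 high-order bits of an interrupt vector encode its priority class,
 * e.g. PRIO(0x45) is 4. A pending interrupt is delivered to the vcpu only
 * if its priority class exceeds that of the processor priority (PPR).
 */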
#define	PRIO(x)			((x) >> 4)

#define	VLAPIC_VERSION		(16)

#define	x2apic(vlapic)	(((vlapic)->msr_apicbase & APICBASE_X2APIC) ? 1 : 0)

/*
 * The 'vlapic->timer_mtx' is used to provide mutual exclusion between the
 * vlapic_callout_handler() and vcpu accesses to:
 * - timer_freq_bt, timer_period_bt, timer_fire_bt
 * - timer LVT register
 */
#define	VLAPIC_TIMER_LOCK(vlapic)	mtx_lock_spin(&((vlapic)->timer_mtx))
#define	VLAPIC_TIMER_UNLOCK(vlapic)	mtx_unlock_spin(&((vlapic)->timer_mtx))
#define	VLAPIC_TIMER_LOCKED(vlapic)	mtx_owned(&((vlapic)->timer_mtx))

#define	VLAPIC_BUS_FREQ		tsc_freq

static __inline uint32_t
vlapic_get_id(struct vlapic *vlapic)
{

	if (x2apic(vlapic))
		return (vlapic->vcpuid);
	else
		return (vlapic->vcpuid << 24);
}

static uint32_t
x2apic_ldr(struct vlapic *vlapic)
{
	int apicid;
	uint32_t ldr;

	apicid = vlapic_get_id(vlapic);
	ldr = 1 << (apicid & 0xf);
	ldr |= (apicid & 0xffff0) << 12;
	return (ldr);
}

void
vlapic_dfr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;
	if (x2apic(vlapic)) {
		VM_CTR1(vlapic->vm, "ignoring write to DFR in x2apic mode: %#x",
		    lapic->dfr);
		lapic->dfr = 0;
		return;
	}

	lapic->dfr &= APIC_DFR_MODEL_MASK;
	lapic->dfr |= APIC_DFR_RESERVED;

	if ((lapic->dfr & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_FLAT)
		VLAPIC_CTR0(vlapic, "vlapic DFR in Flat Model");
	else if ((lapic->dfr & APIC_DFR_MODEL_MASK) == APIC_DFR_MODEL_CLUSTER)
		VLAPIC_CTR0(vlapic, "vlapic DFR in Cluster Model");
	else
		VLAPIC_CTR1(vlapic, "DFR in Unknown Model %#x", lapic->dfr);
}

void
vlapic_ldr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;

	/* LDR is read-only in x2apic mode */
	if (x2apic(vlapic)) {
		VLAPIC_CTR1(vlapic, "ignoring write to LDR in x2apic mode: %#x",
		    lapic->ldr);
		lapic->ldr = x2apic_ldr(vlapic);
	} else {
		lapic->ldr &= ~APIC_LDR_RESERVED;
		VLAPIC_CTR1(vlapic, "vlapic LDR set to %#x", lapic->ldr);
	}
}

void
vlapic_id_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	/*
	 * We don't allow the ID register to be modified so reset it back to
	 * its default value.
	 */
	lapic = vlapic->apic_page;
	lapic->id = vlapic_get_id(vlapic);
}

static int
vlapic_timer_divisor(uint32_t dcr)
{
	switch (dcr & 0xB) {
	case APIC_TDCR_1:
		return (1);
	case APIC_TDCR_2:
		return (2);
	case APIC_TDCR_4:
		return (4);
	case APIC_TDCR_8:
		return (8);
	case APIC_TDCR_16:
		return (16);
	case APIC_TDCR_32:
		return (32);
	case APIC_TDCR_64:
		return (64);
	case APIC_TDCR_128:
		return (128);
	default:
		panic("vlapic_timer_divisor: invalid dcr 0x%08x", dcr);
	}
}

#if 0
static inline void
vlapic_dump_lvt(uint32_t offset, uint32_t *lvt)
{
	printf("Offset %x: lvt %08x (V:%02x DS:%x M:%x)\n", offset,
	    *lvt, *lvt & APIC_LVTT_VECTOR, *lvt & APIC_LVTT_DS,
	    *lvt & APIC_LVTT_M);
}
#endif
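
/*
 * Return the current value of the guest's timer Current Count Register.
 * The CCR is derived from the time remaining until the timer callout
 * fires, converted to timer ticks at the programmed timer frequency.
 */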
static uint32_t
vlapic_get_ccr(struct vlapic *vlapic)
{
	struct bintime bt_now, bt_rem;
	struct LAPIC *lapic;
	uint32_t ccr;

	ccr = 0;
	lapic = vlapic->apic_page;

	VLAPIC_TIMER_LOCK(vlapic);
	if (callout_active(&vlapic->callout)) {
		/*
		 * If the timer is scheduled to expire in the future then
		 * compute the value of 'ccr' based on the remaining time.
		 */
		binuptime(&bt_now);
		if (bintime_cmp(&vlapic->timer_fire_bt, &bt_now, >)) {
			bt_rem = vlapic->timer_fire_bt;
			bintime_sub(&bt_rem, &bt_now);
			ccr += bt_rem.sec * BT2FREQ(&vlapic->timer_freq_bt);
			ccr += bt_rem.frac / vlapic->timer_freq_bt.frac;
		}
	}
	KASSERT(ccr <= lapic->icr_timer, ("vlapic_get_ccr: invalid ccr %#x, "
	    "icr_timer is %#x", ccr, lapic->icr_timer));
	VLAPIC_CTR2(vlapic, "vlapic ccr_timer = %#x, icr_timer = %#x",
	    ccr, lapic->icr_timer);
	VLAPIC_TIMER_UNLOCK(vlapic);
	return (ccr);
}

void
vlapic_dcr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;
	int divisor;

	lapic = vlapic->apic_page;
	VLAPIC_TIMER_LOCK(vlapic);

	divisor = vlapic_timer_divisor(lapic->dcr_timer);
	VLAPIC_CTR2(vlapic, "vlapic dcr_timer=%#x, divisor=%d",
	    lapic->dcr_timer, divisor);

	/*
	 * Update the timer frequency and the timer period.
	 *
	 * XXX changes to the frequency divider will not take effect until
	 * the timer is reloaded.
	 */
	FREQ2BT(VLAPIC_BUS_FREQ / divisor, &vlapic->timer_freq_bt);
	vlapic->timer_period_bt = vlapic->timer_freq_bt;
	bintime_mul(&vlapic->timer_period_bt, lapic->icr_timer);

	VLAPIC_TIMER_UNLOCK(vlapic);
}

void
vlapic_esr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;
	lapic->esr = vlapic->esr_pending;
	vlapic->esr_pending = 0;
}

int
vlapic_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
{
	struct LAPIC *lapic;
	uint32_t *irrptr, *tmrptr, mask;
	int idx;

	KASSERT(vector >= 0 && vector < 256, ("invalid vector %d", vector));

	lapic = vlapic->apic_page;
	if (!(lapic->svr & APIC_SVR_ENABLE)) {
		VLAPIC_CTR1(vlapic, "vlapic is software disabled, ignoring "
		    "interrupt %d", vector);
		return (0);
	}

	if (vector < 16) {
		vlapic_set_error(vlapic, APIC_ESR_RECEIVE_ILLEGAL_VECTOR);
		VLAPIC_CTR1(vlapic, "vlapic ignoring interrupt to vector %d",
		    vector);
		return (1);
	}

	if (vlapic->ops.set_intr_ready)
		return ((*vlapic->ops.set_intr_ready)(vlapic, vector, level));

	idx = (vector / 32) * 4;
	mask = 1 << (vector % 32);

	irrptr = &lapic->irr0;
	atomic_set_int(&irrptr[idx], mask);

	/*
	 * Upon acceptance of an interrupt into the IRR the corresponding
	 * TMR bit is cleared for edge-triggered interrupts and set for
	 * level-triggered interrupts.
	 */
	tmrptr = &lapic->tmr0;
	if (level)
		atomic_set_int(&tmrptr[idx], mask);
	else
		atomic_clear_int(&tmrptr[idx], mask);

	VLAPIC_CTR_IRR(vlapic, "vlapic_set_intr_ready");
	return (1);
}
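
/*
 * Return a pointer to the LVT register in the virtual APIC page that
 * corresponds to the given register offset. Each 32-bit APIC register
 * occupies 16 bytes in the page, hence the stride of 4 uint32_t's
 * between consecutive LVT registers.
 */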
static __inline uint32_t *
vlapic_get_lvtptr(struct vlapic *vlapic, uint32_t offset)
{
	struct LAPIC *lapic = vlapic->apic_page;
	int i;

	switch (offset) {
	case APIC_OFFSET_CMCI_LVT:
		return (&lapic->lvt_cmci);
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		i = (offset - APIC_OFFSET_TIMER_LVT) >> 2;
		return ((&lapic->lvt_timer) + i);
	default:
		panic("vlapic_get_lvt: invalid LVT\n");
	}
}

static __inline int
lvt_off_to_idx(uint32_t offset)
{
	int index;

	switch (offset) {
	case APIC_OFFSET_CMCI_LVT:
		index = APIC_LVT_CMCI;
		break;
	case APIC_OFFSET_TIMER_LVT:
		index = APIC_LVT_TIMER;
		break;
	case APIC_OFFSET_THERM_LVT:
		index = APIC_LVT_THERMAL;
		break;
	case APIC_OFFSET_PERF_LVT:
		index = APIC_LVT_PMC;
		break;
	case APIC_OFFSET_LINT0_LVT:
		index = APIC_LVT_LINT0;
		break;
	case APIC_OFFSET_LINT1_LVT:
		index = APIC_LVT_LINT1;
		break;
	case APIC_OFFSET_ERROR_LVT:
		index = APIC_LVT_ERROR;
		break;
	default:
		index = -1;
		break;
	}
	KASSERT(index >= 0 && index <= VLAPIC_MAXLVT_INDEX, ("lvt_off_to_idx: "
	    "invalid lvt index %d for offset %#x", index, offset));

	return (index);
}

static __inline uint32_t
vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
{
	int idx;
	uint32_t val;

	idx = lvt_off_to_idx(offset);
	val = atomic_load_acq_32(&vlapic->lvt_last[idx]);
	return (val);
}

void
vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
{
	uint32_t *lvtptr, mask, val;
	struct LAPIC *lapic;
	int idx;

	lapic = vlapic->apic_page;
	lvtptr = vlapic_get_lvtptr(vlapic, offset);
	val = *lvtptr;
	idx = lvt_off_to_idx(offset);

	if (!(lapic->svr & APIC_SVR_ENABLE))
		val |= APIC_LVT_M;
	mask = APIC_LVT_M | APIC_LVT_DS | APIC_LVT_VECTOR;
	switch (offset) {
	case APIC_OFFSET_TIMER_LVT:
		mask |= APIC_LVTT_TM;
		break;
	case APIC_OFFSET_ERROR_LVT:
		break;
	case APIC_OFFSET_LINT0_LVT:
	case APIC_OFFSET_LINT1_LVT:
		mask |= APIC_LVT_TM | APIC_LVT_RIRR | APIC_LVT_IIPP;
		/* FALLTHROUGH */
	default:
		mask |= APIC_LVT_DM;
		break;
	}
	val &= mask;
	*lvtptr = val;
	atomic_store_rel_32(&vlapic->lvt_last[idx], val);
}

static void
vlapic_mask_lvts(struct vlapic *vlapic)
{
	struct LAPIC *lapic = vlapic->apic_page;

	lapic->lvt_cmci |= APIC_LVT_M;
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_CMCI_LVT);

	lapic->lvt_timer |= APIC_LVT_M;
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_TIMER_LVT);

	lapic->lvt_thermal |= APIC_LVT_M;
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_THERM_LVT);

	lapic->lvt_pcint |= APIC_LVT_M;
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_PERF_LVT);

	lapic->lvt_lint0 |= APIC_LVT_M;
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_LINT0_LVT);

	lapic->lvt_lint1 |= APIC_LVT_M;
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_LINT1_LVT);

	lapic->lvt_error |= APIC_LVT_M;
	vlapic_lvt_write_handler(vlapic, APIC_OFFSET_ERROR_LVT);
}
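
/*
 * Deliver the interrupt described by an LVT entry to the local vcpu.
 * Returns 1 if the interrupt was delivered and 0 if it was masked or
 * its delivery mode is not supported.
 */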
static int
vlapic_fire_lvt(struct vlapic *vlapic, uint32_t lvt)
{
	uint32_t vec, mode;

	if (lvt & APIC_LVT_M)
		return (0);

	vec = lvt & APIC_LVT_VECTOR;
	mode = lvt & APIC_LVT_DM;

	switch (mode) {
	case APIC_LVT_DM_FIXED:
		if (vec < 16) {
			vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR);
			return (0);
		}
		if (vlapic_set_intr_ready(vlapic, vec, false))
			vcpu_notify_event(vlapic->vm, vlapic->vcpuid, true);
		break;
	case APIC_LVT_DM_NMI:
		vm_inject_nmi(vlapic->vm, vlapic->vcpuid);
		break;
	default:
		/* Other modes ignored */
		return (0);
	}
	return (1);
}

#if 1
static void
dump_isrvec_stk(struct vlapic *vlapic)
{
	int i;
	uint32_t *isrptr;

	isrptr = &vlapic->apic_page->isr0;
	for (i = 0; i < 8; i++)
		printf("ISR%d 0x%08x\n", i, isrptr[i * 4]);

	for (i = 0; i <= vlapic->isrvec_stk_top; i++)
		printf("isrvec_stk[%d] = %d\n", i, vlapic->isrvec_stk[i]);
}
#endif

/*
 * Algorithm adopted from section "Interrupt, Task and Processor Priority"
 * in Intel Architecture Manual Vol 3a.
 */
static void
vlapic_update_ppr(struct vlapic *vlapic)
{
	int isrvec, tpr, ppr;

	/*
	 * Note that the value on the stack at index 0 is always 0.
	 *
	 * This is a placeholder for the value of ISRV when none of the
	 * bits is set in the ISRx registers.
	 */
	isrvec = vlapic->isrvec_stk[vlapic->isrvec_stk_top];
	tpr = vlapic->apic_page->tpr;

#if 1
	{
		int i, lastprio, curprio, vector, idx;
		uint32_t *isrptr;

		if (vlapic->isrvec_stk_top == 0 && isrvec != 0)
			panic("isrvec_stk is corrupted: %d", isrvec);

		/*
		 * Make sure that the priority of the nested interrupts is
		 * always increasing.
		 */
		lastprio = -1;
		for (i = 1; i <= vlapic->isrvec_stk_top; i++) {
			curprio = PRIO(vlapic->isrvec_stk[i]);
			if (curprio <= lastprio) {
				dump_isrvec_stk(vlapic);
				panic("isrvec_stk does not satisfy invariant");
			}
			lastprio = curprio;
		}

		/*
		 * Make sure that each bit set in the ISRx registers has a
		 * corresponding entry on the isrvec stack.
		 */
		i = 1;
		isrptr = &vlapic->apic_page->isr0;
		for (vector = 0; vector < 256; vector++) {
			idx = (vector / 32) * 4;
			if (isrptr[idx] & (1 << (vector % 32))) {
				if (i > vlapic->isrvec_stk_top ||
				    vlapic->isrvec_stk[i] != vector) {
					dump_isrvec_stk(vlapic);
					panic("ISR and isrvec_stk out of sync");
				}
				i++;
			}
		}
	}
#endif

	if (PRIO(tpr) >= PRIO(isrvec))
		ppr = tpr;
	else
		ppr = isrvec & 0xf0;

	vlapic->apic_page->ppr = ppr;
	VLAPIC_CTR1(vlapic, "vlapic_update_ppr 0x%02x", ppr);
}
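
/*
 * Process an EOI from the guest: clear the highest-priority bit set in
 * the ISR, pop the corresponding vector off the isrvec stack, recompute
 * the PPR, and notify the vioapic if the vector was level-triggered.
 */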
static void
vlapic_process_eoi(struct vlapic *vlapic)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *isrptr, *tmrptr;
	int i, idx, bitpos, vector;

	isrptr = &lapic->isr0;
	tmrptr = &lapic->tmr0;

	/*
	 * The x86 architecture reserves the first 32 vectors for use
	 * by the processor.
	 */
	for (i = 7; i > 0; i--) {
		idx = i * 4;
		bitpos = fls(isrptr[idx]);
		if (bitpos-- != 0) {
			if (vlapic->isrvec_stk_top <= 0) {
				panic("invalid vlapic isrvec_stk_top %d",
				    vlapic->isrvec_stk_top);
			}
			isrptr[idx] &= ~(1 << bitpos);
			VLAPIC_CTR_ISR(vlapic, "vlapic_process_eoi");
			vlapic->isrvec_stk_top--;
			vlapic_update_ppr(vlapic);
			if ((tmrptr[idx] & (1 << bitpos)) != 0) {
				vector = i * 32 + bitpos;
				vioapic_process_eoi(vlapic->vm, vlapic->vcpuid,
				    vector);
			}
			return;
		}
	}
}

static __inline int
vlapic_get_lvt_field(uint32_t lvt, uint32_t mask)
{

	return (lvt & mask);
}

static __inline int
vlapic_periodic_timer(struct vlapic *vlapic)
{
	uint32_t lvt;

	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);

	return (vlapic_get_lvt_field(lvt, APIC_LVTT_TM_PERIODIC));
}

static VMM_STAT(VLAPIC_INTR_ERROR, "error interrupts generated by vlapic");

void
vlapic_set_error(struct vlapic *vlapic, uint32_t mask)
{
	uint32_t lvt;

	vlapic->esr_pending |= mask;
	if (vlapic->esr_firing)
		return;
	vlapic->esr_firing = 1;

	/* The error LVT always uses the fixed delivery mode. */
	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_ERROR_LVT);
	if (vlapic_fire_lvt(vlapic, lvt | APIC_LVT_DM_FIXED)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_ERROR, 1);
	}
	vlapic->esr_firing = 0;
}

static VMM_STAT(VLAPIC_INTR_TIMER, "timer interrupts generated by vlapic");
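
/*
 * Deliver the timer interrupt described by the timer LVT. The caller
 * must hold the vlapic timer lock.
 */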
static void
vlapic_fire_timer(struct vlapic *vlapic)
{
	uint32_t lvt;

	KASSERT(VLAPIC_TIMER_LOCKED(vlapic), ("vlapic_fire_timer not locked"));

	/* The timer LVT always uses the fixed delivery mode. */
	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);
	if (vlapic_fire_lvt(vlapic, lvt | APIC_LVT_DM_FIXED)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_TIMER, 1);
	}
}

static VMM_STAT(VLAPIC_INTR_CMC,
    "corrected machine check interrupts generated by vlapic");

void
vlapic_fire_cmci(struct vlapic *vlapic)
{
	uint32_t lvt;

	lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_CMCI_LVT);
	if (vlapic_fire_lvt(vlapic, lvt)) {
		vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_CMC, 1);
	}
}

static VMM_STAT_ARRAY(LVTS_TRIGGERRED, VLAPIC_MAXLVT_INDEX + 1,
    "lvts triggered");

int
vlapic_trigger_lvt(struct vlapic *vlapic, int vector)
{
	uint32_t lvt;

	switch (vector) {
	case APIC_LVT_LINT0:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_LINT0_LVT);
		break;
	case APIC_LVT_LINT1:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_LINT1_LVT);
		break;
	case APIC_LVT_TIMER:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_TIMER_LVT);
		lvt |= APIC_LVT_DM_FIXED;
		break;
	case APIC_LVT_ERROR:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_ERROR_LVT);
		lvt |= APIC_LVT_DM_FIXED;
		break;
	case APIC_LVT_PMC:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_PERF_LVT);
		break;
	case APIC_LVT_THERMAL:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_THERM_LVT);
		break;
	case APIC_LVT_CMCI:
		lvt = vlapic_get_lvt(vlapic, APIC_OFFSET_CMCI_LVT);
		break;
	default:
		return (EINVAL);
	}
	if (vlapic_fire_lvt(vlapic, lvt)) {
		vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
		    LVTS_TRIGGERRED, vector, 1);
	}
	return (0);
}
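
/*
 * Timer callout: fire the timer interrupt and, if the timer LVT is in
 * periodic mode, reschedule the callout one period after the previous
 * expected expiration, compensating for any callout latency.
 */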
static void
vlapic_callout_handler(void *arg)
{
	struct vlapic *vlapic;
	struct bintime bt, btnow;
	sbintime_t rem_sbt;

	vlapic = arg;

	VLAPIC_TIMER_LOCK(vlapic);
	if (callout_pending(&vlapic->callout))	/* callout was reset */
		goto done;

	if (!callout_active(&vlapic->callout))	/* callout was stopped */
		goto done;

	callout_deactivate(&vlapic->callout);

	vlapic_fire_timer(vlapic);

	if (vlapic_periodic_timer(vlapic)) {
		binuptime(&btnow);
		KASSERT(bintime_cmp(&btnow, &vlapic->timer_fire_bt, >=),
		    ("vlapic callout at %#lx.%#lx, expected at %#lx.%#lx",
		    btnow.sec, btnow.frac, vlapic->timer_fire_bt.sec,
		    vlapic->timer_fire_bt.frac));

		/*
		 * Compute the delta between when the timer was supposed to
		 * fire and the present time.
		 */
		bt = btnow;
		bintime_sub(&bt, &vlapic->timer_fire_bt);

		rem_sbt = bttosbt(vlapic->timer_period_bt);
		if (bintime_cmp(&bt, &vlapic->timer_period_bt, <)) {
			/*
			 * Adjust the time until the next countdown downward
			 * to account for the lost time.
			 */
			rem_sbt -= bttosbt(bt);
		} else {
			/*
			 * If the delta is greater than the timer period then
			 * just reset our time base instead of trying to catch
			 * up.
			 */
			vlapic->timer_fire_bt = btnow;
			VLAPIC_CTR2(vlapic, "vlapic timer lagging by %lu "
			    "usecs, period is %lu usecs - resetting time base",
			    bttosbt(bt) / SBT_1US,
			    bttosbt(vlapic->timer_period_bt) / SBT_1US);
		}

		bintime_add(&vlapic->timer_fire_bt, &vlapic->timer_period_bt);
		callout_reset_sbt(&vlapic->callout, rem_sbt, 0,
		    vlapic_callout_handler, vlapic, 0);
	}
done:
	VLAPIC_TIMER_UNLOCK(vlapic);
}

void
vlapic_icrtmr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;
	sbintime_t sbt;
	uint32_t icr_timer;

	VLAPIC_TIMER_LOCK(vlapic);

	lapic = vlapic->apic_page;
	icr_timer = lapic->icr_timer;

	vlapic->timer_period_bt = vlapic->timer_freq_bt;
	bintime_mul(&vlapic->timer_period_bt, icr_timer);

	if (icr_timer != 0) {
		binuptime(&vlapic->timer_fire_bt);
		bintime_add(&vlapic->timer_fire_bt, &vlapic->timer_period_bt);

		sbt = bttosbt(vlapic->timer_period_bt);
		callout_reset_sbt(&vlapic->callout, sbt, 0,
		    vlapic_callout_handler, vlapic, 0);
	} else
		callout_stop(&vlapic->callout);

	VLAPIC_TIMER_UNLOCK(vlapic);
}

/*
 * This function populates 'dmask' with the set of vcpus that match the
 * addressing specified by the (dest, phys, lowprio) tuple.
 *
 * 'x2apic_dest' specifies whether 'dest' is interpreted as x2APIC (32-bit)
 * or xAPIC (8-bit) destination field.
 */
static void
vlapic_calcdest(struct vm *vm, cpuset_t *dmask, uint32_t dest, bool phys,
    bool lowprio, bool x2apic_dest)
{
	struct vlapic *vlapic;
	uint32_t dfr, ldr, ldest, cluster;
	uint32_t mda_flat_ldest, mda_cluster_ldest, mda_ldest, mda_cluster_id;
	cpuset_t amask;
	int vcpuid;

	if ((x2apic_dest && dest == 0xffffffff) ||
	    (!x2apic_dest && dest == 0xff)) {
		/*
		 * Broadcast in both logical and physical modes.
		 */
		*dmask = vm_active_cpus(vm);
		return;
	}

	if (phys) {
		/*
		 * Physical mode: destination is APIC ID.
		 */
		CPU_ZERO(dmask);
		vcpuid = vm_apicid2vcpuid(vm, dest);
		if (vcpuid < VM_MAXCPU)
			CPU_SET(vcpuid, dmask);
	} else {
		/*
		 * In the "Flat Model" the MDA is interpreted as an 8-bit wide
		 * bitmask. This model is only available in the xAPIC mode.
		 */
		mda_flat_ldest = dest & 0xff;

		/*
		 * In the "Cluster Model" the MDA is used to identify a
		 * specific cluster and a set of APICs in that cluster.
		 */
		if (x2apic_dest) {
			mda_cluster_id = dest >> 16;
			mda_cluster_ldest = dest & 0xffff;
		} else {
			mda_cluster_id = (dest >> 4) & 0xf;
			mda_cluster_ldest = dest & 0xf;
		}

		/*
		 * Logical mode: match each APIC that has a bit set
		 * in its LDR that matches a bit in the ldest.
		 */
		CPU_ZERO(dmask);
		amask = vm_active_cpus(vm);
		while ((vcpuid = CPU_FFS(&amask)) != 0) {
			vcpuid--;
			CPU_CLR(vcpuid, &amask);

			vlapic = vm_lapic(vm, vcpuid);
			dfr = vlapic->apic_page->dfr;
			ldr = vlapic->apic_page->ldr;

			if ((dfr & APIC_DFR_MODEL_MASK) ==
			    APIC_DFR_MODEL_FLAT) {
				ldest = ldr >> 24;
				mda_ldest = mda_flat_ldest;
			} else if ((dfr & APIC_DFR_MODEL_MASK) ==
			    APIC_DFR_MODEL_CLUSTER) {
				if (x2apic(vlapic)) {
					cluster = ldr >> 16;
					ldest = ldr & 0xffff;
				} else {
					cluster = ldr >> 28;
					ldest = (ldr >> 24) & 0xf;
				}
				if (cluster != mda_cluster_id)
					continue;
				mda_ldest = mda_cluster_ldest;
			} else {
				/*
				 * Guest has configured a bad logical
				 * model for this vcpu - skip it.
				 */
				VLAPIC_CTR1(vlapic, "vlapic has bad logical "
				    "model %x - cannot deliver interrupt", dfr);
				continue;
			}

			if ((mda_ldest & ldest) != 0) {
				CPU_SET(vcpuid, dmask);
				if (lowprio)
					break;
			}
		}
	}
}

static VMM_STAT_ARRAY(IPIS_SENT, VM_MAXCPU, "ipis sent to vcpu");
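
/*
 * Handle a write to the low word of the ICR, which triggers an IPI.
 * Fixed and NMI IPIs are delivered entirely in the kernel. INIT and
 * STARTUP IPIs update the target vcpu's boot state; a STARTUP IPI also
 * requests a return to userland (*retu) to spin up the AP. Any other
 * delivery mode is punted to userland by returning 1.
 */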
int
vlapic_icrlo_write_handler(struct vlapic *vlapic, bool *retu)
{
	int i;
	bool phys;
	cpuset_t dmask;
	uint64_t icrval;
	uint32_t dest, vec, mode;
	struct vlapic *vlapic2;
	struct vm_exit *vmexit;
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;
	lapic->icr_lo &= ~APIC_DELSTAT_PEND;
	icrval = ((uint64_t)lapic->icr_hi << 32) | lapic->icr_lo;

	if (x2apic(vlapic))
		dest = icrval >> 32;
	else
		dest = icrval >> (32 + 24);
	vec = icrval & APIC_VECTOR_MASK;
	mode = icrval & APIC_DELMODE_MASK;

	if (mode == APIC_DELMODE_FIXED && vec < 16) {
		vlapic_set_error(vlapic, APIC_ESR_SEND_ILLEGAL_VECTOR);
		VLAPIC_CTR1(vlapic, "Ignoring invalid IPI %d", vec);
		return (0);
	}

	VLAPIC_CTR2(vlapic, "icrlo 0x%016lx triggered ipi %d", icrval, vec);

	if (mode == APIC_DELMODE_FIXED || mode == APIC_DELMODE_NMI) {
		switch (icrval & APIC_DEST_MASK) {
		case APIC_DEST_DESTFLD:
			phys = ((icrval & APIC_DESTMODE_LOG) == 0);
			vlapic_calcdest(vlapic->vm, &dmask, dest, phys, false,
			    x2apic(vlapic));
			break;
		case APIC_DEST_SELF:
			CPU_SETOF(vlapic->vcpuid, &dmask);
			break;
		case APIC_DEST_ALLISELF:
			dmask = vm_active_cpus(vlapic->vm);
			break;
		case APIC_DEST_ALLESELF:
			dmask = vm_active_cpus(vlapic->vm);
			CPU_CLR(vlapic->vcpuid, &dmask);
			break;
		default:
			CPU_ZERO(&dmask);	/* satisfy gcc */
			break;
		}

		while ((i = CPU_FFS(&dmask)) != 0) {
			i--;
			CPU_CLR(i, &dmask);
			if (mode == APIC_DELMODE_FIXED) {
				lapic_intr_edge(vlapic->vm, i, vec);
				vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
				    IPIS_SENT, i, 1);
				VLAPIC_CTR2(vlapic, "vlapic sending ipi %d "
				    "to vcpuid %d", vec, i);
			} else {
				vm_inject_nmi(vlapic->vm, i);
				VLAPIC_CTR1(vlapic, "vlapic sending ipi nmi "
				    "to vcpuid %d", i);
			}
		}

		return (0);	/* handled completely in the kernel */
	}

	if (mode == APIC_DELMODE_INIT) {
		if ((icrval & APIC_LEVEL_MASK) == APIC_LEVEL_DEASSERT)
			return (0);

		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
			vlapic2 = vm_lapic(vlapic->vm, dest);

			/* move from INIT to waiting-for-SIPI state */
			if (vlapic2->boot_state == BS_INIT) {
				vlapic2->boot_state = BS_SIPI;
			}

			return (0);
		}
	}

	if (mode == APIC_DELMODE_STARTUP) {
		if (vlapic->vcpuid == 0 && dest != 0 && dest < VM_MAXCPU) {
			vlapic2 = vm_lapic(vlapic->vm, dest);

			/*
			 * Ignore SIPIs in any state other than wait-for-SIPI
			 */
			if (vlapic2->boot_state != BS_SIPI)
				return (0);

			/*
			 * XXX this assumes that the startup IPI always succeeds
			 */
			vlapic2->boot_state = BS_RUNNING;
			vm_activate_cpu(vlapic2->vm, dest);

			*retu = true;
			vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
			vmexit->exitcode = VM_EXITCODE_SPINUP_AP;
			vmexit->u.spinup_ap.vcpu = dest;
			vmexit->u.spinup_ap.rip = vec << PAGE_SHIFT;

			return (0);
		}
	}

	/*
	 * This will cause a return to userland.
	 */
	return (1);
}
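
/*
 * Return 1 and store the vector of the highest-priority pending
 * interrupt in '*vecptr' if an interrupt in the IRR outranks the
 * current processor priority; return 0 otherwise.
 */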
int
vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
{
	struct LAPIC *lapic = vlapic->apic_page;
	int idx, i, bitpos, vector;
	uint32_t *irrptr, val;

	if (vlapic->ops.pending_intr)
		return ((*vlapic->ops.pending_intr)(vlapic, vecptr));

	irrptr = &lapic->irr0;

	/*
	 * The x86 architecture reserves the first 32 vectors for use
	 * by the processor.
	 */
	for (i = 7; i > 0; i--) {
		idx = i * 4;
		val = atomic_load_acq_int(&irrptr[idx]);
		bitpos = fls(val);
		if (bitpos != 0) {
			vector = i * 32 + (bitpos - 1);
			if (PRIO(vector) > PRIO(lapic->ppr)) {
				VLAPIC_CTR1(vlapic, "pending intr %d", vector);
				if (vecptr != NULL)
					*vecptr = vector;
				return (1);
			} else
				break;
		}
	}
	return (0);
}

void
vlapic_intr_accepted(struct vlapic *vlapic, int vector)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *irrptr, *isrptr;
	int idx, stk_top;

	if (vlapic->ops.intr_accepted)
		return ((*vlapic->ops.intr_accepted)(vlapic, vector));

	/*
	 * clear the ready bit for vector being accepted in irr
	 * and set the vector as in service in isr.
	 */
	idx = (vector / 32) * 4;

	irrptr = &lapic->irr0;
	atomic_clear_int(&irrptr[idx], 1 << (vector % 32));
	VLAPIC_CTR_IRR(vlapic, "vlapic_intr_accepted");

	isrptr = &lapic->isr0;
	isrptr[idx] |= 1 << (vector % 32);
	VLAPIC_CTR_ISR(vlapic, "vlapic_intr_accepted");

	/*
	 * Update the PPR
	 */
	vlapic->isrvec_stk_top++;

	stk_top = vlapic->isrvec_stk_top;
	if (stk_top >= ISRVEC_STK_SIZE)
		panic("isrvec_stk_top overflow %d", stk_top);

	vlapic->isrvec_stk[stk_top] = vector;
	vlapic_update_ppr(vlapic);
}

void
vlapic_svr_write_handler(struct vlapic *vlapic)
{
	struct LAPIC *lapic;
	uint32_t old, new, changed;

	lapic = vlapic->apic_page;

	new = lapic->svr;
	old = vlapic->svr_last;
	vlapic->svr_last = new;

	changed = old ^ new;
	if ((changed & APIC_SVR_ENABLE) != 0) {
		if ((new & APIC_SVR_ENABLE) == 0) {
			/*
			 * The apic is now disabled so stop the apic timer
			 * and mask all the LVT entries.
			 */
			VLAPIC_CTR0(vlapic, "vlapic is software-disabled");
			VLAPIC_TIMER_LOCK(vlapic);
			callout_stop(&vlapic->callout);
			VLAPIC_TIMER_UNLOCK(vlapic);
			vlapic_mask_lvts(vlapic);
		} else {
			/*
			 * The apic is now enabled so restart the apic timer
			 * if it is configured in periodic mode.
			 */
			VLAPIC_CTR0(vlapic, "vlapic is software-enabled");
			if (vlapic_periodic_timer(vlapic))
				vlapic_icrtmr_write_handler(vlapic);
		}
	}
}
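
/*
 * Emulate a 32-bit read from the virtual APIC page. The offset is
 * truncated to a 4-byte boundary and reads beyond the page return 0.
 */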
int
vlapic_read(struct vlapic *vlapic, uint64_t offset, uint64_t *data, bool *retu)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *reg;
	int i;

	if (offset > sizeof(*lapic)) {
		*data = 0;
		goto done;
	}

	offset &= ~3;
	switch (offset) {
	case APIC_OFFSET_ID:
		*data = lapic->id;
		break;
	case APIC_OFFSET_VER:
		*data = lapic->version;
		break;
	case APIC_OFFSET_TPR:
		*data = lapic->tpr;
		break;
	case APIC_OFFSET_APR:
		*data = lapic->apr;
		break;
	case APIC_OFFSET_PPR:
		*data = lapic->ppr;
		break;
	case APIC_OFFSET_EOI:
		*data = lapic->eoi;
		break;
	case APIC_OFFSET_LDR:
		*data = lapic->ldr;
		break;
	case APIC_OFFSET_DFR:
		*data = lapic->dfr;
		break;
	case APIC_OFFSET_SVR:
		*data = lapic->svr;
		break;
	case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
		i = (offset - APIC_OFFSET_ISR0) >> 2;
		reg = &lapic->isr0;
		*data = *(reg + i);
		break;
	case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
		i = (offset - APIC_OFFSET_TMR0) >> 2;
		reg = &lapic->tmr0;
		*data = *(reg + i);
		break;
	case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
		i = (offset - APIC_OFFSET_IRR0) >> 2;
		reg = &lapic->irr0;
		*data = atomic_load_acq_int(reg + i);
		break;
	case APIC_OFFSET_ESR:
		*data = lapic->esr;
		break;
	case APIC_OFFSET_ICR_LOW:
		*data = lapic->icr_lo;
		if (x2apic(vlapic))
			*data |= (uint64_t)lapic->icr_hi << 32;
		break;
	case APIC_OFFSET_ICR_HI:
		*data = lapic->icr_hi;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		*data = vlapic_get_lvt(vlapic, offset);
#ifdef INVARIANTS
		reg = vlapic_get_lvtptr(vlapic, offset);
		KASSERT(*data == *reg, ("inconsistent lvt value at "
		    "offset %#lx: %#lx/%#x", offset, *data, *reg));
#endif
		break;
	case APIC_OFFSET_TIMER_ICR:
		*data = lapic->icr_timer;
		break;
	case APIC_OFFSET_TIMER_CCR:
		*data = vlapic_get_ccr(vlapic);
		break;
	case APIC_OFFSET_RRR:
	default:
		*data = 0;
		break;
	}
done:
	VLAPIC_CTR2(vlapic, "vlapic read offset %#x, data %#lx", offset, *data);
	return (0);
}
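
/*
 * Emulate a write to the virtual APIC page. Writes to read-only
 * registers are silently dropped. The return value is non-zero if the
 * access must be completed in userland.
 */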
int
vlapic_write(struct vlapic *vlapic, uint64_t offset, uint64_t data, bool *retu)
{
	struct LAPIC *lapic = vlapic->apic_page;
	uint32_t *regptr;
	int retval;

	KASSERT((offset & 0xf) == 0 && offset < PAGE_SIZE,
	    ("vlapic_write: invalid offset %#lx", offset));

	VLAPIC_CTR2(vlapic, "vlapic write offset %#x, data %#lx", offset, data);

	if (offset > sizeof(*lapic)) {
		return (0);
	}

	retval = 0;
	switch (offset) {
	case APIC_OFFSET_ID:
		lapic->id = data;
		vlapic_id_write_handler(vlapic);
		break;
	case APIC_OFFSET_TPR:
		lapic->tpr = data & 0xff;
		vlapic_update_ppr(vlapic);
		break;
	case APIC_OFFSET_EOI:
		vlapic_process_eoi(vlapic);
		break;
	case APIC_OFFSET_LDR:
		lapic->ldr = data;
		vlapic_ldr_write_handler(vlapic);
		break;
	case APIC_OFFSET_DFR:
		lapic->dfr = data;
		vlapic_dfr_write_handler(vlapic);
		break;
	case APIC_OFFSET_SVR:
		lapic->svr = data;
		vlapic_svr_write_handler(vlapic);
		break;
	case APIC_OFFSET_ICR_LOW:
		lapic->icr_lo = data;
		if (x2apic(vlapic))
			lapic->icr_hi = data >> 32;
		retval = vlapic_icrlo_write_handler(vlapic, retu);
		break;
	case APIC_OFFSET_ICR_HI:
		lapic->icr_hi = data;
		break;
	case APIC_OFFSET_CMCI_LVT:
	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
		regptr = vlapic_get_lvtptr(vlapic, offset);
		*regptr = data;
		vlapic_lvt_write_handler(vlapic, offset);
		break;
	case APIC_OFFSET_TIMER_ICR:
		lapic->icr_timer = data;
		vlapic_icrtmr_write_handler(vlapic);
		break;

	case APIC_OFFSET_TIMER_DCR:
		lapic->dcr_timer = data;
		vlapic_dcr_write_handler(vlapic);
		break;

	case APIC_OFFSET_ESR:
		vlapic_esr_write_handler(vlapic);
		break;
	case APIC_OFFSET_VER:
	case APIC_OFFSET_APR:
	case APIC_OFFSET_PPR:
	case APIC_OFFSET_RRR:
	case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
	case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
	case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
	case APIC_OFFSET_TIMER_CCR:
	default:
		/* Read only. */
		break;
	}

	return (retval);
}
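
/*
 * Reset the virtual APIC to its power-on state: clear the APIC page,
 * restore the default register values and mask all LVT entries. The
 * BSP starts out running while the APs wait for an INIT IPI.
 */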
static void
vlapic_reset(struct vlapic *vlapic)
{
	struct LAPIC *lapic;

	lapic = vlapic->apic_page;
	bzero(lapic, sizeof(struct LAPIC));

	lapic->id = vlapic_get_id(vlapic);
	lapic->version = VLAPIC_VERSION;
	lapic->version |= (VLAPIC_MAXLVT_INDEX << MAXLVTSHIFT);
	lapic->dfr = 0xffffffff;
	lapic->svr = APIC_SVR_VECTOR;
	vlapic_mask_lvts(vlapic);

	lapic->dcr_timer = 0;
	vlapic_dcr_write_handler(vlapic);

	if (vlapic->vcpuid == 0)
		vlapic->boot_state = BS_RUNNING;	/* BSP */
	else
		vlapic->boot_state = BS_INIT;		/* AP */

	vlapic->svr_last = lapic->svr;
}

void
vlapic_init(struct vlapic *vlapic)
{
	KASSERT(vlapic->vm != NULL, ("vlapic_init: vm is not initialized"));
	KASSERT(vlapic->vcpuid >= 0 && vlapic->vcpuid < VM_MAXCPU,
	    ("vlapic_init: vcpuid is not initialized"));
	KASSERT(vlapic->apic_page != NULL, ("vlapic_init: apic_page is not "
	    "initialized"));

	/*
	 * If the vlapic is configured in x2apic mode then it will be
	 * accessed in the critical section via the MSR emulation code.
	 *
	 * Therefore the timer mutex must be a spinlock because blockable
	 * mutexes cannot be acquired in a critical section.
	 */
	mtx_init(&vlapic->timer_mtx, "vlapic timer mtx", NULL, MTX_SPIN);
	callout_init(&vlapic->callout, 1);

	vlapic->msr_apicbase = DEFAULT_APIC_BASE | APICBASE_ENABLED;

	if (vlapic->vcpuid == 0)
		vlapic->msr_apicbase |= APICBASE_BSP;

	vlapic_reset(vlapic);
}

void
vlapic_cleanup(struct vlapic *vlapic)
{

	callout_drain(&vlapic->callout);
}

uint64_t
vlapic_get_apicbase(struct vlapic *vlapic)
{

	return (vlapic->msr_apicbase);
}
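
/*
 * Handle a write to the IA32_APIC_BASE MSR. The x2APIC enable bit is
 * honored only if x2APIC mode is permitted for this vcpu; switching
 * between xAPIC and x2APIC resets the mode-dependent registers.
 */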
void
vlapic_set_apicbase(struct vlapic *vlapic, uint64_t new)
{
	struct LAPIC *lapic;
	enum x2apic_state state;
	uint64_t old;
	int err;

	err = vm_get_x2apic_state(vlapic->vm, vlapic->vcpuid, &state);
	if (err)
		panic("vlapic_set_apicbase: err %d fetching x2apic state", err);

	if (state == X2APIC_DISABLED)
		new &= ~APICBASE_X2APIC;

	old = vlapic->msr_apicbase;
	vlapic->msr_apicbase = new;

	/*
	 * If the vlapic is switching between xAPIC and x2APIC modes then
	 * reset the mode-dependent registers.
	 */
	if ((old ^ new) & APICBASE_X2APIC) {
		lapic = vlapic->apic_page;
		lapic->id = vlapic_get_id(vlapic);
		if (x2apic(vlapic)) {
			lapic->ldr = x2apic_ldr(vlapic);
			lapic->dfr = 0;
		} else {
			lapic->ldr = 0;
			lapic->dfr = 0xffffffff;
		}
	}
}

void
vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	struct vlapic *vlapic;

	vlapic = vm_lapic(vm, vcpuid);

	if (state == X2APIC_DISABLED)
		vlapic->msr_apicbase &= ~APICBASE_X2APIC;
}

void
vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
    int delmode, int vec)
{
	bool lowprio;
	int vcpuid;
	cpuset_t dmask;

	if (delmode != APIC_DELMODE_FIXED && delmode != APIC_DELMODE_LOWPRIO) {
		VM_CTR1(vm, "vlapic intr invalid delmode %#x", delmode);
		return;
	}
	lowprio = (delmode == APIC_DELMODE_LOWPRIO);

	/*
	 * We don't provide any virtual interrupt redirection hardware so
	 * all interrupts originating from the ioapic or MSI specify the
	 * 'dest' in the legacy xAPIC format.
	 */
	vlapic_calcdest(vm, &dmask, dest, phys, lowprio, false);

	while ((vcpuid = CPU_FFS(&dmask)) != 0) {
		vcpuid--;
		CPU_CLR(vcpuid, &dmask);
		lapic_set_intr(vm, vcpuid, vec, level);
	}
}

void
vlapic_post_intr(struct vlapic *vlapic, int hostcpu, int ipinum)
{
	/*
	 * Post an interrupt to the vcpu currently running on 'hostcpu'.
	 *
	 * This is done by leveraging features like Posted Interrupts (Intel)
	 * and the Doorbell MSR (AMD AVIC) that avoid a VM exit.
	 *
	 * If neither of these features is available then fall back to
	 * sending an IPI to 'hostcpu'.
	 */
	if (vlapic->ops.post_intr)
		(*vlapic->ops.post_intr)(vlapic, hostcpu);
	else
		ipi_cpu(hostcpu, ipinum);
}

bool
vlapic_enabled(struct vlapic *vlapic)
{
	struct LAPIC *lapic = vlapic->apic_page;

	if ((vlapic->msr_apicbase & APICBASE_ENABLED) != 0 &&
	    (lapic->svr & APIC_SVR_ENABLE) != 0)
		return (true);
	else
		return (false);
}