/*-
 * Copyright (c) 2013 Tycho Nightingale <tycho.nightingale@pluribusnetworks.com>
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/cpuset.h>

#include <dev/acpica/acpi_hpet.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmm_lapic.h"
#include "vatpic.h"
#include "vioapic.h"
#include "vhpet.h"

#include "vmm_ktr.h"

static MALLOC_DEFINE(M_VHPET, "vhpet", "bhyve virtual hpet");

#define	HPET_FREQ	10000000		/* 10.0 MHz */
#define	FS_PER_S	1000000000000000ul

/* Timer N Configuration and Capabilities Register */
#define	HPET_TCAP_RO_MASK	(HPET_TCAP_INT_ROUTE	|		\
				 HPET_TCAP_FSB_INT_DEL	|		\
				 HPET_TCAP_SIZE		|		\
				 HPET_TCAP_PER_INT)

/*
 * HPET requires at least 3 timers and up to 32 timers per block.
 */
#define	VHPET_NUM_TIMERS	8
CTASSERT(VHPET_NUM_TIMERS >= 3 && VHPET_NUM_TIMERS <= 32);
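
/*
 * For reference: with HPET_FREQ at 10 MHz the tick period advertised to the
 * guest is FS_PER_S / HPET_FREQ = 10^15 / 10^7 = 100,000,000 femtoseconds,
 * i.e. 100 ns per tick, so the 32-bit main counter wraps roughly every
 * 2^32 * 100 ns ~= 429 seconds.
 */
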
struct vhpet_callout_arg {
	struct vhpet *vhpet;
	int timer_num;
};

struct vhpet {
	struct vm	*vm;
	struct mtx	mtx;
	sbintime_t	freq_sbt;

	uint64_t	config;		/* Configuration */
	uint64_t	isr;		/* Interrupt Status */
	uint32_t	countbase;	/* HPET counter base value */
	sbintime_t	countbase_sbt;	/* uptime corresponding to base value */

	struct {
		uint64_t	cap_config;	/* Configuration */
		uint64_t	msireg;		/* FSB interrupt routing */
		uint32_t	compval;	/* Comparator */
		uint32_t	comprate;
		struct callout	callout;
		sbintime_t	callout_sbt;	/* time when counter==compval */
		struct vhpet_callout_arg arg;
	} timer[VHPET_NUM_TIMERS];
};

#define	VHPET_LOCK(vhp)		mtx_lock(&((vhp)->mtx))
#define	VHPET_UNLOCK(vhp)	mtx_unlock(&((vhp)->mtx))

static void vhpet_start_timer(struct vhpet *vhpet, int n, uint32_t counter,
    sbintime_t now);

static uint64_t
vhpet_capabilities(void)
{
	uint64_t cap = 0;

	cap |= 0x8086 << 16;			/* vendor id */
	cap |= HPET_CAP_LEG_RT;			/* legacy routing capable */
	cap |= (VHPET_NUM_TIMERS - 1) << 8;	/* number of timers */
	cap |= 1;				/* revision */
	cap &= ~HPET_CAP_COUNT_SIZE;		/* 32-bit timer */

	cap &= 0xffffffff;
	cap |= (FS_PER_S / HPET_FREQ) << 32;	/* tick period in fs */

	return (cap);
}
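
/*
 * For reference, assuming the standard HPET capability bit layout (vendor id
 * in bits 31:16, LEG_RT_CAP bit 15, COUNT_SIZE_CAP bit 13, NUM_TIM_CAP bits
 * 12:8, REV_ID bits 7:0), the value built above should read as
 * 0x05F5E10080868701: a 100,000,000 fs (100 ns) period in the upper dword
 * and vendor 0x8086, legacy routing capable, 32-bit counter, 8 timers,
 * revision 1 in the lower dword.
 */
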
static __inline bool
vhpet_counter_enabled(struct vhpet *vhpet)
{

	return ((vhpet->config & HPET_CNF_ENABLE) ? true : false);
}

static __inline bool
vhpet_timer_msi_enabled(struct vhpet *vhpet, int n)
{
	const uint64_t msi_enable = HPET_TCAP_FSB_INT_DEL | HPET_TCNF_FSB_EN;

	/*
	 * LegacyReplacement Route configuration takes precedence over MSI
	 * for timers 0 and 1.
	 */
	if (n == 0 || n == 1) {
		if (vhpet->config & HPET_CNF_LEG_RT)
			return (false);
	}

	if ((vhpet->timer[n].cap_config & msi_enable) == msi_enable)
		return (true);
	else
		return (false);
}

static __inline int
vhpet_timer_ioapic_pin(struct vhpet *vhpet, int n)
{
	/*
	 * If the timer is configured to use MSI then treat it as if the
	 * timer is not connected to the ioapic.
	 */
	if (vhpet_timer_msi_enabled(vhpet, n))
		return (0);

	if (vhpet->config & HPET_CNF_LEG_RT) {
		/*
		 * In "legacy routing" timers 0 and 1 are connected to
		 * ioapic pins 2 and 8 respectively.
		 */
		switch (n) {
		case 0:
			return (2);
		case 1:
			return (8);
		}
	}

	return ((vhpet->timer[n].cap_config & HPET_TCNF_INT_ROUTE) >> 9);
}

static __inline int
vhpet_timer_atpic_pin(struct vhpet *vhpet, int n)
{
	if (vhpet->config & HPET_CNF_LEG_RT) {
		/*
		 * In "legacy routing" timers 0 and 1 are connected to
		 * 8259 master pin 0 and slave pin 0 respectively.
		 */
		switch (n) {
		case 0:
			return (0);
		case 1:
			return (8);
		}
	}

	return (-1);
}

static uint32_t
vhpet_counter(struct vhpet *vhpet, sbintime_t *nowptr)
{
	uint32_t val;
	sbintime_t now, delta;

	val = vhpet->countbase;
	if (vhpet_counter_enabled(vhpet)) {
		now = sbinuptime();
		delta = now - vhpet->countbase_sbt;
		KASSERT(delta >= 0, ("vhpet_counter: uptime went backwards: "
		    "%#lx to %#lx", vhpet->countbase_sbt, now));
		val += delta / vhpet->freq_sbt;
		if (nowptr != NULL)
			*nowptr = now;
	} else {
		/*
		 * The sbinuptime corresponding to the 'countbase' is
		 * meaningless when the counter is disabled. Make sure
		 * that the caller doesn't want to use it.
		 */
		KASSERT(nowptr == NULL, ("vhpet_counter: nowptr must be NULL"));
	}
	return (val);
}
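
/*
 * Rough worked example: 'freq_sbt' holds the tick period in sbintime_t units
 * (1 second == 2^32), which for a 10 MHz HPET is about 429 (~100 ns). One
 * second of elapsed uptime (delta ~= 2^32) therefore advances the value
 * returned above by roughly 10 million ticks.
 */
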
static void
vhpet_timer_clear_isr(struct vhpet *vhpet, int n)
{
	int pin, legacy_pin;

	if (vhpet->isr & (1 << n)) {
		pin = vhpet_timer_ioapic_pin(vhpet, n);
		KASSERT(pin != 0, ("vhpet timer %d irq incorrectly routed", n));
		vioapic_deassert_irq(vhpet->vm, pin);

		legacy_pin = vhpet_timer_atpic_pin(vhpet, n);
		if (legacy_pin != -1)
			vatpic_deassert_irq(vhpet->vm, legacy_pin);

		vhpet->isr &= ~(1 << n);
	}
}

static __inline bool
vhpet_periodic_timer(struct vhpet *vhpet, int n)
{

	return ((vhpet->timer[n].cap_config & HPET_TCNF_TYPE) != 0);
}

static __inline bool
vhpet_timer_interrupt_enabled(struct vhpet *vhpet, int n)
{

	return ((vhpet->timer[n].cap_config & HPET_TCNF_INT_ENB) != 0);
}

static __inline bool
vhpet_timer_edge_trig(struct vhpet *vhpet, int n)
{

	KASSERT(!vhpet_timer_msi_enabled(vhpet, n), ("vhpet_timer_edge_trig: "
	    "timer %d is using MSI", n));

	/* The legacy replacement interrupts are always edge triggered */
	if (vhpet->config & HPET_CNF_LEG_RT) {
		if (n == 0 || n == 1)
			return (true);
	}

	if ((vhpet->timer[n].cap_config & HPET_TCNF_INT_TYPE) == 0)
		return (true);
	else
		return (false);
}

static void
vhpet_timer_interrupt(struct vhpet *vhpet, int n)
{
	int pin, legacy_pin;

	/* If interrupts are not enabled for this timer then just return. */
	if (!vhpet_timer_interrupt_enabled(vhpet, n))
		return;

	/*
	 * If a level triggered interrupt is already asserted then just return.
	 */
	if ((vhpet->isr & (1 << n)) != 0) {
		VM_CTR1(vhpet->vm, "hpet t%d intr is already asserted", n);
		return;
	}

	if (vhpet_timer_msi_enabled(vhpet, n)) {
		lapic_intr_msi(vhpet->vm, vhpet->timer[n].msireg >> 32,
		    vhpet->timer[n].msireg & 0xffffffff);
		return;
	}

	pin = vhpet_timer_ioapic_pin(vhpet, n);
	if (pin == 0) {
		VM_CTR1(vhpet->vm, "hpet t%d intr is not routed to ioapic", n);
		return;
	}

	legacy_pin = vhpet_timer_atpic_pin(vhpet, n);

	if (vhpet_timer_edge_trig(vhpet, n)) {
		vioapic_pulse_irq(vhpet->vm, pin);
		if (legacy_pin != -1)
			vatpic_pulse_irq(vhpet->vm, legacy_pin);
	} else {
		vhpet->isr |= 1 << n;
		vioapic_assert_irq(vhpet->vm, pin);
		if (legacy_pin != -1)
			vatpic_assert_irq(vhpet->vm, legacy_pin);
	}
}

static void
vhpet_adjust_compval(struct vhpet *vhpet, int n, uint32_t counter)
{
	uint32_t compval, comprate, compnext;

	KASSERT(vhpet->timer[n].comprate != 0, ("hpet t%d is not periodic", n));

	compval = vhpet->timer[n].compval;
	comprate = vhpet->timer[n].comprate;

	/*
	 * Calculate the comparator value to be used for the next periodic
	 * interrupt.
	 *
	 * This function is commonly called from the callout handler.
	 * In this scenario the 'counter' is ahead of 'compval'. To find
	 * the next value to program into the accumulator we divide the
	 * number space between 'compval' and 'counter' into 'comprate'
	 * sized units. The 'compval' is rounded up such that it is "ahead"
	 * of 'counter'.
	 */
	compnext = compval + ((counter - compval) / comprate + 1) * comprate;

	vhpet->timer[n].compval = compnext;
}
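
/*
 * Worked example of the rounding above: with compval = 100, comprate = 1000
 * and counter = 3250, the next comparator value is
 * 100 + ((3250 - 100) / 1000 + 1) * 1000 = 4100, i.e. the first value on the
 * comparator's period grid that is strictly ahead of 'counter'. The unsigned
 * 32-bit arithmetic also behaves correctly once 'counter' wraps past
 * 'compval'.
 */
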
static void
vhpet_handler(void *a)
{
	int n;
	uint32_t counter;
	sbintime_t now;
	struct vhpet *vhpet;
	struct callout *callout;
	struct vhpet_callout_arg *arg;

	arg = a;
	vhpet = arg->vhpet;
	n = arg->timer_num;
	callout = &vhpet->timer[n].callout;

	VM_CTR1(vhpet->vm, "hpet t%d fired", n);

	VHPET_LOCK(vhpet);

	if (callout_pending(callout))		/* callout was reset */
		goto done;

	if (!callout_active(callout))		/* callout was stopped */
		goto done;

	callout_deactivate(callout);

	if (!vhpet_counter_enabled(vhpet))
		panic("vhpet(%p) callout with counter disabled", vhpet);

	counter = vhpet_counter(vhpet, &now);
	vhpet_start_timer(vhpet, n, counter, now);
	vhpet_timer_interrupt(vhpet, n);
done:
	VHPET_UNLOCK(vhpet);
	return;
}

static void
vhpet_stop_timer(struct vhpet *vhpet, int n, sbintime_t now)
{

	VM_CTR1(vhpet->vm, "hpet t%d stopped", n);
	callout_stop(&vhpet->timer[n].callout);

	/*
	 * If the callout was scheduled to expire in the past but hasn't
	 * had a chance to execute yet then trigger the timer interrupt
	 * here. Failing to do so will result in a missed timer interrupt
	 * in the guest. This is especially bad in one-shot mode because
	 * the next interrupt has to wait for the counter to wrap around.
	 */
	if (vhpet->timer[n].callout_sbt < now) {
		VM_CTR1(vhpet->vm, "hpet t%d interrupt triggered after "
		    "stopping timer", n);
		vhpet_timer_interrupt(vhpet, n);
	}
}

static void
vhpet_start_timer(struct vhpet *vhpet, int n, uint32_t counter, sbintime_t now)
{
	sbintime_t delta, precision;

	if (vhpet->timer[n].comprate != 0)
		vhpet_adjust_compval(vhpet, n, counter);
	else {
		/*
		 * In one-shot mode it is the guest's responsibility to make
		 * sure that the comparator value is not in the "past". The
		 * hardware doesn't have any belt-and-suspenders to deal with
		 * this so we don't either.
		 */
	}

	delta = (vhpet->timer[n].compval - counter) * vhpet->freq_sbt;
	precision = delta >> tc_precexp;
	vhpet->timer[n].callout_sbt = now + delta;
	callout_reset_sbt(&vhpet->timer[n].callout, vhpet->timer[n].callout_sbt,
	    precision, vhpet_handler, &vhpet->timer[n].arg, C_ABSOLUTE);
}

static void
vhpet_start_counting(struct vhpet *vhpet)
{
	int i;

	vhpet->countbase_sbt = sbinuptime();
	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
		/*
		 * Restart the timers based on the value of the main counter
		 * when it stopped counting.
		 */
		vhpet_start_timer(vhpet, i, vhpet->countbase,
		    vhpet->countbase_sbt);
	}
}

static void
vhpet_stop_counting(struct vhpet *vhpet, uint32_t counter, sbintime_t now)
{
	int i;

	vhpet->countbase = counter;
	for (i = 0; i < VHPET_NUM_TIMERS; i++)
		vhpet_stop_timer(vhpet, i, now);
}

static __inline void
update_register(uint64_t *regptr, uint64_t data, uint64_t mask)
{

	*regptr &= ~mask;
	*regptr |= (data & mask);
}
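
/*
 * Example: update_register(&reg, 0xdeadbeef00000000, 0xffffffff00000000)
 * rewrites only the upper 32 bits of 'reg' and leaves the lower half alone.
 * The MMIO handlers below rely on this to fold naturally aligned 4-byte
 * accesses into the 64-bit register images.
 */
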
static void
vhpet_timer_update_config(struct vhpet *vhpet, int n, uint64_t data,
    uint64_t mask)
{
	bool clear_isr;
	int old_pin, new_pin;
	uint32_t allowed_irqs;
	uint64_t oldval, newval;

	if (vhpet_timer_msi_enabled(vhpet, n) ||
	    vhpet_timer_edge_trig(vhpet, n)) {
		if (vhpet->isr & (1 << n))
			panic("vhpet timer %d isr should not be asserted", n);
	}
	old_pin = vhpet_timer_ioapic_pin(vhpet, n);
	oldval = vhpet->timer[n].cap_config;

	newval = oldval;
	update_register(&newval, data, mask);
	newval &= ~(HPET_TCAP_RO_MASK | HPET_TCNF_32MODE);
	newval |= oldval & HPET_TCAP_RO_MASK;

	if (newval == oldval)
		return;

	vhpet->timer[n].cap_config = newval;
	VM_CTR2(vhpet->vm, "hpet t%d cap_config set to 0x%016x", n, newval);

	/*
	 * Validate the interrupt routing in the HPET_TCNF_INT_ROUTE field.
	 * If it does not match the bits set in HPET_TCAP_INT_ROUTE then set
	 * it to the default value of 0.
	 */
	allowed_irqs = vhpet->timer[n].cap_config >> 32;
	new_pin = vhpet_timer_ioapic_pin(vhpet, n);
	if (new_pin != 0 && (allowed_irqs & (1 << new_pin)) == 0) {
		VM_CTR3(vhpet->vm, "hpet t%d configured invalid irq %d, "
		    "allowed_irqs 0x%08x", n, new_pin, allowed_irqs);
		new_pin = 0;
		vhpet->timer[n].cap_config &= ~HPET_TCNF_INT_ROUTE;
	}

	if (!vhpet_periodic_timer(vhpet, n))
		vhpet->timer[n].comprate = 0;

	/*
	 * If the timer's ISR bit is set then clear it in the following cases:
	 * - interrupt is disabled
	 * - interrupt type is changed from level to edge or fsb.
	 * - interrupt routing is changed
	 *
	 * This is to ensure that this timer's level triggered interrupt does
	 * not remain asserted forever.
	 */
	if (vhpet->isr & (1 << n)) {
		KASSERT(old_pin != 0, ("timer %d isr asserted to ioapic pin %d",
		    n, old_pin));
		if (!vhpet_timer_interrupt_enabled(vhpet, n))
			clear_isr = true;
		else if (vhpet_timer_msi_enabled(vhpet, n))
			clear_isr = true;
		else if (vhpet_timer_edge_trig(vhpet, n))
			clear_isr = true;
		else if (vhpet_timer_ioapic_pin(vhpet, n) != old_pin)
			clear_isr = true;
		else
			clear_isr = false;

		if (clear_isr) {
			VM_CTR1(vhpet->vm, "hpet t%d isr cleared due to "
			    "configuration change", n);
			vioapic_deassert_irq(vhpet->vm, old_pin);
			vhpet->isr &= ~(1 << n);
		}
	}
}
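
/*
 * Routing validation example: vhpet_init() advertises ioapic pins 20-23 by
 * placing 0x00f00000 in the upper dword of 'cap_config', so a guest write
 * that routes a timer to, say, pin 10 fails the 'allowed_irqs' check above
 * and the HPET_TCNF_INT_ROUTE field is forced back to the default of 0.
 */
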
int
vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val, int size,
    void *arg)
{
	struct vhpet *vhpet;
	uint64_t data, mask, oldval, val64;
	uint32_t isr_clear_mask, old_compval, old_comprate, counter;
	sbintime_t now, *nowptr;
	int i, offset;

	vhpet = vm_hpet(vm);
	offset = gpa - VHPET_BASE;

	VHPET_LOCK(vhpet);

	/* Accesses to the HPET should be 4 or 8 bytes wide */
	switch (size) {
	case 8:
		mask = 0xffffffffffffffff;
		data = val;
		break;
	case 4:
		mask = 0xffffffff;
		data = val;
		if ((offset & 0x4) != 0) {
			mask <<= 32;
			data <<= 32;
		}
		break;
	default:
		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
		    "offset 0x%08x, size %d", offset, size);
		goto done;
	}

	/* Access to the HPET should be naturally aligned to its width */
	if (offset & (size - 1)) {
		VM_CTR2(vhpet->vm, "hpet invalid mmio write: "
		    "offset 0x%08x, size %d", offset, size);
		goto done;
	}

	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
		/*
		 * Get the most recent value of the counter before updating
		 * the 'config' register. If the HPET is going to be disabled
		 * then we need to update 'countbase' with the value right
		 * before it is disabled.
		 */
		nowptr = vhpet_counter_enabled(vhpet) ? &now : NULL;
		counter = vhpet_counter(vhpet, nowptr);
		oldval = vhpet->config;
		update_register(&vhpet->config, data, mask);
		if ((oldval ^ vhpet->config) & HPET_CNF_ENABLE) {
			if (vhpet_counter_enabled(vhpet)) {
				vhpet_start_counting(vhpet);
				VM_CTR0(vhpet->vm, "hpet enabled");
			} else {
				vhpet_stop_counting(vhpet, counter, now);
				VM_CTR0(vhpet->vm, "hpet disabled");
			}
		}
		goto done;
	}

	if (offset == HPET_ISR || offset == HPET_ISR + 4) {
		isr_clear_mask = vhpet->isr & data;
		for (i = 0; i < VHPET_NUM_TIMERS; i++) {
			if ((isr_clear_mask & (1 << i)) != 0) {
				VM_CTR1(vhpet->vm, "hpet t%d isr cleared", i);
				vhpet_timer_clear_isr(vhpet, i);
			}
		}
		goto done;
	}

	if (offset == HPET_MAIN_COUNTER || offset == HPET_MAIN_COUNTER + 4) {
		/* Zero-extend the counter to 64-bits before updating it */
		val64 = vhpet_counter(vhpet, NULL);
		update_register(&val64, data, mask);
		vhpet->countbase = val64;
		if (vhpet_counter_enabled(vhpet))
			vhpet_start_counting(vhpet);
		goto done;
	}

	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
		if (offset == HPET_TIMER_CAP_CNF(i) ||
		    offset == HPET_TIMER_CAP_CNF(i) + 4) {
			vhpet_timer_update_config(vhpet, i, data, mask);
			break;
		}

		if (offset == HPET_TIMER_COMPARATOR(i) ||
		    offset == HPET_TIMER_COMPARATOR(i) + 4) {
			old_compval = vhpet->timer[i].compval;
			old_comprate = vhpet->timer[i].comprate;
			if (vhpet_periodic_timer(vhpet, i)) {
				/*
				 * In periodic mode writes to the comparator
				 * change the 'compval' register only if the
				 * HPET_TCNF_VAL_SET bit is set in the config
				 * register.
				 */
				val64 = vhpet->timer[i].comprate;
				update_register(&val64, data, mask);
				vhpet->timer[i].comprate = val64;
				if ((vhpet->timer[i].cap_config &
				    HPET_TCNF_VAL_SET) != 0) {
					vhpet->timer[i].compval = val64;
				}
			} else {
				KASSERT(vhpet->timer[i].comprate == 0,
				    ("vhpet one-shot timer %d has invalid "
				    "rate %u", i, vhpet->timer[i].comprate));
				val64 = vhpet->timer[i].compval;
				update_register(&val64, data, mask);
				vhpet->timer[i].compval = val64;
			}
			vhpet->timer[i].cap_config &= ~HPET_TCNF_VAL_SET;

			if (vhpet->timer[i].compval != old_compval ||
			    vhpet->timer[i].comprate != old_comprate) {
				if (vhpet_counter_enabled(vhpet)) {
					counter = vhpet_counter(vhpet, &now);
					vhpet_start_timer(vhpet, i, counter,
					    now);
				}
			}
			break;
		}

		if (offset == HPET_TIMER_FSB_VAL(i) ||
		    offset == HPET_TIMER_FSB_ADDR(i)) {
			update_register(&vhpet->timer[i].msireg, data, mask);
			break;
		}
	}
done:
	VHPET_UNLOCK(vhpet);
	return (0);
}
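
/*
 * Comparator writes in periodic mode, as handled above: a write always
 * updates 'comprate', but 'compval' is reloaded only while the guest has
 * HPET_TCNF_VAL_SET pending, and the write consumes VAL_SET. For example,
 * setting VAL_SET and then writing 10000 loads both comprate and compval
 * with 10000; a later write of 20000 without VAL_SET changes only the
 * length of subsequent periods.
 */
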
int
vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval, int size,
    void *arg)
{
	int i, offset;
	struct vhpet *vhpet;
	uint64_t data;

	vhpet = vm_hpet(vm);
	offset = gpa - VHPET_BASE;

	VHPET_LOCK(vhpet);

	/* Accesses to the HPET should be 4 or 8 bytes wide */
	if (size != 4 && size != 8) {
		VM_CTR2(vhpet->vm, "hpet invalid mmio read: "
		    "offset 0x%08x, size %d", offset, size);
		data = 0;
		goto done;
	}

	/* Access to the HPET should be naturally aligned to its width */
	if (offset & (size - 1)) {
		VM_CTR2(vhpet->vm, "hpet invalid mmio read: "
		    "offset 0x%08x, size %d", offset, size);
		data = 0;
		goto done;
	}

	if (offset == HPET_CAPABILITIES || offset == HPET_CAPABILITIES + 4) {
		data = vhpet_capabilities();
		goto done;
	}

	if (offset == HPET_CONFIG || offset == HPET_CONFIG + 4) {
		data = vhpet->config;
		goto done;
	}

	if (offset == HPET_ISR || offset == HPET_ISR + 4) {
		data = vhpet->isr;
		goto done;
	}

	if (offset == HPET_MAIN_COUNTER || offset == HPET_MAIN_COUNTER + 4) {
		data = vhpet_counter(vhpet, NULL);
		goto done;
	}

	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
		if (offset == HPET_TIMER_CAP_CNF(i) ||
		    offset == HPET_TIMER_CAP_CNF(i) + 4) {
			data = vhpet->timer[i].cap_config;
			break;
		}

		if (offset == HPET_TIMER_COMPARATOR(i) ||
		    offset == HPET_TIMER_COMPARATOR(i) + 4) {
			data = vhpet->timer[i].compval;
			break;
		}

		if (offset == HPET_TIMER_FSB_VAL(i) ||
		    offset == HPET_TIMER_FSB_ADDR(i)) {
			data = vhpet->timer[i].msireg;
			break;
		}
	}

	if (i >= VHPET_NUM_TIMERS)
		data = 0;
done:
	VHPET_UNLOCK(vhpet);

	if (size == 4) {
		if (offset & 0x4)
			data >>= 32;
	}
	*rval = data;
	return (0);
}

struct vhpet *
vhpet_init(struct vm *vm)
{
	int i, pincount;
	struct vhpet *vhpet;
	uint64_t allowed_irqs;
	struct vhpet_callout_arg *arg;
	struct bintime bt;

	vhpet = malloc(sizeof(struct vhpet), M_VHPET, M_WAITOK | M_ZERO);
	vhpet->vm = vm;
	mtx_init(&vhpet->mtx, "vhpet lock", NULL, MTX_DEF);

	FREQ2BT(HPET_FREQ, &bt);
	vhpet->freq_sbt = bttosbt(bt);

	pincount = vioapic_pincount(vm);
	if (pincount >= 24)
		allowed_irqs = 0x00f00000;	/* irqs 20, 21, 22 and 23 */
	else
		allowed_irqs = 0;

	/*
	 * Initialize HPET timer hardware state.
	 */
	for (i = 0; i < VHPET_NUM_TIMERS; i++) {
		vhpet->timer[i].cap_config = allowed_irqs << 32;
		vhpet->timer[i].cap_config |= HPET_TCAP_PER_INT;
		vhpet->timer[i].cap_config |= HPET_TCAP_FSB_INT_DEL;

		vhpet->timer[i].compval = 0xffffffff;
		callout_init(&vhpet->timer[i].callout, 1);

		arg = &vhpet->timer[i].arg;
		arg->vhpet = vhpet;
		arg->timer_num = i;
	}

	return (vhpet);
}

void
vhpet_cleanup(struct vhpet *vhpet)
{
	int i;

	for (i = 0; i < VHPET_NUM_TIMERS; i++)
		callout_drain(&vhpet->timer[i].callout);

	free(vhpet, M_VHPET);
}

int
vhpet_getcap(struct vm_hpet_cap *cap)
{

	cap->capabilities = vhpet_capabilities();
	return (0);
}