/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 The FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/time.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/bus.h>
#include <machine/machdep.h>
#include <machine/vmm.h>
#include <machine/armreg.h>

#include <arm64/vmm/arm64.h>

#include "vgic.h"
#include "vtimer.h"

#define RES1    0xffffffffffffffffUL

#define timer_enabled(ctl)      \
    (!((ctl) & CNTP_CTL_IMASK) && ((ctl) & CNTP_CTL_ENABLE))

static uint64_t cnthctl_el2_reg;
static uint32_t tmr_frq;

#define timer_condition_met(ctl)        ((ctl) & CNTP_CTL_ISTATUS)

static void vtimer_schedule_irq(struct hypctx *hypctx, bool phys);

static int
vtimer_virtual_timer_intr(void *arg)
{
        struct hypctx *hypctx;
        uint64_t cntpct_el0;
        uint32_t cntv_ctl;

        hypctx = arm64_get_active_vcpu();
        cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);

        if (!hypctx) {
                /* vm_destroy() was called. */
                eprintf("No active vcpu\n");
                cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
                goto out;
        }
        if (!timer_enabled(cntv_ctl)) {
                eprintf("Timer not enabled\n");
                goto out;
        }
        if (!timer_condition_met(cntv_ctl)) {
                eprintf("Timer condition not met\n");
                goto out;
        }

        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        if (hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 < cntpct_el0)
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    GT_VIRT_IRQ, true);

        cntv_ctl = hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0;

out:
        /*
         * Disable the timer interrupt. This prevents the interrupt from being
         * reasserted as soon as we exit the handler, which would leave us
         * stuck in an infinite loop.
         *
         * This is safe to do because the guest disables the timer and then
         * re-enables it as part of its interrupt handling routine.
         */
        cntv_ctl &= ~CNTP_CTL_ENABLE;
        WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);

        return (FILTER_HANDLED);
}

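/*
 * One-time module initialisation: record the CNTHCTL_EL2 value chosen at
 * boot and the host counter frequency from CNTFRQ_EL0. The former seeds the
 * per-VM configuration, the latter is used to convert counter ticks when
 * scheduling callouts.
 */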
int
vtimer_init(uint64_t cnthctl_el2)
{
        cnthctl_el2_reg = cnthctl_el2;
        /*
         * The guest *MUST* use the same timer frequency as the host. The
         * register CNTFRQ_EL0 is accessible to the guest and a different value
         * in the guest dts file might have unforeseen consequences.
         */
        tmr_frq = READ_SPECIALREG(cntfrq_el0);

        return (0);
}

void
vtimer_vminit(struct hyp *hyp)
{
        uint64_t now;

        hyp->vtimer.cnthctl_el2 = cnthctl_el2_reg;

        /*
         * Configure the Counter-timer Hypervisor Control Register for the VM.
         */
        if (in_vhe()) {
                /*
                 * CNTHCTL_E2H_EL0PCTEN: trap EL0 access to CNTP{CT,CTSS}_EL0
                 * CNTHCTL_E2H_EL0VCTEN: don't trap EL0 access to
                 *     CNTV{CT,CTSS}_EL0
                 * CNTHCTL_E2H_EL0VTEN: don't trap EL0 access to
                 *     CNTV_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL0PTEN: trap EL0 access to
                 *     CNTP_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL1PTEN: trap EL1 access to
                 *     CNTP_{CTL,CVAL,TVAL}_EL0
                 * CNTHCTL_E2H_EL1PCTEN: trap access to CNTPCT_EL0
                 *
                 * TODO: Don't trap when FEAT_ECV is present
                 */
                hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL0PCTEN;
                hyp->vtimer.cnthctl_el2 |= CNTHCTL_E2H_EL0VCTEN;
                hyp->vtimer.cnthctl_el2 |= CNTHCTL_E2H_EL0VTEN;
                hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL0PTEN;

                hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL1PTEN;
                hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_E2H_EL1PCTEN;
        } else {
                /*
                 * CNTHCTL_EL1PCEN: trap access to CNTP_{CTL,CVAL,TVAL}_EL0
                 *                  from EL1
                 * CNTHCTL_EL1PCTEN: trap access to CNTPCT_EL0
                 */
                hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCEN;
                hyp->vtimer.cnthctl_el2 &= ~CNTHCTL_EL1PCTEN;
        }

        now = READ_SPECIALREG(cntpct_el0);
        hyp->vtimer.cntvoff_el2 = now;

        return;
}

void
vtimer_cpuinit(struct hypctx *hypctx)
{
        struct vtimer_cpu *vtimer_cpu;

        vtimer_cpu = &hypctx->vtimer_cpu;
        /*
         * Configure physical timer interrupts for the VCPU.
         *
         * CNTP_CTL_IMASK: mask interrupts
         * ~CNTP_CTL_ENABLE: disable the timer
         */
        vtimer_cpu->phys_timer.cntx_ctl_el0 = CNTP_CTL_IMASK & ~CNTP_CTL_ENABLE;

        mtx_init(&vtimer_cpu->phys_timer.mtx, "vtimer phys callout mutex", NULL,
            MTX_DEF);
        callout_init_mtx(&vtimer_cpu->phys_timer.callout,
            &vtimer_cpu->phys_timer.mtx, 0);
        vtimer_cpu->phys_timer.irqid = GT_PHYS_NS_IRQ;

        mtx_init(&vtimer_cpu->virt_timer.mtx, "vtimer virt callout mutex", NULL,
            MTX_DEF);
        callout_init_mtx(&vtimer_cpu->virt_timer.callout,
            &vtimer_cpu->virt_timer.mtx, 0);
        vtimer_cpu->virt_timer.irqid = GT_VIRT_IRQ;
}

void
vtimer_cpucleanup(struct hypctx *hypctx)
{
        struct vtimer_cpu *vtimer_cpu;

        vtimer_cpu = &hypctx->vtimer_cpu;
        callout_drain(&vtimer_cpu->phys_timer.callout);
        callout_drain(&vtimer_cpu->virt_timer.callout);
        mtx_destroy(&vtimer_cpu->phys_timer.mtx);
        mtx_destroy(&vtimer_cpu->virt_timer.mtx);
}

void
vtimer_vmcleanup(struct hyp *hyp)
{
        struct hypctx *hypctx;
        uint32_t cntv_ctl;

        hypctx = arm64_get_active_vcpu();
        if (!hypctx) {
                /* The active VM was destroyed, stop the timer. */
                cntv_ctl = READ_SPECIALREG(cntv_ctl_el0);
                cntv_ctl &= ~CNTP_CTL_ENABLE;
                WRITE_SPECIALREG(cntv_ctl_el0, cntv_ctl);
        }
}

void
vtimer_cleanup(void)
{
        /* Nothing to do. */
}

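/*
 * Synchronise the guest's virtual timer state with the vGIC: deassert the
 * interrupt if the timer is disabled, assert it if the compare value has
 * already passed, and otherwise deassert it and schedule a callout for the
 * future expiry time.
 */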
void
vtimer_sync_hwstate(struct hypctx *hypctx)
{
        struct vtimer_timer *timer;
        uint64_t cntpct_el0;

        timer = &hypctx->vtimer_cpu.virt_timer;
        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        if (!timer_enabled(timer->cntx_ctl_el0)) {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, false);
        } else if (timer->cntx_cval_el0 < cntpct_el0) {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, true);
        } else {
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, false);
                vtimer_schedule_irq(hypctx, false);
        }
}

static void
vtimer_inject_irq_callout_phys(void *context)
{
        struct hypctx *hypctx;

        hypctx = context;
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
            hypctx->vtimer_cpu.phys_timer.irqid, true);
}

static void
vtimer_inject_irq_callout_virt(void *context)
{
        struct hypctx *hypctx;

        hypctx = context;
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
            hypctx->vtimer_cpu.virt_timer.irqid, true);
}

static void
vtimer_schedule_irq(struct hypctx *hypctx, bool phys)
{
        sbintime_t time;
        struct vtimer_timer *timer;
        uint64_t cntpct_el0;
        uint64_t diff;

        if (phys)
                timer = &hypctx->vtimer_cpu.phys_timer;
        else
                timer = &hypctx->vtimer_cpu.virt_timer;
        cntpct_el0 = READ_SPECIALREG(cntpct_el0) -
            hypctx->hyp->vtimer.cntvoff_el2;
        if (timer->cntx_cval_el0 < cntpct_el0) {
                /* Timer set in the past, trigger interrupt */
                vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
                    timer->irqid, true);
        } else {
                diff = timer->cntx_cval_el0 - cntpct_el0;
                time = diff * SBT_1S / tmr_frq;
                if (phys)
                        callout_reset_sbt(&timer->callout, time, 0,
                            vtimer_inject_irq_callout_phys, hypctx, 0);
                else
                        callout_reset_sbt(&timer->callout, time, 0,
                            vtimer_inject_irq_callout_virt, hypctx, 0);
        }
}

static void
vtimer_remove_irq(struct hypctx *hypctx, struct vcpu *vcpu)
{
        struct vtimer_cpu *vtimer_cpu;
        struct vtimer_timer *timer;

        vtimer_cpu = &hypctx->vtimer_cpu;
        timer = &vtimer_cpu->phys_timer;

        callout_drain(&timer->callout);
        /*
         * The interrupt needs to be deactivated here regardless of whether
         * the callout function has run. The timer interrupt can be masked
         * with the CNTP_CTL_EL0.IMASK bit instead of reading the IAR
         * register, and masking the interrupt doesn't remove it from the
         * list registers.
         */
        vgic_inject_irq(hypctx->hyp, vcpu_vcpuid(vcpu), timer->irqid, false);
}

/*
 * Timer emulation functions.
 *
 * The guest should use the virtual timer; however, some software, e.g.
 * u-boot, uses the physical timer. Emulate the physical timer in software
 * for the guest to use.
 *
 * Adjust for cntvoff_el2 so the physical and virtual timers are at similar
 * times. This simplifies interrupt handling in the virtual timer as the
 * adjustment will have already happened.
 */

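/*
 * CNTP_CTL_EL0 read: return the saved control bits, with ISTATUS derived
 * from whether the emulated compare value has already passed.
 */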
334 */ 335 336 int 337 vtimer_phys_ctl_read(struct vcpu *vcpu, uint64_t *rval, void *arg) 338 { 339 struct hyp *hyp; 340 struct hypctx *hypctx; 341 struct vtimer_cpu *vtimer_cpu; 342 uint64_t cntpct_el0; 343 344 hypctx = vcpu_get_cookie(vcpu); 345 hyp = hypctx->hyp; 346 vtimer_cpu = &hypctx->vtimer_cpu; 347 348 cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2; 349 if (vtimer_cpu->phys_timer.cntx_cval_el0 < cntpct_el0) 350 /* Timer condition met */ 351 *rval = vtimer_cpu->phys_timer.cntx_ctl_el0 | CNTP_CTL_ISTATUS; 352 else 353 *rval = vtimer_cpu->phys_timer.cntx_ctl_el0 & ~CNTP_CTL_ISTATUS; 354 355 return (0); 356 } 357 358 int 359 vtimer_phys_ctl_write(struct vcpu *vcpu, uint64_t wval, void *arg) 360 { 361 struct hypctx *hypctx; 362 struct vtimer_cpu *vtimer_cpu; 363 uint64_t ctl_el0; 364 bool timer_toggled_on; 365 366 hypctx = vcpu_get_cookie(vcpu); 367 vtimer_cpu = &hypctx->vtimer_cpu; 368 369 timer_toggled_on = false; 370 ctl_el0 = vtimer_cpu->phys_timer.cntx_ctl_el0; 371 372 if (!timer_enabled(ctl_el0) && timer_enabled(wval)) 373 timer_toggled_on = true; 374 else if (timer_enabled(ctl_el0) && !timer_enabled(wval)) 375 vtimer_remove_irq(hypctx, vcpu); 376 377 vtimer_cpu->phys_timer.cntx_ctl_el0 = wval; 378 379 if (timer_toggled_on) 380 vtimer_schedule_irq(hypctx, true); 381 382 return (0); 383 } 384 385 int 386 vtimer_phys_cnt_read(struct vcpu *vcpu, uint64_t *rval, void *arg) 387 { 388 struct vm *vm; 389 struct hyp *hyp; 390 391 vm = vcpu_vm(vcpu); 392 hyp = vm_get_cookie(vm); 393 *rval = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2; 394 return (0); 395 } 396 397 int 398 vtimer_phys_cnt_write(struct vcpu *vcpu, uint64_t wval, void *arg) 399 { 400 return (0); 401 } 402 403 int 404 vtimer_phys_cval_read(struct vcpu *vcpu, uint64_t *rval, void *arg) 405 { 406 struct hypctx *hypctx; 407 struct vtimer_cpu *vtimer_cpu; 408 409 hypctx = vcpu_get_cookie(vcpu); 410 vtimer_cpu = &hypctx->vtimer_cpu; 411 412 *rval = vtimer_cpu->phys_timer.cntx_cval_el0; 413 414 return (0); 415 } 416 417 int 418 vtimer_phys_cval_write(struct vcpu *vcpu, uint64_t wval, void *arg) 419 { 420 struct hypctx *hypctx; 421 struct vtimer_cpu *vtimer_cpu; 422 423 hypctx = vcpu_get_cookie(vcpu); 424 vtimer_cpu = &hypctx->vtimer_cpu; 425 426 vtimer_cpu->phys_timer.cntx_cval_el0 = wval; 427 428 vtimer_remove_irq(hypctx, vcpu); 429 if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) { 430 vtimer_schedule_irq(hypctx, true); 431 } 432 433 return (0); 434 } 435 436 int 437 vtimer_phys_tval_read(struct vcpu *vcpu, uint64_t *rval, void *arg) 438 { 439 struct hyp *hyp; 440 struct hypctx *hypctx; 441 struct vtimer_cpu *vtimer_cpu; 442 uint32_t cntpct_el0; 443 444 hypctx = vcpu_get_cookie(vcpu); 445 hyp = hypctx->hyp; 446 vtimer_cpu = &hypctx->vtimer_cpu; 447 448 if (!(vtimer_cpu->phys_timer.cntx_ctl_el0 & CNTP_CTL_ENABLE)) { 449 /* 450 * ARMv8 Architecture Manual, p. D7-2702: the result of reading 451 * TVAL when the timer is disabled is UNKNOWN. I have chosen to 452 * return the maximum value possible on 32 bits which means the 453 * timer will fire very far into the future. 
454 */ 455 *rval = (uint32_t)RES1; 456 } else { 457 cntpct_el0 = READ_SPECIALREG(cntpct_el0) - 458 hyp->vtimer.cntvoff_el2; 459 *rval = vtimer_cpu->phys_timer.cntx_cval_el0 - cntpct_el0; 460 } 461 462 return (0); 463 } 464 465 int 466 vtimer_phys_tval_write(struct vcpu *vcpu, uint64_t wval, void *arg) 467 { 468 struct hyp *hyp; 469 struct hypctx *hypctx; 470 struct vtimer_cpu *vtimer_cpu; 471 uint64_t cntpct_el0; 472 473 hypctx = vcpu_get_cookie(vcpu); 474 hyp = hypctx->hyp; 475 vtimer_cpu = &hypctx->vtimer_cpu; 476 477 cntpct_el0 = READ_SPECIALREG(cntpct_el0) - hyp->vtimer.cntvoff_el2; 478 vtimer_cpu->phys_timer.cntx_cval_el0 = (int32_t)wval + cntpct_el0; 479 480 vtimer_remove_irq(hypctx, vcpu); 481 if (timer_enabled(vtimer_cpu->phys_timer.cntx_ctl_el0)) { 482 vtimer_schedule_irq(hypctx, true); 483 } 484 485 return (0); 486 } 487 488 struct vtimer_softc { 489 struct resource *res; 490 void *ihl; 491 int rid; 492 }; 493 494 static int 495 vtimer_probe(device_t dev) 496 { 497 device_set_desc(dev, "Virtual timer"); 498 return (BUS_PROBE_DEFAULT); 499 } 500 501 static int 502 vtimer_attach(device_t dev) 503 { 504 struct vtimer_softc *sc; 505 506 sc = device_get_softc(dev); 507 508 sc->rid = 0; 509 sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->rid, RF_ACTIVE); 510 if (sc->res == NULL) 511 return (ENXIO); 512 513 bus_setup_intr(dev, sc->res, INTR_TYPE_CLK, vtimer_virtual_timer_intr, 514 NULL, NULL, &sc->ihl); 515 516 return (0); 517 } 518 519 static device_method_t vtimer_methods[] = { 520 /* Device interface */ 521 DEVMETHOD(device_probe, vtimer_probe), 522 DEVMETHOD(device_attach, vtimer_attach), 523 524 /* End */ 525 DEVMETHOD_END 526 }; 527 528 DEFINE_CLASS_0(vtimer, vtimer_driver, vtimer_methods, 529 sizeof(struct vtimer_softc)); 530 531 DRIVER_MODULE(vtimer, generic_timer, vtimer_driver, 0, 0); 532