1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2012 ARM Ltd. 4 * Author: Marc Zyngier <marc.zyngier@arm.com> 5 */ 6 7 #include <linux/cpu.h> 8 #include <linux/kvm.h> 9 #include <linux/kvm_host.h> 10 #include <linux/interrupt.h> 11 #include <linux/irq.h> 12 #include <linux/irqdomain.h> 13 #include <linux/uaccess.h> 14 15 #include <clocksource/arm_arch_timer.h> 16 #include <asm/arch_timer.h> 17 #include <asm/kvm_emulate.h> 18 #include <asm/kvm_hyp.h> 19 20 #include <kvm/arm_vgic.h> 21 #include <kvm/arm_arch_timer.h> 22 23 #include "trace.h" 24 25 static struct timecounter *timecounter; 26 static unsigned int host_vtimer_irq; 27 static unsigned int host_ptimer_irq; 28 static u32 host_vtimer_irq_flags; 29 static u32 host_ptimer_irq_flags; 30 31 static DEFINE_STATIC_KEY_FALSE(has_gic_active_state); 32 33 static const struct kvm_irq_level default_ptimer_irq = { 34 .irq = 30, 35 .level = 1, 36 }; 37 38 static const struct kvm_irq_level default_vtimer_irq = { 39 .irq = 27, 40 .level = 1, 41 }; 42 43 static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx); 44 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, 45 struct arch_timer_context *timer_ctx); 46 static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx); 47 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, 48 struct arch_timer_context *timer, 49 enum kvm_arch_timer_regs treg, 50 u64 val); 51 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, 52 struct arch_timer_context *timer, 53 enum kvm_arch_timer_regs treg); 54 55 u32 timer_get_ctl(struct arch_timer_context *ctxt) 56 { 57 struct kvm_vcpu *vcpu = ctxt->vcpu; 58 59 switch(arch_timer_ctx_index(ctxt)) { 60 case TIMER_VTIMER: 61 return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0); 62 case TIMER_PTIMER: 63 return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0); 64 default: 65 WARN_ON(1); 66 return 0; 67 } 68 } 69 70 u64 timer_get_cval(struct arch_timer_context *ctxt) 71 { 72 struct kvm_vcpu *vcpu = ctxt->vcpu; 73 74 switch(arch_timer_ctx_index(ctxt)) { 75 case TIMER_VTIMER: 76 return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0); 77 case TIMER_PTIMER: 78 return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0); 79 default: 80 WARN_ON(1); 81 return 0; 82 } 83 } 84 85 static u64 timer_get_offset(struct arch_timer_context *ctxt) 86 { 87 struct kvm_vcpu *vcpu = ctxt->vcpu; 88 89 switch(arch_timer_ctx_index(ctxt)) { 90 case TIMER_VTIMER: 91 return __vcpu_sys_reg(vcpu, CNTVOFF_EL2); 92 default: 93 return 0; 94 } 95 } 96 97 static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) 98 { 99 struct kvm_vcpu *vcpu = ctxt->vcpu; 100 101 switch(arch_timer_ctx_index(ctxt)) { 102 case TIMER_VTIMER: 103 __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl; 104 break; 105 case TIMER_PTIMER: 106 __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl; 107 break; 108 default: 109 WARN_ON(1); 110 } 111 } 112 113 static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) 114 { 115 struct kvm_vcpu *vcpu = ctxt->vcpu; 116 117 switch(arch_timer_ctx_index(ctxt)) { 118 case TIMER_VTIMER: 119 __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval; 120 break; 121 case TIMER_PTIMER: 122 __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval; 123 break; 124 default: 125 WARN_ON(1); 126 } 127 } 128 129 static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) 130 { 131 struct kvm_vcpu *vcpu = ctxt->vcpu; 132 133 switch(arch_timer_ctx_index(ctxt)) { 134 case TIMER_VTIMER: 135 __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset; 136 break; 137 default: 138 WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt)); 139 } 140 } 141 142 u64 kvm_phys_timer_read(void) 143 { 144 return timecounter->cc->read(timecounter->cc); 145 } 146 147 static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map) 148 { 149 if (has_vhe()) { 150 map->direct_vtimer = vcpu_vtimer(vcpu); 151 map->direct_ptimer = vcpu_ptimer(vcpu); 152 map->emul_ptimer = NULL; 153 } else { 154 map->direct_vtimer = vcpu_vtimer(vcpu); 155 map->direct_ptimer = NULL; 156 map->emul_ptimer = vcpu_ptimer(vcpu); 157 } 158 159 trace_kvm_get_timer_map(vcpu->vcpu_id, map); 160 } 161 162 static inline bool userspace_irqchip(struct kvm *kvm) 163 { 164 return static_branch_unlikely(&userspace_irqchip_in_use) && 165 unlikely(!irqchip_in_kernel(kvm)); 166 } 167 168 static void soft_timer_start(struct hrtimer *hrt, u64 ns) 169 { 170 hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), 171 HRTIMER_MODE_ABS_HARD); 172 } 173 174 static void soft_timer_cancel(struct hrtimer *hrt) 175 { 176 hrtimer_cancel(hrt); 177 } 178 179 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) 180 { 181 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; 182 struct arch_timer_context *ctx; 183 struct timer_map map; 184 185 /* 186 * We may see a timer interrupt after vcpu_put() has been called which 187 * sets the CPU's vcpu pointer to NULL, because even though the timer 188 * has been disabled in timer_save_state(), the hardware interrupt 189 * signal may not have been retired from the interrupt controller yet. 190 */ 191 if (!vcpu) 192 return IRQ_HANDLED; 193 194 get_timer_map(vcpu, &map); 195 196 if (irq == host_vtimer_irq) 197 ctx = map.direct_vtimer; 198 else 199 ctx = map.direct_ptimer; 200 201 if (kvm_timer_should_fire(ctx)) 202 kvm_timer_update_irq(vcpu, true, ctx); 203 204 if (userspace_irqchip(vcpu->kvm) && 205 !static_branch_unlikely(&has_gic_active_state)) 206 disable_percpu_irq(host_vtimer_irq); 207 208 return IRQ_HANDLED; 209 } 210 211 static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx) 212 { 213 u64 cval, now; 214 215 cval = timer_get_cval(timer_ctx); 216 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx); 217 218 if (now < cval) { 219 u64 ns; 220 221 ns = cyclecounter_cyc2ns(timecounter->cc, 222 cval - now, 223 timecounter->mask, 224 &timecounter->frac); 225 return ns; 226 } 227 228 return 0; 229 } 230 231 static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx) 232 { 233 WARN_ON(timer_ctx && timer_ctx->loaded); 234 return timer_ctx && 235 ((timer_get_ctl(timer_ctx) & 236 (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE); 237 } 238 239 /* 240 * Returns the earliest expiration time in ns among guest timers. 241 * Note that it will return 0 if none of timers can fire. 242 */ 243 static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu) 244 { 245 u64 min_delta = ULLONG_MAX; 246 int i; 247 248 for (i = 0; i < NR_KVM_TIMERS; i++) { 249 struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i]; 250 251 WARN(ctx->loaded, "timer %d loaded\n", i); 252 if (kvm_timer_irq_can_fire(ctx)) 253 min_delta = min(min_delta, kvm_timer_compute_delta(ctx)); 254 } 255 256 /* If none of timers can fire, then return 0 */ 257 if (min_delta == ULLONG_MAX) 258 return 0; 259 260 return min_delta; 261 } 262 263 static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt) 264 { 265 struct arch_timer_cpu *timer; 266 struct kvm_vcpu *vcpu; 267 u64 ns; 268 269 timer = container_of(hrt, struct arch_timer_cpu, bg_timer); 270 vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu); 271 272 /* 273 * Check that the timer has really expired from the guest's 274 * PoV (NTP on the host may have forced it to expire 275 * early). If we should have slept longer, restart it. 276 */ 277 ns = kvm_timer_earliest_exp(vcpu); 278 if (unlikely(ns)) { 279 hrtimer_forward_now(hrt, ns_to_ktime(ns)); 280 return HRTIMER_RESTART; 281 } 282 283 kvm_vcpu_wake_up(vcpu); 284 return HRTIMER_NORESTART; 285 } 286 287 static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt) 288 { 289 struct arch_timer_context *ctx; 290 struct kvm_vcpu *vcpu; 291 u64 ns; 292 293 ctx = container_of(hrt, struct arch_timer_context, hrtimer); 294 vcpu = ctx->vcpu; 295 296 trace_kvm_timer_hrtimer_expire(ctx); 297 298 /* 299 * Check that the timer has really expired from the guest's 300 * PoV (NTP on the host may have forced it to expire 301 * early). If not ready, schedule for a later time. 302 */ 303 ns = kvm_timer_compute_delta(ctx); 304 if (unlikely(ns)) { 305 hrtimer_forward_now(hrt, ns_to_ktime(ns)); 306 return HRTIMER_RESTART; 307 } 308 309 kvm_timer_update_irq(vcpu, true, ctx); 310 return HRTIMER_NORESTART; 311 } 312 313 static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) 314 { 315 enum kvm_arch_timers index; 316 u64 cval, now; 317 318 if (!timer_ctx) 319 return false; 320 321 index = arch_timer_ctx_index(timer_ctx); 322 323 if (timer_ctx->loaded) { 324 u32 cnt_ctl = 0; 325 326 switch (index) { 327 case TIMER_VTIMER: 328 cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL); 329 break; 330 case TIMER_PTIMER: 331 cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL); 332 break; 333 case NR_KVM_TIMERS: 334 /* GCC is braindead */ 335 cnt_ctl = 0; 336 break; 337 } 338 339 return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) && 340 (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) && 341 !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK); 342 } 343 344 if (!kvm_timer_irq_can_fire(timer_ctx)) 345 return false; 346 347 cval = timer_get_cval(timer_ctx); 348 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx); 349 350 return cval <= now; 351 } 352 353 bool kvm_timer_is_pending(struct kvm_vcpu *vcpu) 354 { 355 struct timer_map map; 356 357 get_timer_map(vcpu, &map); 358 359 return kvm_timer_should_fire(map.direct_vtimer) || 360 kvm_timer_should_fire(map.direct_ptimer) || 361 kvm_timer_should_fire(map.emul_ptimer); 362 } 363 364 /* 365 * Reflect the timer output level into the kvm_run structure 366 */ 367 void kvm_timer_update_run(struct kvm_vcpu *vcpu) 368 { 369 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 370 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); 371 struct kvm_sync_regs *regs = &vcpu->run->s.regs; 372 373 /* Populate the device bitmap with the timer states */ 374 regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER | 375 KVM_ARM_DEV_EL1_PTIMER); 376 if (kvm_timer_should_fire(vtimer)) 377 regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER; 378 if (kvm_timer_should_fire(ptimer)) 379 regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER; 380 } 381 382 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, 383 struct arch_timer_context *timer_ctx) 384 { 385 int ret; 386 387 timer_ctx->irq.level = new_level; 388 trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, 389 timer_ctx->irq.level); 390 391 if (!userspace_irqchip(vcpu->kvm)) { 392 ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, 393 timer_ctx->irq.irq, 394 timer_ctx->irq.level, 395 timer_ctx); 396 WARN_ON(ret); 397 } 398 } 399 400 /* Only called for a fully emulated timer */ 401 static void timer_emulate(struct arch_timer_context *ctx) 402 { 403 bool should_fire = kvm_timer_should_fire(ctx); 404 405 trace_kvm_timer_emulate(ctx, should_fire); 406 407 if (should_fire != ctx->irq.level) { 408 kvm_timer_update_irq(ctx->vcpu, should_fire, ctx); 409 return; 410 } 411 412 /* 413 * If the timer can fire now, we don't need to have a soft timer 414 * scheduled for the future. If the timer cannot fire at all, 415 * then we also don't need a soft timer. 416 */ 417 if (!kvm_timer_irq_can_fire(ctx)) { 418 soft_timer_cancel(&ctx->hrtimer); 419 return; 420 } 421 422 soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx)); 423 } 424 425 static void timer_save_state(struct arch_timer_context *ctx) 426 { 427 struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); 428 enum kvm_arch_timers index = arch_timer_ctx_index(ctx); 429 unsigned long flags; 430 431 if (!timer->enabled) 432 return; 433 434 local_irq_save(flags); 435 436 if (!ctx->loaded) 437 goto out; 438 439 switch (index) { 440 case TIMER_VTIMER: 441 timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL)); 442 timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL)); 443 444 /* Disable the timer */ 445 write_sysreg_el0(0, SYS_CNTV_CTL); 446 isb(); 447 448 break; 449 case TIMER_PTIMER: 450 timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL)); 451 timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL)); 452 453 /* Disable the timer */ 454 write_sysreg_el0(0, SYS_CNTP_CTL); 455 isb(); 456 457 break; 458 case NR_KVM_TIMERS: 459 BUG(); 460 } 461 462 trace_kvm_timer_save_state(ctx); 463 464 ctx->loaded = false; 465 out: 466 local_irq_restore(flags); 467 } 468 469 /* 470 * Schedule the background timer before calling kvm_vcpu_block, so that this 471 * thread is removed from its waitqueue and made runnable when there's a timer 472 * interrupt to handle. 473 */ 474 static void kvm_timer_blocking(struct kvm_vcpu *vcpu) 475 { 476 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 477 struct timer_map map; 478 479 get_timer_map(vcpu, &map); 480 481 /* 482 * If no timers are capable of raising interrupts (disabled or 483 * masked), then there's no more work for us to do. 484 */ 485 if (!kvm_timer_irq_can_fire(map.direct_vtimer) && 486 !kvm_timer_irq_can_fire(map.direct_ptimer) && 487 !kvm_timer_irq_can_fire(map.emul_ptimer)) 488 return; 489 490 /* 491 * At least one guest time will expire. Schedule a background timer. 492 * Set the earliest expiration time among the guest timers. 493 */ 494 soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); 495 } 496 497 static void kvm_timer_unblocking(struct kvm_vcpu *vcpu) 498 { 499 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 500 501 soft_timer_cancel(&timer->bg_timer); 502 } 503 504 static void timer_restore_state(struct arch_timer_context *ctx) 505 { 506 struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); 507 enum kvm_arch_timers index = arch_timer_ctx_index(ctx); 508 unsigned long flags; 509 510 if (!timer->enabled) 511 return; 512 513 local_irq_save(flags); 514 515 if (ctx->loaded) 516 goto out; 517 518 switch (index) { 519 case TIMER_VTIMER: 520 write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL); 521 isb(); 522 write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL); 523 break; 524 case TIMER_PTIMER: 525 write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL); 526 isb(); 527 write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL); 528 break; 529 case NR_KVM_TIMERS: 530 BUG(); 531 } 532 533 trace_kvm_timer_restore_state(ctx); 534 535 ctx->loaded = true; 536 out: 537 local_irq_restore(flags); 538 } 539 540 static void set_cntvoff(u64 cntvoff) 541 { 542 kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff); 543 } 544 545 static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active) 546 { 547 int r; 548 r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active); 549 WARN_ON(r); 550 } 551 552 static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx) 553 { 554 struct kvm_vcpu *vcpu = ctx->vcpu; 555 bool phys_active = false; 556 557 /* 558 * Update the timer output so that it is likely to match the 559 * state we're about to restore. If the timer expires between 560 * this point and the register restoration, we'll take the 561 * interrupt anyway. 562 */ 563 kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx); 564 565 if (irqchip_in_kernel(vcpu->kvm)) 566 phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq); 567 568 phys_active |= ctx->irq.level; 569 570 set_timer_irq_phys_active(ctx, phys_active); 571 } 572 573 static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) 574 { 575 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 576 577 /* 578 * Update the timer output so that it is likely to match the 579 * state we're about to restore. If the timer expires between 580 * this point and the register restoration, we'll take the 581 * interrupt anyway. 582 */ 583 kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer); 584 585 /* 586 * When using a userspace irqchip with the architected timers and a 587 * host interrupt controller that doesn't support an active state, we 588 * must still prevent continuously exiting from the guest, and 589 * therefore mask the physical interrupt by disabling it on the host 590 * interrupt controller when the virtual level is high, such that the 591 * guest can make forward progress. Once we detect the output level 592 * being de-asserted, we unmask the interrupt again so that we exit 593 * from the guest when the timer fires. 594 */ 595 if (vtimer->irq.level) 596 disable_percpu_irq(host_vtimer_irq); 597 else 598 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); 599 } 600 601 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) 602 { 603 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 604 struct timer_map map; 605 606 if (unlikely(!timer->enabled)) 607 return; 608 609 get_timer_map(vcpu, &map); 610 611 if (static_branch_likely(&has_gic_active_state)) { 612 kvm_timer_vcpu_load_gic(map.direct_vtimer); 613 if (map.direct_ptimer) 614 kvm_timer_vcpu_load_gic(map.direct_ptimer); 615 } else { 616 kvm_timer_vcpu_load_nogic(vcpu); 617 } 618 619 set_cntvoff(timer_get_offset(map.direct_vtimer)); 620 621 kvm_timer_unblocking(vcpu); 622 623 timer_restore_state(map.direct_vtimer); 624 if (map.direct_ptimer) 625 timer_restore_state(map.direct_ptimer); 626 627 if (map.emul_ptimer) 628 timer_emulate(map.emul_ptimer); 629 } 630 631 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) 632 { 633 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 634 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); 635 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; 636 bool vlevel, plevel; 637 638 if (likely(irqchip_in_kernel(vcpu->kvm))) 639 return false; 640 641 vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER; 642 plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER; 643 644 return kvm_timer_should_fire(vtimer) != vlevel || 645 kvm_timer_should_fire(ptimer) != plevel; 646 } 647 648 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) 649 { 650 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 651 struct timer_map map; 652 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu); 653 654 if (unlikely(!timer->enabled)) 655 return; 656 657 get_timer_map(vcpu, &map); 658 659 timer_save_state(map.direct_vtimer); 660 if (map.direct_ptimer) 661 timer_save_state(map.direct_ptimer); 662 663 /* 664 * Cancel soft timer emulation, because the only case where we 665 * need it after a vcpu_put is in the context of a sleeping VCPU, and 666 * in that case we already factor in the deadline for the physical 667 * timer when scheduling the bg_timer. 668 * 669 * In any case, we re-schedule the hrtimer for the physical timer when 670 * coming back to the VCPU thread in kvm_timer_vcpu_load(). 671 */ 672 if (map.emul_ptimer) 673 soft_timer_cancel(&map.emul_ptimer->hrtimer); 674 675 if (rcuwait_active(wait)) 676 kvm_timer_blocking(vcpu); 677 678 /* 679 * The kernel may decide to run userspace after calling vcpu_put, so 680 * we reset cntvoff to 0 to ensure a consistent read between user 681 * accesses to the virtual counter and kernel access to the physical 682 * counter of non-VHE case. For VHE, the virtual counter uses a fixed 683 * virtual offset of zero, so no need to zero CNTVOFF_EL2 register. 684 */ 685 set_cntvoff(0); 686 } 687 688 /* 689 * With a userspace irqchip we have to check if the guest de-asserted the 690 * timer and if so, unmask the timer irq signal on the host interrupt 691 * controller to ensure that we see future timer signals. 692 */ 693 static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu) 694 { 695 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 696 697 if (!kvm_timer_should_fire(vtimer)) { 698 kvm_timer_update_irq(vcpu, false, vtimer); 699 if (static_branch_likely(&has_gic_active_state)) 700 set_timer_irq_phys_active(vtimer, false); 701 else 702 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); 703 } 704 } 705 706 void kvm_timer_sync_user(struct kvm_vcpu *vcpu) 707 { 708 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 709 710 if (unlikely(!timer->enabled)) 711 return; 712 713 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) 714 unmask_vtimer_irq_user(vcpu); 715 } 716 717 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) 718 { 719 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 720 struct timer_map map; 721 722 get_timer_map(vcpu, &map); 723 724 /* 725 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8 726 * and to 0 for ARMv7. We provide an implementation that always 727 * resets the timer to be disabled and unmasked and is compliant with 728 * the ARMv7 architecture. 729 */ 730 timer_set_ctl(vcpu_vtimer(vcpu), 0); 731 timer_set_ctl(vcpu_ptimer(vcpu), 0); 732 733 if (timer->enabled) { 734 kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu)); 735 kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu)); 736 737 if (irqchip_in_kernel(vcpu->kvm)) { 738 kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq); 739 if (map.direct_ptimer) 740 kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq); 741 } 742 } 743 744 if (map.emul_ptimer) 745 soft_timer_cancel(&map.emul_ptimer->hrtimer); 746 747 return 0; 748 } 749 750 /* Make the updates of cntvoff for all vtimer contexts atomic */ 751 static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) 752 { 753 int i; 754 struct kvm *kvm = vcpu->kvm; 755 struct kvm_vcpu *tmp; 756 757 mutex_lock(&kvm->lock); 758 kvm_for_each_vcpu(i, tmp, kvm) 759 timer_set_offset(vcpu_vtimer(tmp), cntvoff); 760 761 /* 762 * When called from the vcpu create path, the CPU being created is not 763 * included in the loop above, so we just set it here as well. 764 */ 765 timer_set_offset(vcpu_vtimer(vcpu), cntvoff); 766 mutex_unlock(&kvm->lock); 767 } 768 769 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) 770 { 771 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 772 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 773 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); 774 775 vtimer->vcpu = vcpu; 776 ptimer->vcpu = vcpu; 777 778 /* Synchronize cntvoff across all vtimers of a VM. */ 779 update_vtimer_cntvoff(vcpu, kvm_phys_timer_read()); 780 timer_set_offset(ptimer, 0); 781 782 hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); 783 timer->bg_timer.function = kvm_bg_timer_expire; 784 785 hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); 786 hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); 787 vtimer->hrtimer.function = kvm_hrtimer_expire; 788 ptimer->hrtimer.function = kvm_hrtimer_expire; 789 790 vtimer->irq.irq = default_vtimer_irq.irq; 791 ptimer->irq.irq = default_ptimer_irq.irq; 792 793 vtimer->host_timer_irq = host_vtimer_irq; 794 ptimer->host_timer_irq = host_ptimer_irq; 795 796 vtimer->host_timer_irq_flags = host_vtimer_irq_flags; 797 ptimer->host_timer_irq_flags = host_ptimer_irq_flags; 798 } 799 800 static void kvm_timer_init_interrupt(void *info) 801 { 802 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); 803 enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags); 804 } 805 806 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) 807 { 808 struct arch_timer_context *timer; 809 810 switch (regid) { 811 case KVM_REG_ARM_TIMER_CTL: 812 timer = vcpu_vtimer(vcpu); 813 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); 814 break; 815 case KVM_REG_ARM_TIMER_CNT: 816 timer = vcpu_vtimer(vcpu); 817 update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value); 818 break; 819 case KVM_REG_ARM_TIMER_CVAL: 820 timer = vcpu_vtimer(vcpu); 821 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); 822 break; 823 case KVM_REG_ARM_PTIMER_CTL: 824 timer = vcpu_ptimer(vcpu); 825 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); 826 break; 827 case KVM_REG_ARM_PTIMER_CVAL: 828 timer = vcpu_ptimer(vcpu); 829 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); 830 break; 831 832 default: 833 return -1; 834 } 835 836 return 0; 837 } 838 839 static u64 read_timer_ctl(struct arch_timer_context *timer) 840 { 841 /* 842 * Set ISTATUS bit if it's expired. 843 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is 844 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit 845 * regardless of ENABLE bit for our implementation convenience. 846 */ 847 u32 ctl = timer_get_ctl(timer); 848 849 if (!kvm_timer_compute_delta(timer)) 850 ctl |= ARCH_TIMER_CTRL_IT_STAT; 851 852 return ctl; 853 } 854 855 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) 856 { 857 switch (regid) { 858 case KVM_REG_ARM_TIMER_CTL: 859 return kvm_arm_timer_read(vcpu, 860 vcpu_vtimer(vcpu), TIMER_REG_CTL); 861 case KVM_REG_ARM_TIMER_CNT: 862 return kvm_arm_timer_read(vcpu, 863 vcpu_vtimer(vcpu), TIMER_REG_CNT); 864 case KVM_REG_ARM_TIMER_CVAL: 865 return kvm_arm_timer_read(vcpu, 866 vcpu_vtimer(vcpu), TIMER_REG_CVAL); 867 case KVM_REG_ARM_PTIMER_CTL: 868 return kvm_arm_timer_read(vcpu, 869 vcpu_ptimer(vcpu), TIMER_REG_CTL); 870 case KVM_REG_ARM_PTIMER_CNT: 871 return kvm_arm_timer_read(vcpu, 872 vcpu_ptimer(vcpu), TIMER_REG_CNT); 873 case KVM_REG_ARM_PTIMER_CVAL: 874 return kvm_arm_timer_read(vcpu, 875 vcpu_ptimer(vcpu), TIMER_REG_CVAL); 876 } 877 return (u64)-1; 878 } 879 880 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, 881 struct arch_timer_context *timer, 882 enum kvm_arch_timer_regs treg) 883 { 884 u64 val; 885 886 switch (treg) { 887 case TIMER_REG_TVAL: 888 val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer); 889 val = lower_32_bits(val); 890 break; 891 892 case TIMER_REG_CTL: 893 val = read_timer_ctl(timer); 894 break; 895 896 case TIMER_REG_CVAL: 897 val = timer_get_cval(timer); 898 break; 899 900 case TIMER_REG_CNT: 901 val = kvm_phys_timer_read() - timer_get_offset(timer); 902 break; 903 904 default: 905 BUG(); 906 } 907 908 return val; 909 } 910 911 u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu, 912 enum kvm_arch_timers tmr, 913 enum kvm_arch_timer_regs treg) 914 { 915 u64 val; 916 917 preempt_disable(); 918 kvm_timer_vcpu_put(vcpu); 919 920 val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg); 921 922 kvm_timer_vcpu_load(vcpu); 923 preempt_enable(); 924 925 return val; 926 } 927 928 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, 929 struct arch_timer_context *timer, 930 enum kvm_arch_timer_regs treg, 931 u64 val) 932 { 933 switch (treg) { 934 case TIMER_REG_TVAL: 935 timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val); 936 break; 937 938 case TIMER_REG_CTL: 939 timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT); 940 break; 941 942 case TIMER_REG_CVAL: 943 timer_set_cval(timer, val); 944 break; 945 946 default: 947 BUG(); 948 } 949 } 950 951 void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu, 952 enum kvm_arch_timers tmr, 953 enum kvm_arch_timer_regs treg, 954 u64 val) 955 { 956 preempt_disable(); 957 kvm_timer_vcpu_put(vcpu); 958 959 kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val); 960 961 kvm_timer_vcpu_load(vcpu); 962 preempt_enable(); 963 } 964 965 static int kvm_timer_starting_cpu(unsigned int cpu) 966 { 967 kvm_timer_init_interrupt(NULL); 968 return 0; 969 } 970 971 static int kvm_timer_dying_cpu(unsigned int cpu) 972 { 973 disable_percpu_irq(host_vtimer_irq); 974 return 0; 975 } 976 977 static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) 978 { 979 if (vcpu) 980 irqd_set_forwarded_to_vcpu(d); 981 else 982 irqd_clr_forwarded_to_vcpu(d); 983 984 return 0; 985 } 986 987 static int timer_irq_set_irqchip_state(struct irq_data *d, 988 enum irqchip_irq_state which, bool val) 989 { 990 if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d)) 991 return irq_chip_set_parent_state(d, which, val); 992 993 if (val) 994 irq_chip_mask_parent(d); 995 else 996 irq_chip_unmask_parent(d); 997 998 return 0; 999 } 1000 1001 static void timer_irq_eoi(struct irq_data *d) 1002 { 1003 if (!irqd_is_forwarded_to_vcpu(d)) 1004 irq_chip_eoi_parent(d); 1005 } 1006 1007 static void timer_irq_ack(struct irq_data *d) 1008 { 1009 d = d->parent_data; 1010 if (d->chip->irq_ack) 1011 d->chip->irq_ack(d); 1012 } 1013 1014 static struct irq_chip timer_chip = { 1015 .name = "KVM", 1016 .irq_ack = timer_irq_ack, 1017 .irq_mask = irq_chip_mask_parent, 1018 .irq_unmask = irq_chip_unmask_parent, 1019 .irq_eoi = timer_irq_eoi, 1020 .irq_set_type = irq_chip_set_type_parent, 1021 .irq_set_vcpu_affinity = timer_irq_set_vcpu_affinity, 1022 .irq_set_irqchip_state = timer_irq_set_irqchip_state, 1023 }; 1024 1025 static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1026 unsigned int nr_irqs, void *arg) 1027 { 1028 irq_hw_number_t hwirq = (uintptr_t)arg; 1029 1030 return irq_domain_set_hwirq_and_chip(domain, virq, hwirq, 1031 &timer_chip, NULL); 1032 } 1033 1034 static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq, 1035 unsigned int nr_irqs) 1036 { 1037 } 1038 1039 static const struct irq_domain_ops timer_domain_ops = { 1040 .alloc = timer_irq_domain_alloc, 1041 .free = timer_irq_domain_free, 1042 }; 1043 1044 static struct irq_ops arch_timer_irq_ops = { 1045 .get_input_level = kvm_arch_timer_get_input_level, 1046 }; 1047 1048 static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags) 1049 { 1050 *flags = irq_get_trigger_type(virq); 1051 if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) { 1052 kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n", 1053 virq); 1054 *flags = IRQF_TRIGGER_LOW; 1055 } 1056 } 1057 1058 static int kvm_irq_init(struct arch_timer_kvm_info *info) 1059 { 1060 struct irq_domain *domain = NULL; 1061 1062 if (info->virtual_irq <= 0) { 1063 kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n", 1064 info->virtual_irq); 1065 return -ENODEV; 1066 } 1067 1068 host_vtimer_irq = info->virtual_irq; 1069 kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags); 1070 1071 if (kvm_vgic_global_state.no_hw_deactivation) { 1072 struct fwnode_handle *fwnode; 1073 struct irq_data *data; 1074 1075 fwnode = irq_domain_alloc_named_fwnode("kvm-timer"); 1076 if (!fwnode) 1077 return -ENOMEM; 1078 1079 /* Assume both vtimer and ptimer in the same parent */ 1080 data = irq_get_irq_data(host_vtimer_irq); 1081 domain = irq_domain_create_hierarchy(data->domain, 0, 1082 NR_KVM_TIMERS, fwnode, 1083 &timer_domain_ops, NULL); 1084 if (!domain) { 1085 irq_domain_free_fwnode(fwnode); 1086 return -ENOMEM; 1087 } 1088 1089 arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE; 1090 WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq, 1091 (void *)TIMER_VTIMER)); 1092 } 1093 1094 if (info->physical_irq > 0) { 1095 host_ptimer_irq = info->physical_irq; 1096 kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags); 1097 1098 if (domain) 1099 WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq, 1100 (void *)TIMER_PTIMER)); 1101 } 1102 1103 return 0; 1104 } 1105 1106 int kvm_timer_hyp_init(bool has_gic) 1107 { 1108 struct arch_timer_kvm_info *info; 1109 int err; 1110 1111 info = arch_timer_get_kvm_info(); 1112 timecounter = &info->timecounter; 1113 1114 if (!timecounter->cc) { 1115 kvm_err("kvm_arch_timer: uninitialized timecounter\n"); 1116 return -ENODEV; 1117 } 1118 1119 err = kvm_irq_init(info); 1120 if (err) 1121 return err; 1122 1123 /* First, do the virtual EL1 timer irq */ 1124 1125 err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler, 1126 "kvm guest vtimer", kvm_get_running_vcpus()); 1127 if (err) { 1128 kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n", 1129 host_vtimer_irq, err); 1130 return err; 1131 } 1132 1133 if (has_gic) { 1134 err = irq_set_vcpu_affinity(host_vtimer_irq, 1135 kvm_get_running_vcpus()); 1136 if (err) { 1137 kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); 1138 goto out_free_irq; 1139 } 1140 1141 static_branch_enable(&has_gic_active_state); 1142 } 1143 1144 kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq); 1145 1146 /* Now let's do the physical EL1 timer irq */ 1147 1148 if (info->physical_irq > 0) { 1149 err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler, 1150 "kvm guest ptimer", kvm_get_running_vcpus()); 1151 if (err) { 1152 kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n", 1153 host_ptimer_irq, err); 1154 return err; 1155 } 1156 1157 if (has_gic) { 1158 err = irq_set_vcpu_affinity(host_ptimer_irq, 1159 kvm_get_running_vcpus()); 1160 if (err) { 1161 kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); 1162 goto out_free_irq; 1163 } 1164 } 1165 1166 kvm_debug("physical timer IRQ%d\n", host_ptimer_irq); 1167 } else if (has_vhe()) { 1168 kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n", 1169 info->physical_irq); 1170 err = -ENODEV; 1171 goto out_free_irq; 1172 } 1173 1174 cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, 1175 "kvm/arm/timer:starting", kvm_timer_starting_cpu, 1176 kvm_timer_dying_cpu); 1177 return 0; 1178 out_free_irq: 1179 free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus()); 1180 return err; 1181 } 1182 1183 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) 1184 { 1185 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 1186 1187 soft_timer_cancel(&timer->bg_timer); 1188 } 1189 1190 static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) 1191 { 1192 int vtimer_irq, ptimer_irq; 1193 int i, ret; 1194 1195 vtimer_irq = vcpu_vtimer(vcpu)->irq.irq; 1196 ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu)); 1197 if (ret) 1198 return false; 1199 1200 ptimer_irq = vcpu_ptimer(vcpu)->irq.irq; 1201 ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu)); 1202 if (ret) 1203 return false; 1204 1205 kvm_for_each_vcpu(i, vcpu, vcpu->kvm) { 1206 if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq || 1207 vcpu_ptimer(vcpu)->irq.irq != ptimer_irq) 1208 return false; 1209 } 1210 1211 return true; 1212 } 1213 1214 bool kvm_arch_timer_get_input_level(int vintid) 1215 { 1216 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); 1217 struct arch_timer_context *timer; 1218 1219 if (vintid == vcpu_vtimer(vcpu)->irq.irq) 1220 timer = vcpu_vtimer(vcpu); 1221 else if (vintid == vcpu_ptimer(vcpu)->irq.irq) 1222 timer = vcpu_ptimer(vcpu); 1223 else 1224 BUG(); 1225 1226 return kvm_timer_should_fire(timer); 1227 } 1228 1229 int kvm_timer_enable(struct kvm_vcpu *vcpu) 1230 { 1231 struct arch_timer_cpu *timer = vcpu_timer(vcpu); 1232 struct timer_map map; 1233 int ret; 1234 1235 if (timer->enabled) 1236 return 0; 1237 1238 /* Without a VGIC we do not map virtual IRQs to physical IRQs */ 1239 if (!irqchip_in_kernel(vcpu->kvm)) 1240 goto no_vgic; 1241 1242 /* 1243 * At this stage, we have the guarantee that the vgic is both 1244 * available and initialized. 1245 */ 1246 if (!timer_irqs_are_valid(vcpu)) { 1247 kvm_debug("incorrectly configured timer irqs\n"); 1248 return -EINVAL; 1249 } 1250 1251 get_timer_map(vcpu, &map); 1252 1253 ret = kvm_vgic_map_phys_irq(vcpu, 1254 map.direct_vtimer->host_timer_irq, 1255 map.direct_vtimer->irq.irq, 1256 &arch_timer_irq_ops); 1257 if (ret) 1258 return ret; 1259 1260 if (map.direct_ptimer) { 1261 ret = kvm_vgic_map_phys_irq(vcpu, 1262 map.direct_ptimer->host_timer_irq, 1263 map.direct_ptimer->irq.irq, 1264 &arch_timer_irq_ops); 1265 } 1266 1267 if (ret) 1268 return ret; 1269 1270 no_vgic: 1271 timer->enabled = 1; 1272 return 0; 1273 } 1274 1275 /* 1276 * On VHE system, we only need to configure the EL2 timer trap register once, 1277 * not for every world switch. 1278 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, 1279 * and this makes those bits have no effect for the host kernel execution. 1280 */ 1281 void kvm_timer_init_vhe(void) 1282 { 1283 /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */ 1284 u32 cnthctl_shift = 10; 1285 u64 val; 1286 1287 /* 1288 * VHE systems allow the guest direct access to the EL1 physical 1289 * timer/counter. 1290 */ 1291 val = read_sysreg(cnthctl_el2); 1292 val |= (CNTHCTL_EL1PCEN << cnthctl_shift); 1293 val |= (CNTHCTL_EL1PCTEN << cnthctl_shift); 1294 write_sysreg(val, cnthctl_el2); 1295 } 1296 1297 static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq) 1298 { 1299 struct kvm_vcpu *vcpu; 1300 int i; 1301 1302 kvm_for_each_vcpu(i, vcpu, kvm) { 1303 vcpu_vtimer(vcpu)->irq.irq = vtimer_irq; 1304 vcpu_ptimer(vcpu)->irq.irq = ptimer_irq; 1305 } 1306 } 1307 1308 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) 1309 { 1310 int __user *uaddr = (int __user *)(long)attr->addr; 1311 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 1312 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); 1313 int irq; 1314 1315 if (!irqchip_in_kernel(vcpu->kvm)) 1316 return -EINVAL; 1317 1318 if (get_user(irq, uaddr)) 1319 return -EFAULT; 1320 1321 if (!(irq_is_ppi(irq))) 1322 return -EINVAL; 1323 1324 if (vcpu->arch.timer_cpu.enabled) 1325 return -EBUSY; 1326 1327 switch (attr->attr) { 1328 case KVM_ARM_VCPU_TIMER_IRQ_VTIMER: 1329 set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq); 1330 break; 1331 case KVM_ARM_VCPU_TIMER_IRQ_PTIMER: 1332 set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq); 1333 break; 1334 default: 1335 return -ENXIO; 1336 } 1337 1338 return 0; 1339 } 1340 1341 int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) 1342 { 1343 int __user *uaddr = (int __user *)(long)attr->addr; 1344 struct arch_timer_context *timer; 1345 int irq; 1346 1347 switch (attr->attr) { 1348 case KVM_ARM_VCPU_TIMER_IRQ_VTIMER: 1349 timer = vcpu_vtimer(vcpu); 1350 break; 1351 case KVM_ARM_VCPU_TIMER_IRQ_PTIMER: 1352 timer = vcpu_ptimer(vcpu); 1353 break; 1354 default: 1355 return -ENXIO; 1356 } 1357 1358 irq = timer->irq.irq; 1359 return put_user(irq, uaddr); 1360 } 1361 1362 int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) 1363 { 1364 switch (attr->attr) { 1365 case KVM_ARM_VCPU_TIMER_IRQ_VTIMER: 1366 case KVM_ARM_VCPU_TIMER_IRQ_PTIMER: 1367 return 0; 1368 } 1369 1370 return -ENXIO; 1371 } 1372