// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

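/*
 * Accessors for the saved timer state. CTL and CVAL are kept in the vcpu's
 * sysreg file; the counter offset, when one is set up, is shared per-VM.
 */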
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	if (ctxt->offset.vm_offset)
		return *ctxt->offset.vm_offset;

	return 0;
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch(arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	if (!ctxt->offset.vm_offset) {
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
		return;
	}

	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

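/*
 * Work out which guest timers are backed directly by hardware and which are
 * emulated. With VHE both EL1 timers can be assigned to the guest; without
 * VHE only the virtual timer is, and the EL1 physical timer is emulated.
 */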
static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

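/*
 * Host interrupt handler for the timer PPIs, invoked when a hardware timer
 * assigned to the currently loaded vcpu fires while we are in the host.
 */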
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

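/*
 * Return the time in ns until the guest counter reaches @val, or 0 if that
 * point has already passed.
 */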
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
		((timer_get_ctl(timer_ctx) &
		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}

static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

	return kvm_counter_compute_delta(ctx, val);
}

/*
 * Returns the earliest expiration time in ns among the guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < NR_KVM_TIMERS; i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of the timers can fire, return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

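/*
 * Compute the timer output line level: either straight from the hardware
 * registers while the timer is loaded, or from the saved CTL/CVAL otherwise.
 */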
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		        (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future.  If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (should_fire || !kvm_timer_irq_can_fire(ctx))
		return;

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

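/*
 * Save the state of a hardware-backed timer into the vcpu context and
 * disable the hardware timer, so that it no longer fires on the host.
 */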
static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		/*
		 * The kernel may decide to run userspace after
		 * calling vcpu_put, so we reset cntvoff to 0 to
		 * ensure a consistent read between user accesses to
		 * the virtual counter and kernel accesses to the
		 * physical counter in the non-VHE case.
		 *
		 * For VHE, the virtual counter uses a fixed virtual
		 * offset of zero, so there is no need to zero the
		 * CNTVOFF_EL2 register, but doing so is actually
		 * useful when switching between EL1/vEL2 with NV.
		 *
		 * Do it unconditionally, as this is either unavoidable
		 * or dirt cheap.
		 */
		set_cntvoff(0);
		break;
	case TIMER_PTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer
	 * for the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

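/*
 * Load the saved timer state back into the hardware timer registers for a
 * directly assigned timer.
 */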
static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
	case TIMER_VTIMER:
		set_cntvoff(timer_get_offset(ctx));
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress.  Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

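/*
 * Called from vcpu_load(): restore the directly assigned timers onto the
 * hardware and let any emulated timer reschedule its hrtimer.
 */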
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);

	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

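/*
 * Called from vcpu_put(): save the directly assigned timers, stop software
 * emulation, and arm the background timer if the vcpu is about to block.
 */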
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_timer_blocking(vcpu);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7.  We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer_set_ctl(vcpu_vtimer(vcpu), 0);
	timer_set_ctl(vcpu_ptimer(vcpu), 0);

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
		}
	}

	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	return 0;
}

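/*
 * Per-vcpu timer initialisation: wire up the timer contexts, hrtimers,
 * default PPI numbers and host interrupt configuration.
 */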
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	vtimer->vcpu = vcpu;
	vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
	ptimer->vcpu = vcpu;

	/* Synchronize cntvoff across all vtimers of a VM. */
	timer_set_offset(vtimer, kvm_phys_timer_read());
	timer_set_offset(ptimer, 0);

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;

	hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vtimer->hrtimer.function = kvm_hrtimer_expire;
	ptimer->hrtimer.function = kvm_hrtimer_expire;

	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;

	vtimer->host_timer_irq = host_vtimer_irq;
	ptimer->host_timer_irq = host_ptimer_irq;

	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
}

void kvm_timer_cpu_up(void)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	if (host_ptimer_irq)
		enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

void kvm_timer_cpu_down(void)
{
	disable_percpu_irq(host_vtimer_irq);
	if (host_ptimer_irq)
		disable_percpu_irq(host_ptimer_irq);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		timer = vcpu_vtimer(vcpu);
		timer_set_offset(timer, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set the ISTATUS bit if the timer has expired.
	 * Note that according to the ARMv8 ARM (Issue A.k), the ISTATUS bit
	 * is UNKNOWN when the ENABLE bit is 0, so we choose to set ISTATUS
	 * regardless of ENABLE for implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	default:
		BUG();
	}

	return val;
}

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	struct arch_timer_context *timer;
	struct timer_map map;
	u64 val;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);

	if (timer == map.emul_ptimer)
		return kvm_arm_timer_read(vcpu, timer, treg);

	preempt_disable();
	timer_save_state(timer);

	val = kvm_arm_timer_read(vcpu, timer, treg);

	timer_restore_state(timer);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	struct arch_timer_context *timer;
	struct timer_map map;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);
	if (timer == map.emul_ptimer) {
		soft_timer_cancel(&timer->hrtimer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_emulate(timer);
	} else {
		preempt_disable();
		timer_save_state(timer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_restore_state(timer);
		preempt_enable();
	}
}

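/*
 * irq_chip callbacks for the "kvm-timer" hierarchical domain, used when the
 * GIC cannot deactivate the timer interrupt in hardware and KVM has to
 * resample it in software instead.
 */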
static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}

static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}

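/*
 * Discover the host timer PPIs and their trigger configuration, and, if the
 * GIC lacks hardware deactivation, interpose the kvm-timer irqdomain on top
 * of them.
 */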
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}

int __init kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			return err;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_irq;
	}

	return 0;
out_free_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

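/*
 * Claim the timer PPIs for this vcpu and check that every vcpu in the VM
 * uses the same interrupt numbers for its vtimer and ptimer.
 */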
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	int vtimer_irq, ptimer_irq, ret;
	unsigned long i;

	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
	if (ret)
		return false;

	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
	if (ret)
		return false;

	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
			return false;
	}

	return true;
}

bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
	struct arch_timer_context *timer;

	if (WARN(!vcpu, "No vcpu context!\n"))
		return false;

	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
		timer = vcpu_vtimer(vcpu);
	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
		timer = vcpu_ptimer(vcpu);
	else
		BUG();

	return kvm_timer_should_fire(timer);
}

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    map.direct_vtimer->irq.irq,
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    map.direct_ptimer->irq.irq,
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/*
 * On a VHE system, we only need to configure the EL2 timer trap register
 * once, not on every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1, so these bits have no
 * effect on host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * VHE systems allow the guest direct access to the EL1 physical
	 * timer/counter.
	 */
	val = read_sysreg(cnthctl_el2);
	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
	}
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	int irq;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	if (vcpu->arch.timer_cpu.enabled)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer->irq.irq;
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		return 0;
	}

	return -ENXIO;
}