xref: /linux/arch/arm64/kvm/arch_timer.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static unsigned int host_ptimer_irq;
static u32 host_vtimer_irq_flags;
static u32 host_ptimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);

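/*
 * Default PPI INTIDs for each timer, following the usual GIC PPI
 * assignments (e.g. 27 for the virtual EL1 timer). These are only
 * defaults: userspace can override them per VM through the
 * KVM_ARM_VCPU_TIMER_IRQ_* attributes, see kvm_arm_timer_set_attr().
 */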
static const u8 default_ppi[] = {
	[TIMER_PTIMER]  = 30,
	[TIMER_VTIMER]  = 27,
	[TIMER_HPTIMER] = 26,
	[TIMER_HVTIMER] = 28,
};

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val);
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);
static bool kvm_arch_timer_get_input_level(int vintid);

static struct irq_ops arch_timer_irq_ops = {
	.get_input_level = kvm_arch_timer_get_input_level,
};

static int nr_timers(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_nv(vcpu))
		return NR_KVM_EL0_TIMERS;

	return NR_KVM_TIMERS;
}

u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	case TIMER_HVTIMER:
		return __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2);
	case TIMER_HPTIMER:
		return __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2);
	default:
		WARN_ON(1);
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	case TIMER_HVTIMER:
		return __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2);
	case TIMER_HPTIMER:
		return __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
	default:
		WARN_ON(1);
		return 0;
	}
}

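/*
 * The offset a guest observes is the sum of an optional per-VM
 * component (voffset/poffset) and an optional per-vCPU component
 * (e.g. CNTVOFF_EL2 with NV); the guest-visible count is always the
 * physical counter minus this total.
 */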
static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
	u64 offset = 0;

	if (!ctxt)
		return 0;

	if (ctxt->offset.vm_offset)
		offset += *ctxt->offset.vm_offset;
	if (ctxt->offset.vcpu_offset)
		offset += *ctxt->offset.vcpu_offset;

	return offset;
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
		break;
	case TIMER_HVTIMER:
		__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
		break;
	case TIMER_HPTIMER:
		__vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
		break;
	case TIMER_PTIMER:
		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
		break;
	case TIMER_HVTIMER:
		__vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
		break;
	case TIMER_HPTIMER:
		__vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
		break;
	default:
		WARN_ON(1);
	}
}

static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
	if (!ctxt->offset.vm_offset) {
		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
		return;
	}

	WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}

u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

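/*
 * Reading aid, derived from the assignments below: which context is
 * backed by the hardware timers and which one is emulated in software.
 *
 *                direct_vtimer  direct_ptimer  emul_vtimer  emul_ptimer
 *  NV, vEL2:     hvtimer        hptimer        vtimer       ptimer
 *  NV, vEL1/0:   vtimer         ptimer         hvtimer      hptimer
 *  VHE:          vtimer         ptimer         NULL         NULL
 *  nVHE:         vtimer         NULL           NULL         ptimer
 */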
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	if (vcpu_has_nv(vcpu)) {
		if (is_hyp_ctxt(vcpu)) {
			map->direct_vtimer = vcpu_hvtimer(vcpu);
			map->direct_ptimer = vcpu_hptimer(vcpu);
			map->emul_vtimer = vcpu_vtimer(vcpu);
			map->emul_ptimer = vcpu_ptimer(vcpu);
		} else {
			map->direct_vtimer = vcpu_vtimer(vcpu);
			map->direct_ptimer = vcpu_ptimer(vcpu);
			map->emul_vtimer = vcpu_hvtimer(vcpu);
			map->emul_ptimer = vcpu_hptimer(vcpu);
		}
	} else if (has_vhe()) {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = vcpu_ptimer(vcpu);
		map->emul_vtimer = NULL;
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = vcpu_vtimer(vcpu);
		map->direct_ptimer = NULL;
		map->emul_vtimer = NULL;
		map->emul_ptimer = vcpu_ptimer(vcpu);
	}

	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}

static inline bool userspace_irqchip(struct kvm *kvm)
{
	return static_branch_unlikely(&userspace_irqchip_in_use) &&
		unlikely(!irqchip_in_kernel(kvm));
}

static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS_HARD);
}

static void soft_timer_cancel(struct hrtimer *hrt)
{
	hrtimer_cancel(hrt);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
	struct arch_timer_context *ctx;
	struct timer_map map;

	/*
	 * We may see a timer interrupt after vcpu_put() has been called which
	 * sets the CPU's vcpu pointer to NULL, because even though the timer
	 * has been disabled in timer_save_state(), the hardware interrupt
	 * signal may not have been retired from the interrupt controller yet.
	 */
	if (!vcpu)
		return IRQ_HANDLED;

	get_timer_map(vcpu, &map);

	if (irq == host_vtimer_irq)
		ctx = map.direct_vtimer;
	else
		ctx = map.direct_ptimer;

	if (kvm_timer_should_fire(ctx))
		kvm_timer_update_irq(vcpu, true, ctx);

	if (userspace_irqchip(vcpu->kvm) &&
	    !static_branch_unlikely(&has_gic_active_state))
		disable_percpu_irq(host_vtimer_irq);

	return IRQ_HANDLED;
}

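/*
 * Convert the number of cycles until 'val' into nanoseconds (e.g. with
 * a 25MHz counter, a delta of 25000 cycles is 1ms). The sub-ns
 * remainder is accumulated in ns_frac so that repeated conversions
 * don't systematically round down.
 */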
static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
				     u64 val)
{
	u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < val) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 val - now,
					 timecounter->mask,
					 &timer_ctx->ns_frac);
		return ns;
	}

	return 0;
}

static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
	return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
}

static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
		((timer_get_ctl(timer_ctx) &
		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		vcpu_get_flag(vcpu, IN_WFIT));
}

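/*
 * WFIT/WFET (FEAT_WFxT) carry an absolute timeout in a general purpose
 * register; compare it against the counter view the guest is currently
 * using (the HV timer's when in a hyp context, the virtual timer's
 * otherwise).
 */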
static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
	struct arch_timer_context *ctx;

	ctx = (vcpu_has_nv(vcpu) && is_hyp_ctxt(vcpu)) ? vcpu_hvtimer(vcpu)
						       : vcpu_vtimer(vcpu);

	return kvm_counter_compute_delta(ctx, val);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
	u64 min_delta = ULLONG_MAX;
	int i;

	for (i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];

		WARN(ctx->loaded, "timer %d loaded\n", i);
		if (kvm_timer_irq_can_fire(ctx))
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of the timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;

	return min_delta;
}

static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
{
	struct arch_timer_context *ctx;
	struct kvm_vcpu *vcpu;
	u64 ns;

	ctx = container_of(hrt, struct arch_timer_context, hrtimer);
	vcpu = ctx->vcpu;

	trace_kvm_timer_hrtimer_expire(ctx);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If not ready, schedule for a later time.
	 */
	ns = kvm_timer_compute_delta(ctx);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	kvm_timer_update_irq(vcpu, true, ctx);
	return HRTIMER_NORESTART;
}

static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	enum kvm_arch_timers index;
	u64 cval, now;

	if (!timer_ctx)
		return false;

	index = arch_timer_ctx_index(timer_ctx);

	if (timer_ctx->loaded) {
		u32 cnt_ctl = 0;

		switch (index) {
		case TIMER_VTIMER:
		case TIMER_HVTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
			break;
		case TIMER_PTIMER:
		case TIMER_HPTIMER:
			cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
			break;
		case NR_KVM_TIMERS:
			/* GCC is braindead */
			cnt_ctl = 0;
			break;
		}

		return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
		        (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
		       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
	}

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_get_cval(timer_ctx);
	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the device bitmap with the timer states */
	regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
				    KVM_ARM_DEV_EL1_PTIMER);
	if (kvm_timer_should_fire(vtimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
	if (kvm_timer_should_fire(ptimer))
		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
				   timer_ctx->irq.level);

	if (!userspace_irqchip(vcpu->kvm)) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
					  timer_irq(timer_ctx),
					  timer_ctx->irq.level,
					  timer_ctx);
		WARN_ON(ret);
	}
}

/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
	bool should_fire = kvm_timer_should_fire(ctx);

	trace_kvm_timer_emulate(ctx, should_fire);

	if (should_fire != ctx->irq.level) {
		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
		return;
	}

	/*
	 * If the timer can fire now, we don't need to have a soft timer
	 * scheduled for the future.  If the timer cannot fire at all,
	 * then we also don't need a soft timer.
	 */
	if (should_fire || !kvm_timer_irq_can_fire(ctx))
		return;

	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
}

static void set_cntvoff(u64 cntvoff)
{
	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
}

static void set_cntpoff(u64 cntpoff)
{
	if (has_cntpoff())
		write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2);
}

static void timer_save_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (!ctx->loaded)
		goto out;

	switch (index) {
		u64 cval;

	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
		isb();

		/*
		 * The kernel may decide to run userspace after
		 * calling vcpu_put, so we reset cntvoff to 0 to
		 * ensure a consistent read between user accesses to
		 * the virtual counter and kernel access to the
		 * physical counter in the non-VHE case.
		 *
		 * For VHE, the virtual counter uses a fixed virtual
		 * offset of zero, so there is no need to zero the
		 * CNTVOFF_EL2 register, but doing so is actually
		 * useful when switching between EL1/vEL2 with NV.
		 *
		 * Do it unconditionally, as this is either unavoidable
		 * or dirt cheap.
		 */
		set_cntvoff(0);
		break;
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
		cval = read_sysreg_el0(SYS_CNTP_CVAL);

		cval -= timer_get_offset(ctx);

		timer_set_cval(ctx, cval);

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
		isb();

		set_cntpoff(0);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_save_state(ctx);

	ctx->loaded = false;
out:
	local_irq_restore(flags);
}

/*
 * Schedule the background timer before calling kvm_vcpu_halt, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * If no timers are capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_vtimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
	 * At least one guest timer will expire. Schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}

static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

static void timer_restore_state(struct arch_timer_context *ctx)
{
	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
	enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
	unsigned long flags;

	if (!timer->enabled)
		return;

	local_irq_save(flags);

	if (ctx->loaded)
		goto out;

	switch (index) {
		u64 cval, offset;

	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		set_cntvoff(timer_get_offset(ctx));
		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		cval = timer_get_cval(ctx);
		offset = timer_get_offset(ctx);
		set_cntpoff(offset);
		cval += offset;
		write_sysreg_el0(cval, SYS_CNTP_CVAL);
		isb();
		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
	}

	trace_kvm_timer_restore_state(ctx);

	ctx->loaded = true;
out:
	local_irq_restore(flags);
}

static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
{
	int r;
	r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
	WARN_ON(r);
}

static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
{
	struct kvm_vcpu *vcpu = ctx->vcpu;
	bool phys_active = false;

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);

	if (irqchip_in_kernel(vcpu->kvm))
		phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));

	phys_active |= ctx->irq.level;

	set_timer_irq_phys_active(ctx, phys_active);
}

static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * Update the timer output so that it is likely to match the
	 * state we're about to restore. If the timer expires between
	 * this point and the register restoration, we'll take the
	 * interrupt anyway.
	 */
	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);

	/*
	 * When using a userspace irqchip with the architected timers and a
	 * host interrupt controller that doesn't support an active state, we
	 * must still prevent continuously exiting from the guest, and
	 * therefore mask the physical interrupt by disabling it on the host
	 * interrupt controller when the virtual level is high, such that the
	 * guest can make forward progress.  Once we detect the output level
	 * being de-asserted, we unmask the interrupt again so that we exit
	 * from the guest when the timer fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

/* If _pred is true, set bit in _set, otherwise set it in _clr */
#define assign_clear_set_bit(_pred, _bit, _clr, _set)			\
	do {								\
		if (_pred)						\
			(_set) |= (_bit);				\
		else							\
			(_clr) |= (_bit);				\
	} while (0)
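
/*
 * Example: in timer_set_traps() below,
 * assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr) adds the
 * EL1PCEN bit to 'clr' when tpt is true (clearing the enable bit is
 * what turns the trap on), and to 'set' otherwise.
 */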

static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
					      struct timer_map *map)
{
	int hw, ret;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	/*
	 * We only ever unmap the vtimer irq on a VHE system that runs nested
	 * virtualization, in which case we have a valid emul_vtimer,
	 * emul_ptimer, direct_vtimer, and direct_ptimer.
	 *
	 * Since this is called from kvm_timer_vcpu_load(), a change between
	 * vEL2 and vEL1/0 will have just happened, and the timer_map will
	 * represent this, and therefore we switch the emul/direct mappings
	 * below.
	 */
	hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer));
	if (hw < 0) {
		kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer));
		kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer));

		ret = kvm_vgic_map_phys_irq(vcpu,
					    map->direct_vtimer->host_timer_irq,
					    timer_irq(map->direct_vtimer),
					    &arch_timer_irq_ops);
		WARN_ON_ONCE(ret);
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map->direct_ptimer->host_timer_irq,
					    timer_irq(map->direct_ptimer),
					    &arch_timer_irq_ops);
		WARN_ON_ONCE(ret);

		/*
		 * The virtual offset behaviour is "interesting", as it
		 * always applies when HCR_EL2.E2H==0, but only when
		 * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
		 * track E2H when putting the HV timer in "direct" mode.
		 */
		if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
			struct arch_timer_offset *offs = &map->direct_vtimer->offset;

			if (vcpu_el2_e2h_is_set(vcpu))
				offs->vcpu_offset = NULL;
			else
				offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		}
	}
}

static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
{
	bool tpt, tpc;
	u64 clr, set;

	/*
	 * No trapping gets configured here with nVHE. See
	 * __timer_enable_traps(), which is where the stuff happens.
	 */
	if (!has_vhe())
		return;

	/*
	 * Our default policy is not to trap anything. As we progress
	 * within this function, reality kicks in and we start adding
	 * traps based on emulation requirements.
	 */
	tpt = tpc = false;

	/*
	 * We have two possibilities to deal with a physical offset:
	 *
	 * - Either we have CNTPOFF (yay!) or the offset is 0:
	 *   we let the guest freely access the HW
	 *
	 * - or neither of these conditions applies:
	 *   we trap accesses to the HW, but still use it
	 *   after correcting the physical offset
	 */
	if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
		tpt = tpc = true;

	/*
	 * Apply the enable bits that the guest hypervisor has requested for
	 * its own guest. We can only add traps that wouldn't have been set
	 * above.
	 */
	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
		u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);

		/* Use the VHE format for mental sanity */
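		/*
		 * (nVHE layout: EL1PCTEN is bit 0 and EL1PCEN is bit 1;
		 * the E2H==1 layout has them at bits 10 and 11, hence
		 * the shift by 10 below.)
		 */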
		if (!vcpu_el2_e2h_is_set(vcpu))
			val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;

		tpt |= !(val & (CNTHCTL_EL1PCEN << 10));
		tpc |= !(val & (CNTHCTL_EL1PCTEN << 10));
	}

	/*
	 * Now that we have collected our requirements, compute the
	 * trap and enable bits.
	 */
	set = 0;
	clr = 0;

	assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
	assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);

	/* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
	sysreg_clear_set(cnthctl_el2, clr, set);
}

void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	if (static_branch_likely(&has_gic_active_state)) {
		if (vcpu_has_nv(vcpu))
			kvm_timer_vcpu_load_nested_switch(vcpu, &map);

		kvm_timer_vcpu_load_gic(map.direct_vtimer);
		if (map.direct_ptimer)
			kvm_timer_vcpu_load_gic(map.direct_ptimer);
	} else {
		kvm_timer_vcpu_load_nogic(vcpu);
	}

	kvm_timer_unblocking(vcpu);

	timer_restore_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_restore_state(map.direct_ptimer);
	if (map.emul_vtimer)
		timer_emulate(map.emul_vtimer);
	if (map.emul_ptimer)
		timer_emulate(map.emul_ptimer);

	timer_set_traps(vcpu, &map);
}

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool vlevel, plevel;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
	plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

	return kvm_timer_should_fire(vtimer) != vlevel ||
	       kvm_timer_should_fire(ptimer) != plevel;
}

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	if (unlikely(!timer->enabled))
		return;

	get_timer_map(vcpu, &map);

	timer_save_state(map.direct_vtimer);
	if (map.direct_ptimer)
		timer_save_state(map.direct_ptimer);

	/*
	 * Cancel soft timer emulation, because the only case where we
	 * need it after a vcpu_put is in the context of a sleeping VCPU, and
	 * in that case we already factor in the deadline for the physical
	 * timer when scheduling the bg_timer.
	 *
	 * In any case, we re-schedule the hrtimer for the physical timer when
	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
	 */
	if (map.emul_vtimer)
		soft_timer_cancel(&map.emul_vtimer->hrtimer);
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_timer_blocking(vcpu);
}

/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	if (!kvm_timer_should_fire(vtimer)) {
		kvm_timer_update_irq(vcpu, false, vtimer);
		if (static_branch_likely(&has_gic_active_state))
			set_timer_irq_phys_active(vtimer, false);
		else
			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	}
}

void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	if (unlikely(!timer->enabled))
		return;

	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		unmask_vtimer_irq_user(vcpu);
}

void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;

	get_timer_map(vcpu, &map);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7.  We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	for (int i = 0; i < nr_timers(vcpu); i++)
		timer_set_ctl(vcpu_get_timer(vcpu, i), 0);

	/*
	 * A vcpu running at EL2 is in charge of the offset applied to
	 * the virtual timer, so use the physical VM offset, and point
	 * the vcpu offset to CNTVOFF_EL2.
	 */
	if (vcpu_has_nv(vcpu)) {
		struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;

		offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
		offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
	}

	if (timer->enabled) {
		for (int i = 0; i < nr_timers(vcpu); i++)
			kvm_timer_update_irq(vcpu, false,
					     vcpu_get_timer(vcpu, i));

		if (irqchip_in_kernel(vcpu->kvm)) {
			kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer));
			if (map.direct_ptimer)
				kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer));
		}
	}

	if (map.emul_vtimer)
		soft_timer_cancel(&map.emul_vtimer->hrtimer);
	if (map.emul_ptimer)
		soft_timer_cancel(&map.emul_ptimer->hrtimer);
}

static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
{
	struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
	struct kvm *kvm = vcpu->kvm;

	ctxt->vcpu = vcpu;

	if (timerid == TIMER_VTIMER)
		ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
	else
		ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;

	hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	ctxt->hrtimer.function = kvm_hrtimer_expire;

	switch (timerid) {
	case TIMER_PTIMER:
	case TIMER_HPTIMER:
		ctxt->host_timer_irq = host_ptimer_irq;
		break;
	case TIMER_VTIMER:
	case TIMER_HVTIMER:
		ctxt->host_timer_irq = host_vtimer_irq;
		break;
	}
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	for (int i = 0; i < NR_KVM_TIMERS; i++)
		timer_context_init(vcpu, i);

	/* Synchronize offsets across timers of a VM if not already provided */
	if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
		timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
		timer_set_offset(vcpu_ptimer(vcpu), 0);
	}

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;
}

void kvm_timer_init_vm(struct kvm *kvm)
{
	for (int i = 0; i < NR_KVM_TIMERS; i++)
		kvm->arch.timer_data.ppi[i] = default_ppi[i];
}

void kvm_timer_cpu_up(void)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
	if (host_ptimer_irq)
		enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
}

void kvm_timer_cpu_down(void)
{
	disable_percpu_irq(host_vtimer_irq);
	if (host_ptimer_irq)
		disable_percpu_irq(host_ptimer_irq);
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *timer;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_TIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_vtimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer = vcpu_vtimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;
	case KVM_REG_ARM_PTIMER_CTL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
		break;
	case KVM_REG_ARM_PTIMER_CNT:
		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
			      &vcpu->kvm->arch.flags)) {
			timer = vcpu_ptimer(vcpu);
			timer_set_offset(timer, kvm_phys_timer_read() - value);
		}
		break;
	case KVM_REG_ARM_PTIMER_CVAL:
		timer = vcpu_ptimer(vcpu);
		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
		break;

	default:
		return -1;
	}

	return 0;
}

static u64 read_timer_ctl(struct arch_timer_context *timer)
{
	/*
	 * Set ISTATUS bit if it's expired.
	 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
	u32 ctl = timer_get_ctl(timer);

	if (!kvm_timer_compute_delta(timer))
		ctl |= ARCH_TIMER_CTRL_IT_STAT;

	return ctl;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_TIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
	case KVM_REG_ARM_PTIMER_CTL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CNT);
	case KVM_REG_ARM_PTIMER_CVAL:
		return kvm_arm_timer_read(vcpu,
					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
	}
	return (u64)-1;
}

static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg)
{
	u64 val;

	switch (treg) {
	case TIMER_REG_TVAL:
		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
		val = read_timer_ctl(timer);
		break;

	case TIMER_REG_CVAL:
		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	case TIMER_REG_VOFF:
		val = *timer->offset.vcpu_offset;
		break;

	default:
		BUG();
	}

	return val;
}

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
			      enum kvm_arch_timers tmr,
			      enum kvm_arch_timer_regs treg)
{
	struct arch_timer_context *timer;
	struct timer_map map;
	u64 val;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);

	if (timer == map.emul_vtimer || timer == map.emul_ptimer)
		return kvm_arm_timer_read(vcpu, timer, treg);

	preempt_disable();
	timer_save_state(timer);

	val = kvm_arm_timer_read(vcpu, timer, treg);

	timer_restore_state(timer);
	preempt_enable();

	return val;
}

static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
				struct arch_timer_context *timer,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	switch (treg) {
	case TIMER_REG_TVAL:
		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
		timer_set_cval(timer, val);
		break;

	case TIMER_REG_VOFF:
		*timer->offset.vcpu_offset = val;
		break;

	default:
		BUG();
	}
}

void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
				enum kvm_arch_timers tmr,
				enum kvm_arch_timer_regs treg,
				u64 val)
{
	struct arch_timer_context *timer;
	struct timer_map map;

	get_timer_map(vcpu, &map);
	timer = vcpu_get_timer(vcpu, tmr);
	if (timer == map.emul_vtimer || timer == map.emul_ptimer) {
		soft_timer_cancel(&timer->hrtimer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_emulate(timer);
	} else {
		preempt_disable();
		timer_save_state(timer);
		kvm_arm_timer_write(vcpu, timer, treg, val);
		timer_restore_state(timer);
		preempt_enable();
	}
}

static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);

	return 0;
}

static int timer_irq_set_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which, bool val)
{
	if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
		return irq_chip_set_parent_state(d, which, val);

	if (val)
		irq_chip_mask_parent(d);
	else
		irq_chip_unmask_parent(d);

	return 0;
}

static void timer_irq_eoi(struct irq_data *d)
{
	if (!irqd_is_forwarded_to_vcpu(d))
		irq_chip_eoi_parent(d);
}

static void timer_irq_ack(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_ack)
		d->chip->irq_ack(d);
}

static struct irq_chip timer_chip = {
	.name			= "KVM",
	.irq_ack		= timer_irq_ack,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= timer_irq_eoi,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_vcpu_affinity	= timer_irq_set_vcpu_affinity,
	.irq_set_irqchip_state	= timer_irq_set_irqchip_state,
};

static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = (uintptr_t)arg;

	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     &timer_chip, NULL);
}

static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
}

static const struct irq_domain_ops timer_domain_ops = {
	.alloc	= timer_irq_domain_alloc,
	.free	= timer_irq_domain_free,
};

static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
{
	*flags = irq_get_trigger_type(virq);
	if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
			virq);
		*flags = IRQF_TRIGGER_LOW;
	}
}

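/*
 * When the GIC offers no HW deactivation of the timer interrupts
 * (kvm_vgic_global_state.no_hw_deactivation), interpose the "KVM"
 * irq_chip above on the timer PPIs through a hierarchy domain, so that
 * the active state can be resampled in software (VGIC_IRQ_SW_RESAMPLE).
 */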
static int kvm_irq_init(struct arch_timer_kvm_info *info)
{
	struct irq_domain *domain = NULL;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}

	host_vtimer_irq = info->virtual_irq;
	kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);

	if (kvm_vgic_global_state.no_hw_deactivation) {
		struct fwnode_handle *fwnode;
		struct irq_data *data;

		fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
		if (!fwnode)
			return -ENOMEM;

		/* Assume both vtimer and ptimer are in the same parent */
		data = irq_get_irq_data(host_vtimer_irq);
		domain = irq_domain_create_hierarchy(data->domain, 0,
						     NR_KVM_TIMERS, fwnode,
						     &timer_domain_ops, NULL);
		if (!domain) {
			irq_domain_free_fwnode(fwnode);
			return -ENOMEM;
		}

		arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
		WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
					    (void *)TIMER_VTIMER));
	}

	if (info->physical_irq > 0) {
		host_ptimer_irq = info->physical_irq;
		kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);

		if (domain)
			WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
						    (void *)TIMER_PTIMER));
	}

	return 0;
}

int __init kvm_timer_hyp_init(bool has_gic)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	err = kvm_irq_init(info);
	if (err)
		return err;

	/* First, do the virtual EL1 timer irq */

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest vtimer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	if (has_gic) {
		err = irq_set_vcpu_affinity(host_vtimer_irq,
					    kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
			goto out_free_vtimer_irq;
		}

		static_branch_enable(&has_gic_active_state);
	}

	kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

	/* Now let's do the physical EL1 timer irq */

	if (info->physical_irq > 0) {
		err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
					 "kvm guest ptimer", kvm_get_running_vcpus());
		if (err) {
			kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
				host_ptimer_irq, err);
			goto out_free_vtimer_irq;
		}

		if (has_gic) {
			err = irq_set_vcpu_affinity(host_ptimer_irq,
						    kvm_get_running_vcpus());
			if (err) {
				kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
				goto out_free_ptimer_irq;
			}
		}

		kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
	} else if (has_vhe()) {
		kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
			info->physical_irq);
		err = -ENODEV;
		goto out_free_vtimer_irq;
	}

	return 0;

out_free_ptimer_irq:
	if (info->physical_irq > 0)
		free_percpu_irq(host_ptimer_irq, kvm_get_running_vcpus());
out_free_vtimer_irq:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
	return err;
}

void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);

	soft_timer_cancel(&timer->bg_timer);
}

static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
	u32 ppis = 0;
	bool valid;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	for (int i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx;
		int irq;

		ctx = vcpu_get_timer(vcpu, i);
		irq = timer_irq(ctx);
		if (kvm_vgic_set_owner(vcpu, irq, ctx))
			break;

		/*
		 * We know by construction that we only have PPIs, so
		 * all values are less than 32.
		 */
		ppis |= BIT(irq);
	}

	valid = hweight32(ppis) == nr_timers(vcpu);

	if (valid)
		set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags);

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return valid;
}

static bool kvm_arch_timer_get_input_level(int vintid)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	if (WARN(!vcpu, "No vcpu context!\n"))
		return false;

	for (int i = 0; i < nr_timers(vcpu); i++) {
		struct arch_timer_context *ctx;

		ctx = vcpu_get_timer(vcpu, i);
		if (timer_irq(ctx) == vintid)
			return kvm_timer_should_fire(ctx);
	}

	/* A timer IRQ has fired, but no matching timer was found? */
	WARN_RATELIMIT(1, "timer INTID%d unknown\n", vintid);

	return false;
}

int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
	struct timer_map map;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	/*
	 * At this stage, we have the guarantee that the vgic is both
	 * available and initialized.
	 */
	if (!timer_irqs_are_valid(vcpu)) {
		kvm_debug("incorrectly configured timer irqs\n");
		return -EINVAL;
	}

	get_timer_map(vcpu, &map);

	ret = kvm_vgic_map_phys_irq(vcpu,
				    map.direct_vtimer->host_timer_irq,
				    timer_irq(map.direct_vtimer),
				    &arch_timer_irq_ops);
	if (ret)
		return ret;

	if (map.direct_ptimer) {
		ret = kvm_vgic_map_phys_irq(vcpu,
					    map.direct_ptimer->host_timer_irq,
					    timer_irq(map.direct_ptimer),
					    &arch_timer_irq_ops);
	}

	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}

/* If we have CNTPOFF, permanently set ECV to enable it */
void kvm_timer_init_vhe(void)
{
	if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
		sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
}

int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	int irq, idx, ret = 0;

	if (!irqchip_in_kernel(vcpu->kvm))
		return -EINVAL;

	if (get_user(irq, uaddr))
		return -EFAULT;

	if (!(irq_is_ppi(irq)))
		return -EINVAL;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
		     &vcpu->kvm->arch.flags)) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		idx = TIMER_VTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		idx = TIMER_PTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
		idx = TIMER_HVTIMER;
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		idx = TIMER_HPTIMER;
		break;
	default:
		ret = -ENXIO;
		goto out;
	}

	/*
	 * We cannot validate the IRQ uniqueness before we run, so take it at
	 * face value. The verdict will be given on first vcpu run, for each
	 * vcpu. Yes this is late. Blame it on the stupid API.
	 */
	vcpu->kvm->arch.timer_data.ppi[idx] = irq;

out:
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	return ret;
}

int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	int __user *uaddr = (int __user *)(long)attr->addr;
	struct arch_timer_context *timer;
	int irq;

	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
		timer = vcpu_vtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
		timer = vcpu_ptimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
		timer = vcpu_hvtimer(vcpu);
		break;
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		timer = vcpu_hptimer(vcpu);
		break;
	default:
		return -ENXIO;
	}

	irq = timer_irq(timer);
	return put_user(irq, uaddr);
}

int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
	case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
		return 0;
	}

	return -ENXIO;
}

int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset)
{
	int ret = 0;

	if (offset->reserved)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (lock_all_vcpus(kvm)) {
		set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);

		/*
		 * If userspace decides to set the offset using this
		 * API rather than merely restoring the counter
		 * values, the offset applies to both the virtual and
		 * physical views.
		 */
		kvm->arch.timer_data.voffset = offset->counter_offset;
		kvm->arch.timer_data.poffset = offset->counter_offset;

		unlock_all_vcpus(kvm);
	} else {
		ret = -EBUSY;
	}

	mutex_unlock(&kvm->lock);

	return ret;
}
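
/*
 * Userspace reaches the function above through the
 * KVM_ARM_SET_COUNTER_OFFSET VM ioctl. A minimal sketch of a caller
 * (illustrative only, error handling omitted):
 *
 *	struct kvm_arm_counter_offset off = {
 *		.counter_offset	= 0x100000,	// subtracted from both counter views
 *		.reserved	= 0,		// must be zero
 *	};
 *	ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off);
 */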
1683