xref: /linux/arch/loongarch/kvm/timer.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <asm/kvm_csr.h>
8 #include <asm/kvm_vcpu.h>
9 
10 /*
11  * ktime_to_tick() - Scale ktime_t to timer tick value.
12  */
13 static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
14 {
15 	u64 delta;
16 
17 	delta = ktime_to_ns(now);
18 	return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
19 }
20 
21 static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
22 {
23 	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
24 }
25 
26 /* Low level hrtimer wake routine */
27 enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
28 {
29 	struct kvm_vcpu *vcpu;
30 
31 	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
32 	kvm_queue_irq(vcpu, INT_TI);
33 	rcuwait_wake_up(&vcpu->wait);
34 
35 	return HRTIMER_NORESTART;
36 }
37 
38 /*
39  * Initialise the timer to the specified frequency, zero it
40  */
41 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
42 {
43 	vcpu->arch.timer_mhz = timer_hz >> 20;
44 
45 	/* Starting at 0 */
46 	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
47 }
48 
/*
 * Restore soft timer state from saved context.
 *
 * Re-arms the guest's hardware stable timer from the software CSR copies
 * saved by kvm_save_timer(), compensating for the wall-clock time that
 * elapsed while the vCPU was scheduled out. NOTE(review): presumably
 * called on vcpu load with preemption disabled — confirm against caller.
 */
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, estat;
	unsigned long ticks, delta, period;
	ktime_t expire, now;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Set guest stable timer cfg csr
	 * Disable timer before restore estat CSR register, avoid to
	 * get invalid timer interrupt for old timer cfg
	 */
	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);

	write_gcsr_timercfg(0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	if (!(cfg & CSR_TCFG_EN)) {
		/* Guest timer is disabled, just restore timer registers */
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
		return;
	}

	/*
	 * Freeze the soft-timer and sync the guest stable timer with it.
	 * The soft timer only runs while the vCPU is blocking; cancel it so
	 * it cannot fire after the hardware timer takes over.
	 */
	if (kvm_vcpu_is_blocking(vcpu))
		hrtimer_cancel(&vcpu->arch.swtimer);

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
	 * If oneshot timer is fired, CSR TVAL will be -1, there are two
	 * conditions:
	 *  1) timer is fired during exiting to host
	 *  2) timer is fired and vm is doing timer irq, and then exiting to
	 *     host. Host should not inject timer irq to avoid spurious
	 *     timer interrupt again
	 */
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
	estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	/* One-shot timer already fired: TVAL (-1) compares above TCFG */
	if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
		/*
		 * Writing 0 to LOONGARCH_CSR_TVAL will inject timer irq
		 * and set CSR TVAL with -1
		 */
		write_gcsr_timertick(0);

		/*
		 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
		 * timer interrupt, and CSR TVAL keeps unchanged with -1, it
		 * avoids spurious timer interrupt. Only clear when the guest
		 * has not yet seen the pending interrupt in ESTAT.
		 */
		if (!(estat & CPU_TIMER))
			gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
		return;
	}

	/*
	 * Set remainder tick value if not expired
	 */
	delta = 0;
	now = ktime_get();
	expire = vcpu->arch.expire;
	if (ktime_before(now, expire))
		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
	else if (cfg & CSR_TCFG_PERIOD) {
		/* Periodic timer expired while out: advance to next period */
		period = cfg & CSR_TCFG_VAL;
		delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
		delta = period - (delta % period);

		/*
		 * Inject timer here though sw timer should inject timer
		 * interrupt async already, since sw timer may be cancelled
		 * during injecting intr async
		 */
		kvm_queue_irq(vcpu, INT_TI);
	}

	write_gcsr_timertick(delta);
}
132 
/*
 * Save guest timer state and switch to software emulation of guest
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 *
 * Records the absolute expiry time in vcpu->arch.expire so that
 * kvm_restore_timer() can compensate for time spent scheduled out, and
 * arms the hrtimer-based soft timer only while the vCPU is blocking.
 */
static void _kvm_save_timer(struct kvm_vcpu *vcpu)
{
	unsigned long ticks, delta, cfg;
	ktime_t expire;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
	 * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG
	 * If oneshot timer is fired, CSR TVAL will be -1
	 * Here judge one-shot timer fired by checking whether TVAL is larger
	 * than TCFG
	 */
	if (ticks < cfg)
		delta = tick_to_ns(vcpu, ticks);
	else
		delta = 0;	/* already fired: treat as expiring now */

	expire = ktime_add_ns(ktime_get(), delta);
	vcpu->arch.expire = expire;
	if (kvm_vcpu_is_blocking(vcpu)) {

		/*
		 * HRTIMER_MODE_PINNED_HARD is suggested since vcpu may run in
		 * the same physical cpu in next time, and the timer should run
		 * in hardirq context even in the PREEMPT_RT case.
		 */
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
	}
}
171 
/*
 * Save guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/* Hard timer is per-CPU hardware state; forbid migration meanwhile */
	preempt_disable();

	/* Save hard timer state */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
	/* Only hand off to the soft timer when the guest timer is enabled */
	if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
		_kvm_save_timer(vcpu);

	/* Save timer-related state to vCPU context */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	preempt_enable();
}
192