// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>

/*
 * ktime_to_tick() - Scale ktime_t to timer tick value.
 */
static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
{
	u64 delta;

	delta = ktime_to_ns(now);
	return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
}

static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
{
	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
}
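
/*
 * Note on the scaling above: timer_mhz is timer_hz >> 20 (see
 * kvm_init_timer() below), and pairing it with MNSEC_PER_SEC appears to
 * apply the same 2^20 scaling to NSEC_PER_SEC (an assumption based on
 * how the two are used together here). The two helpers then approximate
 *   ticks = ns * timer_hz / NSEC_PER_SEC
 *   ns    = ticks * NSEC_PER_SEC / timer_hz
 * while keeping the intermediate 64-bit multiplications small enough
 * not to overflow for large deltas.
 */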

/*
 * Push the timer forward on timeout.
 * Handle an hrtimer event by pushing the hrtimer forward one period.
 */
static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, period;

	/* Add periodic tick to current expire time */
	cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
	if (cfg & CSR_TCFG_PERIOD) {
		period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
		hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}
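
/*
 * The periodic re-arm above adds the period to the previous expiry via
 * hrtimer_add_expires_ns() rather than restarting from the current
 * time, so a callback that runs late does not make subsequent periodic
 * ticks drift.
 */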

/* Low level hrtimer wake routine */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
	kvm_queue_irq(vcpu, INT_TI);
	rcuwait_wake_up(&vcpu->wait);

	return kvm_count_timeout(vcpu);
}

/*
 * Initialise the timer with the specified frequency and reset the
 * timer value to zero.
 */
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
{
	vcpu->arch.timer_mhz = timer_hz >> 20;

	/* Starting at 0 */
	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
}
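
/*
 * Illustrative numbers (not taken from this file): with a 100 MHz
 * constant timer, timer_hz = 100000000 and
 * timer_mhz = 100000000 >> 20 = 95, i.e. the frequency in units of
 * 2^20 Hz rather than exact MHz. See the note after tick_to_ns() for
 * how this pairs with MNSEC_PER_SEC in the conversions.
 */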

/*
 * Restore the hard timer state and enable the guest to access timer
 * registers without trapping; should be called with IRQs disabled.
 */
void kvm_acquire_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg;

	cfg = read_csr_gcfg();
	if (!(cfg & CSR_GCFG_TIT))
		return;

	/* Enable guest access to hard timer */
	write_csr_gcfg(cfg & ~CSR_GCFG_TIT);

	/*
	 * Freeze the soft-timer and sync the guest stable timer with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	hrtimer_cancel(&vcpu->arch.swtimer);
}
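
/*
 * This is the counterpart of kvm_save_timer() below: that path sets
 * GCFG.TIT so guest timer accesses trap and hands emulation to the
 * hrtimer, while this path clears GCFG.TIT and cancels the hrtimer
 * before the guest runs against the hardware timer again.
 */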

/*
 * Restore soft timer state from saved context.
 */
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, delta, period;
	ktime_t expire, now;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Set guest stable timer cfg csr
	 */
	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	if (!(cfg & CSR_TCFG_EN)) {
		/* Guest timer is disabled, just restore timer registers */
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
		return;
	}

	/*
	 * Set remainder tick value if not expired
	 */
	now = ktime_get();
	expire = vcpu->arch.expire;
	if (ktime_before(now, expire))
		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
	else {
		if (cfg & CSR_TCFG_PERIOD) {
			period = cfg & CSR_TCFG_VAL;
			delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
			delta = period - (delta % period);
		} else
			delta = 0;
		/*
		 * Inject the timer interrupt here even though the soft timer
		 * should already have injected it asynchronously, since the
		 * soft timer may be cancelled while injecting the interrupt
		 * asynchronously in kvm_acquire_timer().
		 */
		kvm_queue_irq(vcpu, INT_TI);
	}

	write_gcsr_timertick(delta);
}
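
/*
 * Worked example for the expired-while-scheduled-out case above, with
 * illustrative values: for a periodic timer with period = 1000 ticks
 * that is restored 2300 ticks after the recorded expiry, the remainder
 * programmed into the hard timer is 1000 - (2300 % 1000) = 700 ticks,
 * i.e. the time left until the next period boundary. The boundaries
 * already missed are collapsed into the single INT_TI queued above.
 */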

/*
 * Save guest timer state and switch to software emulation of guest
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_save_timer(struct kvm_vcpu *vcpu)
{
	unsigned long ticks, delta;
	ktime_t expire;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
	delta = tick_to_ns(vcpu, ticks);
	expire = ktime_add_ns(ktime_get(), delta);
	vcpu->arch.expire = expire;
	if (ticks) {
		/*
		 * Update the hrtimer to use the new timeout.
		 * HRTIMER_MODE_PINNED is suggested since the vCPU is likely
		 * to run on the same physical CPU next time.
		 */
		hrtimer_cancel(&vcpu->arch.swtimer);
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
	} else
		/*
		 * Inject a timer interrupt so that halt polling can detect it and exit
		 */
		kvm_queue_irq(vcpu, INT_TI);
}
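
/*
 * Design note: when TVAL still holds a non-zero count, the remaining
 * ticks are converted to an absolute expiry and handed to the hrtimer;
 * when it has already counted down to zero, arming an hrtimer would be
 * pointless, so a timer interrupt is queued immediately instead and
 * halt polling sees a pending event. vcpu->arch.expire is recorded in
 * both cases so that kvm_restore_timer() can compute the remainder.
 */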

/*
 * Save guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	preempt_disable();
	cfg = read_csr_gcfg();
	if (!(cfg & CSR_GCFG_TIT)) {
		/* Disable guest use of hard timer */
		write_csr_gcfg(cfg | CSR_GCFG_TIT);

		/* Save hard timer state */
		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
		kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
		if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
			_kvm_save_timer(vcpu);
	}

	/* Save timer-related state to vCPU context */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	preempt_enable();
}
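
/*
 * The preempt_disable()/preempt_enable() pair keeps this thread on one
 * physical CPU while GCFG and the hardware timer CSRs are read and
 * written; those are per-CPU registers, so a migration in the middle
 * of the save would mix state from two CPUs.
 */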

void kvm_reset_timer(struct kvm_vcpu *vcpu)
{
	write_gcsr_timercfg(0);
	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
	hrtimer_cancel(&vcpu->arch.swtimer);
}