1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4 */
5
6 #include <linux/kvm_host.h>
7 #include <asm/delay.h>
8 #include <asm/kvm_csr.h>
9 #include <asm/kvm_vcpu.h>
10
11 /*
12 * ktime_to_tick() - Scale ktime_t to timer tick value.
13 */
ktime_to_tick(struct kvm_vcpu * vcpu,ktime_t now)14 static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
15 {
16 u64 delta;
17
18 delta = ktime_to_ns(now);
19 return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
20 }
21
/*
 * tick_to_ns() - Convert a stable-timer tick count to nanoseconds
 * (inverse of ktime_to_tick()).
 */
static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
{
	u64 scaled = tick * MNSEC_PER_SEC;

	return div_u64(scaled, vcpu->arch.timer_mhz);
}
26
27 /* Low level hrtimer wake routine */
kvm_swtimer_wakeup(struct hrtimer * timer)28 enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
29 {
30 struct kvm_vcpu *vcpu;
31
32 vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
33 kvm_queue_irq(vcpu, INT_TI);
34 rcuwait_wake_up(&vcpu->wait);
35
36 return HRTIMER_NORESTART;
37 }
38
39 /*
40 * Initialise the timer to the specified frequency, zero it
41 */
kvm_init_timer(struct kvm_vcpu * vcpu,unsigned long timer_hz)42 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
43 {
44 vcpu->arch.timer_mhz = timer_hz >> 20;
45
46 /* Starting at 0 */
47 kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
48 }
49
/*
 * Restore soft timer state from saved context.
 *
 * Runs when the vCPU is about to execute on hardware: re-arms the guest
 * stable timer (CSR.TCFG/CSR.TVAL) from the software-saved state,
 * compensating for time that elapsed while the soft hrtimer emulated it.
 */
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, estat;
	unsigned long ticks, delta, period;
	ktime_t expire, now;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Set guest stable timer cfg csr
	 * Disable timer before restore estat CSR register, avoid to
	 * get invalid timer interrupt for old timer cfg
	 */
	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);

	write_gcsr_timercfg(0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	if (!(cfg & CSR_TCFG_EN)) {
		/* Guest timer is disabled, just restore timer registers */
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
		return;
	}

	/*
	 * Freeze the soft-timer and sync the guest stable timer with it.
	 * The hrtimer is only armed while the vCPU was blocking (see
	 * _kvm_save_timer()), so it only needs cancelling in that case.
	 */
	if (kvm_vcpu_is_blocking(vcpu))
		hrtimer_cancel(&vcpu->arch.swtimer);

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
	 * If oneshot timer is fired, CSR TVAL will be -1, there are two
	 * conditions:
	 * 1) timer is fired during exiting to host
	 * 2) timer is fired and vm is doing timer irq, and then exiting to
	 *    host. Host should not inject timer irq to avoid spurious
	 *    timer interrupt again
	 */
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
	estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
		/*
		 * ticks > cfg means the one-shot timer already fired (TVAL
		 * holds -1). Writing 0 to LOONGARCH_CSR_TVAL will inject
		 * timer irq and set CSR TVAL with -1
		 */
		write_gcsr_timertick(0);

		/*
		 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
		 * timer interrupt, and CSR TVAL keeps unchanged with -1, it
		 * avoids spurious timer interrupt
		 */
		if (!(estat & CPU_TIMER)) {
			__delay(2); /* Wait cycles until timer interrupt injected */

			/* Write TVAL with max value if no TI shot */
			estat = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT);
			if (!(estat & CPU_TIMER))
				write_gcsr_timertick(CSR_TCFG_VAL);
			gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
		}
		return;
	}

	/*
	 * Set remainder tick value if not expired
	 */
	delta = 0;
	now = ktime_get();
	expire = vcpu->arch.expire;
	if (ktime_before(now, expire))
		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
	else if (cfg & CSR_TCFG_PERIOD) {
		/* Periodic timer expired while out: restart within current period */
		period = cfg & CSR_TCFG_VAL;
		delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
		delta = period - (delta % period);

		/*
		 * Inject timer here though sw timer should inject timer
		 * interrupt async already, since sw timer may be cancelled
		 * during injecting intr async
		 */
		kvm_queue_irq(vcpu, INT_TI);
	}

	write_gcsr_timertick(delta);
}
140
/*
 * Save guest timer state and switch to software emulation of guest
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 *
 * Records the guest timer's absolute host-clock expiry in
 * vcpu->arch.expire; if the vCPU is blocking, arms the soft hrtimer so
 * the guest timer interrupt can still be delivered while descheduled.
 */
static void _kvm_save_timer(struct kvm_vcpu *vcpu)
{
	unsigned long ticks, delta, cfg;
	ktime_t expire;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
	 * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG
	 * If oneshot timer is fired, CSR TVAL will be -1
	 * Here judge one-shot timer fired by checking whether TVAL is larger
	 * than TCFG
	 */
	if (ticks < cfg)
		delta = tick_to_ns(vcpu, ticks);
	else
		delta = 0; /* one-shot already fired: expire immediately */

	expire = ktime_add_ns(ktime_get(), delta);
	vcpu->arch.expire = expire;
	if (kvm_vcpu_is_blocking(vcpu)) {

		/*
		 * HRTIMER_MODE_PINNED_HARD is suggested since vcpu may run in
		 * the same physical cpu in next time, and the timer should run
		 * in hardirq context even in the PREEMPT_RT case.
		 */
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
	}
}
179
/*
 * Save guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/* Hard timer CSRs are per-CPU: stay on this CPU while saving them */
	preempt_disable();

	/* Save hard timer state */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
	/* Only switch to soft-timer emulation if the guest timer is enabled */
	if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
		_kvm_save_timer(vcpu);

	/* Save timer-related state to vCPU context */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	preempt_enable();
}
200