// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/delay.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>

/*
 * ktime_to_tick() - Scale ktime_t to timer tick value.
 */
static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
{
	u64 delta;

	delta = ktime_to_ns(now);
	return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
}

static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
{
	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
}
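
/*
 * Conversion sanity check (illustrative; assumes MNSEC_PER_SEC is
 * NSEC_PER_SEC pre-scaled by the same >> 20 used in kvm_init_timer()
 * below): with a 100 MHz stable timer, timer_mhz = 100000000 >> 20 = 95
 * and MNSEC_PER_SEC = 1000000000 >> 20 = 953, so 50 us of wall time maps
 * to ktime_to_tick() = 50000 * 95 / 953 = 4984 ticks, and tick_to_ns()
 * maps those ticks back to 4984 * 953 / 95 = 49997 ns; the two helpers
 * are inverses up to integer-division error.
 */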

/* Low-level hrtimer wake routine */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
	kvm_queue_irq(vcpu, INT_TI);
	rcuwait_wake_up(&vcpu->wait);

	return HRTIMER_NORESTART;
}

/*
 * Initialise the timer with the specified frequency and reset it to zero.
 */
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
{
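	/*
	 * Design note (inferred, not stated in the source): the frequency
	 * is pre-scaled by a cheap >> 20 rather than divided by 10^6,
	 * assuming MNSEC_PER_SEC carries the matching >> 20 scaling so the
	 * conversion helpers above remain consistent in plain Hz.
	 */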
	vcpu->arch.timer_mhz = timer_hz >> 20;

	/* Starting at 0 */
	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
}

/*
 * Restore soft timer state from saved context.
 */
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, estat;
	unsigned long ticks, delta, period;
	ktime_t expire, now;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Set the guest stable timer cfg CSR. Disable the timer before
	 * restoring the ESTAT CSR, so that a stale timer configuration
	 * cannot raise a spurious timer interrupt.
	 */
	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);

	write_gcsr_timercfg(0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	if (!(cfg & CSR_TCFG_EN)) {
		/* Guest timer is disabled, just restore timer registers */
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
		return;
	}

	/*
	 * Freeze the soft timer and sync the guest stable timer with it.
	 */
	if (kvm_vcpu_is_blocking(vcpu))
		hrtimer_cancel(&vcpu->arch.swtimer);

	/*
	 * From the LoongArch Reference Manual, Volume 1, Section 7.6.2:
	 * once a one-shot timer fires, CSR.TVAL reads as -1. Two cases
	 * lead here:
	 * 1) the timer fired while exiting to the host
	 * 2) the timer fired while the VM was handling the timer irq,
	 *    and then the VM exited to the host. The host must not
	 *    inject another timer irq, to avoid a spurious timer
	 *    interrupt.
	 */
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
	estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
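	/*
	 * A fired one-shot timer leaves TVAL at -1 (all ones), so the
	 * unsigned comparison ticks > cfg below holds exactly when the
	 * one-shot timer has already fired; see the matching TVAL/TCFG
	 * check in _kvm_save_timer().
	 */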
	if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
		/*
		 * Writing 0 to LOONGARCH_CSR_TVAL injects a timer irq
		 * and then sets CSR.TVAL to -1.
		 */
		write_gcsr_timertick(0);
		__delay(2); /* Wait a few cycles for the timer interrupt to be injected */

		/*
		 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR clears
		 * the timer interrupt while CSR.TVAL stays at -1, which
		 * avoids a spurious timer interrupt.
		 */
		if (!(estat & CPU_TIMER))
			gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
		return;
	}

	/*
	 * Set the remaining tick value if the timer has not yet expired.
	 */
	delta = 0;
	now = ktime_get();
	expire = vcpu->arch.expire;
	if (ktime_before(now, expire))
		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
	else if (cfg & CSR_TCFG_PERIOD) {
		period = cfg & CSR_TCFG_VAL;
		delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
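		/*
		 * Worked example (illustrative): if 2.5 periods have
		 * elapsed since the programmed expiry, delta % period is
		 * the half period already consumed, so the guest timer is
		 * reloaded with the remaining half period below.
		 */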
		delta = period - (delta % period);

		/*
		 * Inject the timer interrupt here even though the soft
		 * timer should already have injected it asynchronously,
		 * since the soft timer may have been cancelled while that
		 * async injection was in flight.
		 */
		kvm_queue_irq(vcpu, INT_TI);
	}

	write_gcsr_timertick(delta);
}

/*
 * Save guest timer state and switch to software emulation of the guest
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_save_timer(struct kvm_vcpu *vcpu)
{
	unsigned long ticks, delta, cfg;
	ktime_t expire;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);

	/*
	 * From the LoongArch Reference Manual, Volume 1, Section 7.6.2:
	 * when a periodic timer fires, CSR.TVAL is reloaded from CSR.TCFG;
	 * when a one-shot timer fires, CSR.TVAL becomes -1. A fired
	 * one-shot timer is therefore detected by checking whether TVAL
	 * is larger than TCFG.
	 */
	if (ticks < cfg)
		delta = tick_to_ns(vcpu, ticks);
	else
		delta = 0;
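	/*
	 * Example (illustrative, same assumed 100 MHz timer as above):
	 * 5000 remaining ticks give delta = 5000 * 953 / 95 = 50157 ns,
	 * so the soft timer, if armed below, fires roughly 50 us from now.
	 */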

	expire = ktime_add_ns(ktime_get(), delta);
	vcpu->arch.expire = expire;
	if (kvm_vcpu_is_blocking(vcpu)) {

		/*
		 * HRTIMER_MODE_ABS_PINNED_HARD is used since the vCPU may
		 * run on the same physical CPU next time, and the timer
		 * must run in hardirq context even in the PREEMPT_RT case.
		 */
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
	}
}

/*
 * Save guest timer state and switch to the soft guest timer if the hard
 * timer was in use.
 */
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	preempt_disable();

	/* Save hard timer state */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
	if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
		_kvm_save_timer(vcpu);

	/* Save timer-related state to vCPU context */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	preempt_enable();
}