// SPDX-License-Identifier: GPL-2.0

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_vcpu_timer.h>
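
/*
 * Guest time is the host cycle counter plus a per-VM offset kept in
 * gt->time_delta.
 */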
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}
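
/*
 * Convert the distance from "now" to a guest compare value into
 * nanoseconds for hrtimer, using the clocksource mult/shift pair
 * cached in the guest timer state.
 */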
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	/* Clamp to zero so an already-expired compare value cannot underflow */
	if (cycles < cycles_now)
		cycles_delta = 0;
	else
		cycles_delta = cycles - cycles_now;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}
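
/*
 * hrtimer callback for the emulated (non-Sstc) timer: once the guest's
 * compare time is reached, inject the VS-level timer interrupt; if the
 * timer fired early, re-arm it for the remaining delta.
 */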
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}
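
/* Stop a previously armed timer; -EINVAL if nothing was armed. */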
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}
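
/*
 * Not captured by the matched lines: timer_init() below installs a
 * kvm_riscv_vcpu_update_vstimecmp() callback for the Sstc case. A
 * minimal sketch of it, assuming the Sstc CSR names used elsewhere in
 * this file (CSR_VSTIMECMP/CSR_VSTIMECMPH):
 */
static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	/* The 64-bit compare value spans two CSRs on 32-bit hosts */
	ncsr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
	ncsr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
	ncsr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}
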
static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}
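
/*
 * Entry point used when the guest programs its next timer event
 * (e.g. via the SBI set_timer call); dispatches to the Sstc or
 * emulated backend selected at init time.
 */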
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	return t->timer_next_event(vcpu, ncycles);
}
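
/*
 * hrtimer callback for the Sstc case: hardware raises the interrupt
 * from vstimecmp by itself, so the callback only needs to wake up a
 * blocked VCPU.
 */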
static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}
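
/* True if the guest timer has expired or a VS timer IRQ is pending. */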
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;
	else
		return false;
}
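
/*
 * Arm a backup hrtimer before the VCPU blocks: with Sstc the compare
 * value lives in a hardware CSR, so the host still needs a timer to
 * wake the blocked VCPU when that compare time arrives.
 */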
static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;
}
static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}
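
/* KVM_GET_ONE_REG handler for the timer register group. */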
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
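
/* KVM_SET_ONE_REG handler for the timer register group. */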
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		/* The timebase frequency is fixed by the host */
		if (reg_val != riscv_timebase)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -ENOENT;
		break;
	}

	return ret;
}
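
/* Per-VCPU init: prefer the Sstc backend when the host supports it. */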
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	t->init_done = true;
	t->next_set = false;

	/* Enable sstc for every vcpu if available in hardware */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		t->sstc_enabled = true;
		hrtimer_setup(&t->hrt, kvm_riscv_vcpu_vstimer_expired,
			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		t->sstc_enabled = false;
		hrtimer_setup(&t->hrt, kvm_riscv_vcpu_hrtimer_expired,
			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}

int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}

int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}
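
/*
 * Program htimedelta so guest reads of the time CSR see the per-VM
 * offset; the 64-bit delta spans two CSRs on 32-bit hosts.
 */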
static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
	ncsr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	ncsr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
	ncsr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}
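
/* Restore guest timer CSR state when the VCPU is scheduled in. */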
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	ncsr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
	ncsr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
	ncsr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);
}
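
/*
 * Capture the guest's latest vstimecmp on VM exit; with Sstc the
 * guest writes the CSR directly, so the in-memory copy goes stale.
 */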
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	t->next_cycles = ncsr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)ncsr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = ncsr_read(CSR_VSTIMECMP);
#endif
}
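
/* Called when the VCPU is scheduled out or exits to user space. */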
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	/*
	 * If the VS-timer expires when no VCPU is running on a host CPU,
	 * a WFI executed by that host CPU becomes an effective NOP,
	 * resulting in no power savings. This is because, as per the
	 * RISC-V Privileged specification, a pending VS-level timer
	 * interrupt prevents WFI from stalling the hart.
	 *
	 * To address the above issue, vstimecmp CSR must be set to -1UL
	 * over here when the VCPU is scheduled out or exits to user space.
	 */
	csr_write(CSR_VSTIMECMP, -1UL);
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMPH, -1UL);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_blocking(vcpu);
}
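
/*
 * Per-VM init: cache the clocksource mult/shift for cycles-to-ns
 * conversion and zero the guest's starting time.
 */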
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	/* time_delta = -host_cycles makes guest time start near zero */
	gt->time_delta = -get_cycles64();
}