/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
#define __ASM_LOONGARCH_KVM_VCPU_H__

#include <linux/kvm_host.h>
#include <asm/loongarch.h>

/* Controlled by 0x5 guest estat */
#define CPU_SIP0			(_ULCAST_(1))
#define CPU_SIP1			(_ULCAST_(1) << 1)
#define CPU_PMU				(_ULCAST_(1) << 10)
#define CPU_TIMER			(_ULCAST_(1) << 11)
#define CPU_IPI				(_ULCAST_(1) << 12)
#define CPU_AVEC			(_ULCAST_(1) << 14)
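
/*
 * Judging by the bit positions, these line up with the INT_* interrupt
 * numbers in <asm/loongarch.h> (SWI0/SWI1 at bits 0-1, the PMU overflow
 * at 10, the constant timer at 11, IPI at 12, AVEC at 14). Stated here
 * as an observation for the reader, not verified by this header.
 */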

/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */
#define CPU_IP0				(_ULCAST_(1))
#define CPU_IP1				(_ULCAST_(1) << 1)
#define CPU_IP2				(_ULCAST_(1) << 2)
#define CPU_IP3				(_ULCAST_(1) << 3)
#define CPU_IP4				(_ULCAST_(1) << 4)
#define CPU_IP5				(_ULCAST_(1) << 5)
#define CPU_IP6				(_ULCAST_(1) << 6)
#define CPU_IP7				(_ULCAST_(1) << 7)
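
/*
 * Hypothetical caller, for illustration only: a device model raising
 * guest hardware interrupt line n (0-7) would queue the matching
 * interrupt number, e.g.
 *
 *	kvm_queue_irq(vcpu, INT_HWI0 + n);
 *
 * with INT_HWI0 from <asm/loongarch.h>; kvm_queue_irq() is defined below.
 */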

#define MNSEC_PER_SEC			(NSEC_PER_SEC >> 20)
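
/*
 * NSEC_PER_SEC >> 20 is nanoseconds per second in "binary mega" (2^20)
 * units, roughly 953. Dividing by 2^20 instead of 10^6 keeps the later
 * tick/nanosecond scaling in cheap shifts and 64-bit multiplies -- an
 * assumption about the timer-emulation users, which live outside this
 * header.
 */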

/* KVM_IRQ_LINE irq field index values */
#define KVM_LOONGSON_IRQ_TYPE_SHIFT	24
#define KVM_LOONGSON_IRQ_TYPE_MASK	0xff
#define KVM_LOONGSON_IRQ_VCPU_SHIFT	16
#define KVM_LOONGSON_IRQ_VCPU_MASK	0xff
#define KVM_LOONGSON_IRQ_NUM_SHIFT	0
#define KVM_LOONGSON_IRQ_NUM_MASK	0xffff
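
/*
 * So a KVM_IRQ_LINE argument packs as (type << 24) | (vcpu << 16) | num,
 * and decodes with nothing but the constants above (illustrative only):
 *
 *	type = (irq >> KVM_LOONGSON_IRQ_TYPE_SHIFT) & KVM_LOONGSON_IRQ_TYPE_MASK;
 *	cpu  = (irq >> KVM_LOONGSON_IRQ_VCPU_SHIFT) & KVM_LOONGSON_IRQ_VCPU_MASK;
 *	num  = (irq >> KVM_LOONGSON_IRQ_NUM_SHIFT)  & KVM_LOONGSON_IRQ_NUM_MASK;
 */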

typedef union loongarch_instruction  larch_inst;
typedef int (*exit_handle_fn)(struct kvm_vcpu *, int);
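
/*
 * exit_handle_fn is the shape of the per-exception-code VM-exit handlers;
 * the int argument is presumably the exception/fault code that selected
 * the handler (an assumption -- the dispatch table lives in kvm/exit.c,
 * not here).
 */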

int  kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int  kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
int  kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int  kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int  kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run);
int  kvm_emu_idle(struct kvm_vcpu *vcpu);
int  kvm_pending_timer(struct kvm_vcpu *vcpu);
int  kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
void kvm_deliver_intr(struct kvm_vcpu *vcpu);
void kvm_deliver_exception(struct kvm_vcpu *vcpu);
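
/*
 * A hedged sketch of the MMIO flow suggested by the names above, following
 * the usual KVM pattern: kvm_emu_mmio_read()/kvm_emu_mmio_write() decode
 * the faulting instruction, fill vcpu->run->mmio and exit to userspace;
 * after userspace services the access, kvm_complete_mmio_read() (or
 * kvm_complete_iocsr_read() for IOCSR accesses) moves the result into the
 * destination GPR before the guest resumes.
 */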

void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
void kvm_save_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fcsr(struct loongarch_fpu *fpu);
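
/*
 * Reading of the names (definitions are elsewhere): guest FPU state is
 * handled lazily -- kvm_own_fpu() grants the guest FPU access and loads
 * its context on first use, kvm_lose_fpu() saves it back when the vCPU
 * is preempted or exits to userspace.
 */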

#ifdef CONFIG_CPU_HAS_LSX
int kvm_own_lsx(struct kvm_vcpu *vcpu);
void kvm_save_lsx(struct loongarch_fpu *fpu);
void kvm_restore_lsx(struct loongarch_fpu *fpu);
#else
static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
#endif

#ifdef CONFIG_CPU_HAS_LASX
int kvm_own_lasx(struct kvm_vcpu *vcpu);
void kvm_save_lasx(struct loongarch_fpu *fpu);
void kvm_restore_lasx(struct loongarch_fpu *fpu);
#else
static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif

#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu);
#else
static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
#endif
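
/*
 * The !CONFIG_* stubs above let common code call kvm_own_lsx()/lasx()/lbt()
 * unconditionally; -EINVAL signals the extension is unavailable, so the
 * caller can inject the appropriate fault into the guest instead (again an
 * assumption about the callers, which are not part of this header).
 */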

void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid);

/*
 * Loongarch KVM guest interrupt handling
 */
static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
{
	set_bit(irq, &vcpu->arch.irq_pending);
	clear_bit(irq, &vcpu->arch.irq_clear);
}

static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
{
	clear_bit(irq, &vcpu->arch.irq_pending);
	set_bit(irq, &vcpu->arch.irq_clear);
}
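
/*
 * Illustrative pairing (hypothetical caller): after queueing, kick the
 * vCPU so a halted guest re-evaluates irq_pending:
 *
 *	kvm_queue_irq(vcpu, INT_TI);
 *	kvm_vcpu_kick(vcpu);
 *
 * kvm_deliver_intr() is then expected to fold irq_pending/irq_clear into
 * guest interrupt state on the next entry.
 */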

static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
			unsigned int code, unsigned int subcode)
{
	/* only one exception can be injected */
	if (vcpu->arch.exception_pending)
		return -1;

	set_bit(code, &vcpu->arch.exception_pending);
	vcpu->arch.esubcode = subcode;
	return 0;
}
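
/*
 * Example (illustrative; EXCCODE_INE is the "instruction non-existent"
 * code from <asm/loongarch.h>):
 *
 *	kvm_queue_exception(vcpu, EXCCODE_INE, 0);
 *
 * kvm_deliver_exception() later converts the pending bit into guest CSR
 * state; queueing a second exception before delivery fails with -1.
 */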

static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gprs[num];
}

static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
{
	vcpu->arch.gprs[num] = val;
}
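
/*
 * Typical use (hypothetical, for illustration): completing an emulated
 * load by writing the fetched value into the decoded instruction's
 * destination register,
 *
 *	kvm_write_reg(vcpu, inst.reg2i12_format.rd, val);
 *
 * where reg2i12_format is one arm of union loongarch_instruction.
 */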

static inline bool kvm_pvtime_supported(void)
{
	return !!sched_info_on();
}
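
/*
 * Steal-time reporting is computed from the scheduler's run-delay
 * accounting, so paravirt time is only offered when sched_info is
 * active -- hence the sched_info_on() check.
 */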

static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature)
{
	return vcpu->kvm->arch.pv_features & BIT(feature);
}
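
/*
 * Illustrative check (the feature bit name is an assumption, taken from
 * the LoongArch KVM UAPI headers):
 *
 *	if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
 *		update_steal_time(vcpu);
 *
 * where update_steal_time() is a hypothetical caller.
 */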

#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */