/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H

#include <linux/bits.h>
#include <linux/stringify.h>
#include <uapi/asm/kvm_para.h>

/*
 * Hypercall code field
 */
#define HYPERVISOR_KVM			1
#define HYPERVISOR_VENDOR_SHIFT		8
#define HYPERCALL_ENCODE(vendor, code)	(((vendor) << HYPERVISOR_VENDOR_SHIFT) + (code))

#define KVM_HCALL_CODE_SERVICE		0
#define KVM_HCALL_CODE_SWDBG		1

#define KVM_HCALL_SERVICE		HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define  KVM_HCALL_FUNC_IPI		1
#define  KVM_HCALL_FUNC_NOTIFY		2

#define KVM_HCALL_SWDBG			HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
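
/*
 * With HYPERVISOR_KVM = 1, KVM_HCALL_SERVICE above encodes to
 * (1 << 8) + 0 = 0x100 and KVM_HCALL_SWDBG to 0x101; the encoded value
 * is the immediate "code" operand of the hvcl instruction issued by the
 * wrappers below.
 */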

/*
 * LoongArch hypercall return code
 */
#define KVM_HCALL_SUCCESS		0
#define KVM_HCALL_INVALID_CODE		-1UL
#define KVM_HCALL_INVALID_PARAMETER	-2UL
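
/*
 * KVM_HCALL_INVALID_CODE and KVM_HCALL_INVALID_PARAMETER are
 * (unsigned long)-1 and -2; through the signed return value of the
 * kvm_hypercall*() wrappers below they therefore read as negative
 * error codes.
 */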

#define KVM_STEAL_PHYS_VALID		BIT_ULL(0)
#define KVM_STEAL_PHYS_MASK		GENMASK_ULL(63, 6)

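/*
 * The guest registers the physical address of a struct kvm_steal_time
 * with the hypervisor: bit 0 (KVM_STEAL_PHYS_VALID) marks the address
 * valid and bits 63:6 (KVM_STEAL_PHYS_MASK) hold the 64-byte-aligned
 * address itself. The field semantics below are assumed to follow the
 * usual KVM steal-time convention: @steal accumulates stolen time in
 * nanoseconds and @version is a sequence counter that is odd while the
 * hypervisor is updating the record.
 */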
struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u32 pad[12];
};

/*
 * Hypercall interface for the KVM hypervisor
 *
 * a0: function identifier
 * a1-a5: up to five arguments
 * The return value is placed in a0.
 */
static __always_inline long kvm_hypercall0(u64 fid)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall2(u64 fid,
		unsigned long arg0, unsigned long arg1)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall3(u64 fid,
	unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall4(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;
	register unsigned long a4  asm("a4") = arg3;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall5(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;
	register unsigned long a4  asm("a4") = arg3;
	register unsigned long a5  asm("a5") = arg4;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
		: "memory"
		);

	return ret;
}
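
/*
 * Illustrative sketch (not part of this header; names are placeholders):
 * the paravirtualized IPI path is expected to pass a CPU bitmap and a
 * minimum CPU id through the KVM_HCALL_FUNC_IPI service, along the
 * lines of
 *
 *	kvm_hypercall3(KVM_HCALL_FUNC_IPI, bitmap_low, bitmap_high, min_cpu);
 *
 * where the signed return value is KVM_HCALL_SUCCESS (0) on success or
 * one of the negative codes defined above.
 */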

#ifdef CONFIG_PARAVIRT
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
#else
static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}
#endif

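/*
 * Generic kvm_para hooks that LoongArch does not implement: no
 * paravirtualized hints are advertised and there is no mechanism here
 * for detecting that the guest was paused, so these return 0 and false.
 */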
static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}

#endif /* _ASM_LOONGARCH_KVM_PARA_H */