xref: /linux/arch/loongarch/include/asm/kvm_para.h (revision b00f7f4f8e936da55f2e6c7fd96391ef54c145fc)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_KVM_PARA_H
#define _ASM_LOONGARCH_KVM_PARA_H

/*
 * Hypercall code field
 */
#define HYPERVISOR_KVM			1
#define HYPERVISOR_VENDOR_SHIFT		8
#define HYPERCALL_ENCODE(vendor, code)	((vendor << HYPERVISOR_VENDOR_SHIFT) + code)

#define KVM_HCALL_CODE_SERVICE		0
#define KVM_HCALL_CODE_SWDBG		1

#define KVM_HCALL_SERVICE		HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
#define  KVM_HCALL_FUNC_IPI		1
#define  KVM_HCALL_FUNC_NOTIFY		2

#define KVM_HCALL_SWDBG			HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
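
/*
 * Worked example (a sketch restating the arithmetic above, not an extra
 * ABI definition): with HYPERVISOR_KVM == 1 and HYPERVISOR_VENDOR_SHIFT == 8,
 * the vendor ID lands in bits [15:8] and the per-vendor code in bits [7:0]:
 *
 *   KVM_HCALL_SERVICE == (1 << 8) + 0 == 0x100
 *   KVM_HCALL_SWDBG   == (1 << 8) + 1 == 0x101
 *
 * The checks below only restate that arithmetic.
 */
_Static_assert(KVM_HCALL_SERVICE == 0x100, "unexpected SERVICE hypercall code");
_Static_assert(KVM_HCALL_SWDBG == 0x101, "unexpected SWDBG hypercall code");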

/*
 * LoongArch hypercall return code
 */
#define KVM_HCALL_SUCCESS		0
#define KVM_HCALL_INVALID_CODE		-1UL
#define KVM_HCALL_INVALID_PARAMETER	-2UL

#define KVM_STEAL_PHYS_VALID		BIT_ULL(0)
#define KVM_STEAL_PHYS_MASK		GENMASK_ULL(63, 6)

struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u32 pad[12];
};
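
/*
 * Sketch (an illustrative helper, not used elsewhere in this header; the
 * helper name is made up for the example): the physical address of a
 * 64-byte-aligned struct kvm_steal_time area is advertised to the
 * hypervisor with the low valid bit set, so a guest would pack it roughly
 * like this before handing it over.
 */
static __always_inline u64 kvm_steal_time_gpa_example(u64 gpa)
{
	/* keep the aligned address bits, then mark the value as valid */
	return (gpa & KVM_STEAL_PHYS_MASK) | KVM_STEAL_PHYS_VALID;
}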

/*
 * Hypercall interface for the KVM hypervisor
 *
 * a0: function identifier
 * a1-a5: up to five arguments
 * The return value is placed back in a0.
 * A usage sketch follows kvm_hypercall5() below.
 */
static __always_inline long kvm_hypercall0(u64 fid)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall2(u64 fid,
		unsigned long arg0, unsigned long arg1)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall3(u64 fid,
	unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall4(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;
	register unsigned long a4  asm("a4") = arg3;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
		: "memory"
		);

	return ret;
}

static __always_inline long kvm_hypercall5(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1  asm("a1") = arg0;
	register unsigned long a2  asm("a2") = arg1;
	register unsigned long a3  asm("a3") = arg2;
	register unsigned long a4  asm("a4") = arg3;
	register unsigned long a5  asm("a5") = arg4;

	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
		: "memory"
		);

	return ret;
}
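
/*
 * Usage sketch (illustrative only; the function name and argument layout
 * below are assumptions made for the example, only the KVM_HCALL_*
 * constants come from this header): a guest issues a service hypercall by
 * passing the function ID in a0 and its arguments in a1 onwards, then
 * compares the value coming back in a0 against the return codes above.
 */
static __always_inline int kvm_notify_example(unsigned long feature,
					      unsigned long data)
{
	/* a0 = KVM_HCALL_FUNC_NOTIFY, a1/a2 = arguments, result back in a0 */
	long ret = kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, feature, data);

	return ret == KVM_HCALL_SUCCESS ? 0 : -1;
}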

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}

#endif /* _ASM_LOONGARCH_KVM_PARA_H */