/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/linkage.h>
#include <linux/kvm_types.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>

#define HGPR_OFFSET(x)		(PT_R0 + 8*x)
#define GGPR_OFFSET(x)		(KVM_ARCH_GGPR + 8*x)
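
/*
 * Host GPRs are saved into a struct pt_regs frame on the host stack
 * (PT_R0 is the offset of the GPR array there); guest GPRs live in the
 * gprs[] array of struct kvm_vcpu_arch (KVM_ARCH_GGPR comes from
 * asm-offsets). Registers are 64 bits wide, hence the 8*x stride.
 */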

.macro kvm_save_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm
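
/*
 * For reference, each .irp above expands to one access per listed GPR;
 * kvm_save_host_gpr a2, for example, begins with:
 *
 *	st.d	$r1, a2, HGPR_OFFSET(1)
 *	st.d	$r2, a2, HGPR_OFFSET(2)
 *	...
 *
 * Only ra ($r1), tp ($r2), sp ($r3) and the callee-saved registers
 * ($r22-$r31) are listed: kvm_enter_guest is an ordinary function call,
 * so the caller-saved GPRs need not be preserved across it.
 */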

/*
 * Save and restore all GPRs except the base register,
 * which is a2 at every call site below.
 */
.macro kvm_save_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm
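
/*
 * Note that $r6 (a2, the base register) is skipped in both lists: its
 * guest value is parked in the KVM_TEMP_KS scratch CSR on exit and
 * restored last with an explicit ld.d on entry (see kvm_switch_to_guest
 * and kvm_exc_entry below).
 */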

/*
 * Prepare the switch to guest: program guest CSRs and restore guest regs.
 * a2: kvm_vcpu_arch, don't touch it until 'ertn'
 * t0, t1: temp registers
 */
.macro kvm_switch_to_guest
	/* Set host ECFG.VS=0, all exceptions share one exception entry */
	csrrd		t0, LOONGARCH_CSR_ECFG
	bstrins.w	t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
	csrwr		t0, LOONGARCH_CSR_ECFG

	/* Load up the new EENTRY */
	ld.d	t0, a2, KVM_ARCH_GEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Set Guest ERA */
	ld.d	t0, a2, KVM_ARCH_GPC
	csrwr	t0, LOONGARCH_CSR_ERA

	/* Load PGD for KVM hypervisor */
	ld.d	t0, a2, KVM_ARCH_KVMPGD
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Mix GID and RID */
	csrrd		t1, LOONGARCH_CSR_GSTAT
	bstrpick.w	t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
	csrrd		t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr		t0, LOONGARCH_CSR_GTLBC

	/*
	 * Enable interrupts in root mode with the coming ertn so that host
	 * interrupts can be serviced while the VM runs
	 * Guest CRMD comes from the separate GCSR_CRMD register
	 */
	ori	t0, zero, CSR_PRMD_PIE
	csrwr	t0, LOONGARCH_CSR_PRMD

	/* Set the PVM bit so that ertn switches to guest context */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	t0, t0,   LOONGARCH_CSR_GSTAT

	/* Load Guest GPRs */
	kvm_restore_guest_gprs a2
	/* Finally load guest a2, the base register itself */
	ld.d	a2, a2,	(KVM_ARCH_GGPR + 8 * REG_A2)

	ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
.endm
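
/*
 * kvm_switch_to_guest is the tail of both entry paths below:
 * kvm_enter_guest uses it for the initial entry, and kvm_exc_entry
 * re-uses it when the exit handler asks to resume the guest.
 */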

	/*
	 * Exception entry for general exceptions taken from guest mode
	 *  - IRQs are disabled
	 *  - kernel privilege in root mode
	 *  - page mode is kept unchanged from the previous PRMD in root mode
	 *  - FIXME: TLB exceptions must not happen here, since the TLB-related
	 *    registers (pgd table/vmid registers etc.) still hold guest state;
	 *    this will be fixed once hardware page walk is enabled
	 * Load kvm_vcpu from the reserved CSR KVM_VCPU_KS, and save a2 to
	 * KVM_TEMP_KS
	 *
	 * The PGD register is shared between the root kernel and the kvm
	 * hypervisor. So the world-switch entry must be in a DMW area rather
	 * than a TLB-mapped area, to avoid re-entry on a page fault.
	 */
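	/*
	 * kvm_exc_entry below is page-aligned, presumably so its address
	 * can serve as the page-aligned exception base programmed into
	 * CSR.EENTRY (KVM_ARCH_GEENTRY above) and so the world-switch text
	 * stays within a single page.
	 */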
	.text
	.p2align PAGE_SHIFT
	.cfi_sections	.debug_frame
SYM_CODE_START(kvm_exc_entry)
	UNWIND_HINT_END_OF_STACK
	csrwr	a2,   KVM_TEMP_KS
	csrrd	a2,   KVM_VCPU_KS
	addi.d	a2,   a2, KVM_VCPU_ARCH

	/* After saving the GPRs, any GPR is free to use */
	kvm_save_guest_gprs a2
	/* Save guest A2 */
	csrrd	t0,	KVM_TEMP_KS
	st.d	t0,	a2,	(KVM_ARCH_GGPR + 8 * REG_A2)

	/* A2 holds kvm_vcpu_arch, A1 is free to use */
	csrrd	s1,   KVM_VCPU_KS
	ld.d	s0,   s1, KVM_VCPU_RUN
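	/*
	 * s0/s1 are callee-saved, so the kvm_run (s0) and kvm_vcpu (s1)
	 * pointers survive the call into the C exit handler below.
	 */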

	csrrd	t0,   LOONGARCH_CSR_ESTAT
	st.d	t0,   a2, KVM_ARCH_HESTAT
	csrrd	t0,   LOONGARCH_CSR_ERA
	st.d	t0,   a2, KVM_ARCH_GPC
	csrrd	t0,   LOONGARCH_CSR_BADV
	st.d	t0,   a2, KVM_ARCH_HBADV
	csrrd	t0,   LOONGARCH_CSR_BADI
	st.d	t0,   a2, KVM_ARCH_HBADI

	/* Restore host ECFG.VS */
	csrrd	t0, LOONGARCH_CSR_ECFG
	ld.d	t1, a2, KVM_ARCH_HECFG
	or	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_ECFG

	/* Restore host EENTRY */
	ld.d	t0, a2, KVM_ARCH_HEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Restore host pgd table */
	ld.d	t0, a2, KVM_ARCH_HPGD
	csrwr	t0, LOONGARCH_CSR_PGDL

	/*
	 * Disable the PGM bit so that the next ertn enters root mode
	 */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	zero, t0, LOONGARCH_CSR_GSTAT

	/*
	 * Clear the GTLBC.TGID field
	 *       0: subsequent TLB instructions operate on root TLB entries
	 *  others: subsequent TLB instructions operate on guest TLB entries
	 *          (e.g. GPA-to-HPA mappings)
	 */
	csrrd	t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr	t0, LOONGARCH_CSR_GTLBC
	ld.d	tp, a2, KVM_ARCH_HTP
	ld.d	sp, a2, KVM_ARCH_HSP
	/* Restore the per-CPU base register */
	ld.d	u0, a2, KVM_ARCH_HPERCPU
	addi.d	sp, sp, -PT_SIZE

	/* Prepare to call the exit handler: a0 = run, a1 = vcpu */
	or	a0, s0, zero
	or	a1, s1, zero
	ld.d	t8, a2, KVM_ARCH_HANDLE_EXIT
	jirl	ra, t8, 0

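	/*
	 * a2 is caller-saved and may have been clobbered by the handler;
	 * recompute kvm_vcpu_arch from the preserved vcpu pointer in s1.
	 */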
	or	a2, s1, zero
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* Resume host when ret <= 0 */
	blez	a0, ret_to_host

	/*
	 * Return to guest
	 * Save the per-CPU base register again; the vCPU may have been
	 * migrated to another CPU
	 */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	s1, KVM_VCPU_KS
	kvm_switch_to_guest

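	/*
	 * Host GPRs were saved at HSP - PT_SIZE by kvm_enter_guest below,
	 * so rebuild that address before restoring them.
	 */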
ret_to_host:
	ld.d	a2, a2, KVM_ARCH_HSP
	addi.d	a2, a2, -PT_SIZE
	kvm_restore_host_gpr	a2
	jr	ra

SYM_CODE_END(kvm_exc_entry)
EXPORT_SYMBOL_FOR_KVM(kvm_exc_entry)

/*
 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 *
 * @register_param:
 *  a0: kvm_run* run
 *  a1: kvm_vcpu* vcpu
 */
SYM_FUNC_START(kvm_enter_guest)
	/* Allocate a pt_regs-sized frame below the current stack pointer */
	addi.d	a2, sp, -PT_SIZE
	/* Save host GPRs */
	kvm_save_host_gpr a2

	addi.d	a2, a1, KVM_VCPU_ARCH
	st.d	sp, a2, KVM_ARCH_HSP
	st.d	tp, a2, KVM_ARCH_HTP
	/* Save the per-CPU base register */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	a1, KVM_VCPU_KS
	kvm_switch_to_guest
SYM_FUNC_END(kvm_enter_guest)
EXPORT_SYMBOL_FOR_KVM(kvm_enter_guest)
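
/*
 * A minimal sketch of the C-side call (illustrative only; the real vcpu
 * run loop performs pre-entry checks around it):
 *
 *	int ret = kvm_enter_guest(vcpu->run, vcpu);
 *
 * Exits that the handler resolves with a positive return value are
 * resumed directly in kvm_exc_entry above, so this call only returns
 * once the exit handler asks for the host (a0 <= 0 at ret_to_host).
 */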
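/*
 * void kvm_save_fpu(struct loongarch_fpu *fpu)
 * void kvm_restore_fpu(struct loongarch_fpu *fpu)
 *
 * a0: FPU context to save to / restore from (the struct loongarch_fpu
 * argument type is an assumption based on the C callers)
 */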
SYM_FUNC_START(kvm_save_fpu)
	fpu_save_csr	a0 t1
	fpu_save_double a0 t1
	fpu_save_cc	a0 t1 t2
	jr		ra
SYM_FUNC_END(kvm_save_fpu)
EXPORT_SYMBOL_FOR_KVM(kvm_save_fpu)

SYM_FUNC_START(kvm_restore_fpu)
	fpu_restore_double	a0 t1
	fpu_restore_csr		a0 t1 t2
	fpu_restore_cc		a0 t1 t2
	jr			ra
SYM_FUNC_END(kvm_restore_fpu)
EXPORT_SYMBOL_FOR_KVM(kvm_restore_fpu)

#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(kvm_save_lsx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lsx_save_data	a0 t1
	jr		ra
SYM_FUNC_END(kvm_save_lsx)
EXPORT_SYMBOL_FOR_KVM(kvm_save_lsx)

SYM_FUNC_START(kvm_restore_lsx)
	lsx_restore_data	a0 t1
	fpu_restore_cc		a0 t1 t2
	fpu_restore_csr		a0 t1 t2
	jr			ra
SYM_FUNC_END(kvm_restore_lsx)
EXPORT_SYMBOL_FOR_KVM(kvm_restore_lsx)
#endif

#ifdef CONFIG_CPU_HAS_LASX
SYM_FUNC_START(kvm_save_lasx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lasx_save_data	a0 t1
	jr		ra
SYM_FUNC_END(kvm_save_lasx)
EXPORT_SYMBOL_FOR_KVM(kvm_save_lasx)

SYM_FUNC_START(kvm_restore_lasx)
	lasx_restore_data	a0 t1
	fpu_restore_cc		a0 t1 t2
	fpu_restore_csr		a0 t1 t2
	jr			ra
SYM_FUNC_END(kvm_restore_lasx)
EXPORT_SYMBOL_FOR_KVM(kvm_restore_lasx)
#endif

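/*
 * STACK_FRAME_NON_STANDARD exempts the restore helpers from objtool's
 * stack validation; presumably the LBT-conditional restore sequences are
 * what objtool cannot follow here.
 */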
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu
#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD kvm_restore_lsx
#endif
#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif
#endif
