/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

#define HGPR_OFFSET(x)		(PT_R0 + 8*x)
#define GGPR_OFFSET(x)		(KVM_ARCH_GGPR + 8*x)

.macro kvm_save_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

/*
 * Save and restore all GPRs except the base register,
 * which defaults to a2.
 */
.macro kvm_save_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

/*
 * Prepare the switch to guest: save host regs and restore guest regs.
 * a2: kvm_vcpu_arch, do not touch it until 'ertn'
 * t0, t1: temp registers
 */
.macro kvm_switch_to_guest
	/* Set host ECFG.VS=0, all exceptions share one exception entry */
	csrrd		t0, LOONGARCH_CSR_ECFG
	bstrins.w	t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
	csrwr		t0, LOONGARCH_CSR_ECFG

	/* Load up the new EENTRY */
	ld.d	t0, a2, KVM_ARCH_GEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Set Guest ERA */
	ld.d	t0, a2, KVM_ARCH_GPC
	csrwr	t0, LOONGARCH_CSR_ERA

	/* Save host PGDL */
	csrrd	t0, LOONGARCH_CSR_PGDL
	st.d	t0, a2, KVM_ARCH_HPGD

	/* Get the vcpu's struct kvm pointer */
	ld.d	t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH

	/* Load guest PGDL */
	li.w	t0, KVM_GPGD
	ldx.d	t0, t1, t0
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Mix GID and RID */
	csrrd		t1, LOONGARCH_CSR_GSTAT
	bstrpick.w	t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
	csrrd		t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr		t0, LOONGARCH_CSR_GTLBC

	/*
	 * Enable interrupts in root mode with the coming ertn, so that
	 * host interrupts can be serviced while the VM runs.
	 * The guest CRMD comes from the separate GCSR_CRMD register.
	 */
	ori	t0, zero, CSR_PRMD_PIE
	csrxchg	t0, t0, LOONGARCH_CSR_PRMD

	/* Set the PVM bit so that ertn switches to guest context */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	t0, t0, LOONGARCH_CSR_GSTAT

	/* Load guest GPRs */
	kvm_restore_guest_gprs a2
	/* Load the guest's a2 last, overwriting the base register */
	ld.d	a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	ertn	/* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
.endm

/*
 * Exception entry for general exceptions from guest mode:
 * - IRQs are disabled
 * - running with kernel privilege in root mode
 * - page mode is kept unchanged from the previous PRMD in root mode
 * - FIXME: a TLB exception must not happen here, because the TLB-related
 *   registers (pgd table, vmid registers etc.) still hold guest state;
 *   this will be fixed once hardware page walk is enabled.
 * Load kvm_vcpu from the reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS.
 */
	.text
	.cfi_sections	.debug_frame
SYM_CODE_START(kvm_exc_entry)
	csrwr	a2, KVM_TEMP_KS
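	/*
	 * csrwr swaps a2 with the scratch CSR: the guest's a2 is now parked
	 * in KVM_TEMP_KS, freeing a2 to serve as the kvm_vcpu_arch base
	 * register; the parked value is copied into the GPR array below.
	 */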
	csrrd	a2, KVM_VCPU_KS
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* After the GPRs are saved, any GPR is free to use */
	kvm_save_guest_gprs a2
	/* Save guest A2 */
	csrrd	t0, KVM_TEMP_KS
	st.d	t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	/* A2 is kvm_vcpu_arch, A1 is free to use */
	csrrd	s1, KVM_VCPU_KS
	ld.d	s0, s1, KVM_VCPU_RUN

	csrrd	t0, LOONGARCH_CSR_ESTAT
	st.d	t0, a2, KVM_ARCH_HESTAT
	csrrd	t0, LOONGARCH_CSR_ERA
	st.d	t0, a2, KVM_ARCH_GPC
	csrrd	t0, LOONGARCH_CSR_BADV
	st.d	t0, a2, KVM_ARCH_HBADV
	csrrd	t0, LOONGARCH_CSR_BADI
	st.d	t0, a2, KVM_ARCH_HBADI

	/* Restore host ECFG.VS */
	csrrd	t0, LOONGARCH_CSR_ECFG
	ld.d	t1, a2, KVM_ARCH_HECFG
	or	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_ECFG

	/* Restore host EENTRY */
	ld.d	t0, a2, KVM_ARCH_HEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Restore host pgd table */
	ld.d	t0, a2, KVM_ARCH_HPGD
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Disable the PGM bit so that the next ertn enters root mode */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	zero, t0, LOONGARCH_CSR_GSTAT

	/*
	 * Clear the GTLBC.TGID field:
	 *  0:      future TLB instructions update the root TLB
	 *  others: future TLB instructions update the guest TLB
	 *          (e.g. gpa-to-hpa mappings)
	 */
	csrrd		t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr		t0, LOONGARCH_CSR_GTLBC
	ld.d	tp, a2, KVM_ARCH_HTP
	ld.d	sp, a2, KVM_ARCH_HSP
	/* Restore the per-CPU base register */
	ld.d	u0, a2, KVM_ARCH_HPERCPU
	addi.d	sp, sp, -PT_SIZE

	/* Prepare to call the exit handler */
	or	a0, s0, zero
	or	a1, s1, zero
	ld.d	t8, a2, KVM_ARCH_HANDLE_EXIT
	jirl	ra, t8, 0

	or	a2, s1, zero
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* Resume the host when ret <= 0 */
	blez	a0, ret_to_host

	/*
	 * Return to the guest.
	 * Save the per-CPU base register again: the vcpu may have been
	 * migrated to another CPU while the exit was handled.
	 */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	s1, KVM_VCPU_KS
	kvm_switch_to_guest

ret_to_host:
	ld.d	a2, a2, KVM_ARCH_HSP
	addi.d	a2, a2, -PT_SIZE
	kvm_restore_host_gpr a2
	jr	ra

SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
SYM_CODE_END(kvm_exc_entry)

/*
 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 *
 * @register_param:
 *  a0: kvm_run* run
 *  a1: kvm_vcpu* vcpu
 */
SYM_FUNC_START(kvm_enter_guest)
	/* Allocate space at the bottom of the stack */
	addi.d	a2, sp, -PT_SIZE
	/* Save host GPRs */
	kvm_save_host_gpr a2

	addi.d	a2, a1, KVM_VCPU_ARCH
	st.d	sp, a2, KVM_ARCH_HSP
	st.d	tp, a2, KVM_ARCH_HTP
	/* Save the per-CPU base register */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	a1, KVM_VCPU_KS
	kvm_switch_to_guest
SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
SYM_FUNC_END(kvm_enter_guest)

SYM_FUNC_START(kvm_save_fpu)
	fpu_save_csr	a0 t1
	fpu_save_double	a0 t1
	fpu_save_cc	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_save_fpu)

SYM_FUNC_START(kvm_restore_fpu)
	fpu_restore_double	a0 t1
	fpu_restore_csr		a0 t1 t2
	fpu_restore_cc		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_fpu)

#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(kvm_save_lsx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lsx_save_data	a0 t1
	jr	ra
SYM_FUNC_END(kvm_save_lsx)

SYM_FUNC_START(kvm_restore_lsx)
	lsx_restore_data	a0 t1
	fpu_restore_cc		a0 t1 t2
	fpu_restore_csr		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_lsx)
#endif

#ifdef CONFIG_CPU_HAS_LASX
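/*
 * The LASX (and LSX) vector registers overlay the scalar FP registers,
 * so saving the full vector data below also covers the FP data; only
 * FCSR and the condition codes need separate handling.
 */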
SYM_FUNC_START(kvm_save_lasx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lasx_save_data	a0 t1
	jr	ra
SYM_FUNC_END(kvm_save_lasx)

SYM_FUNC_START(kvm_restore_lasx)
	lasx_restore_data	a0 t1
	fpu_restore_cc		a0 t1 t2
	fpu_restore_csr		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_lasx)
#endif

	.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
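/*
 * Byte sizes of the kvm_exc_entry and kvm_enter_guest code regions above,
 * exported for C code that needs to copy these blocks (e.g. when installing
 * the exception entry).
 */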