// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_eiointc.h>
#include "trace.h"

unsigned long vpid_mask;
struct kvm_world_switch *kvm_loongarch_ops;
static int gcsr_flag[CSR_MAX_NUMS];
static struct kvm_context __percpu *vmcs;

int get_gcsr_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		return gcsr_flag[csr];

	return INVALID_GCSR;
}

static inline void set_gcsr_sw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= SW_GCSR;
}

static inline void set_gcsr_hw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= HW_GCSR;
}

/*
 * The default value of gcsr_flag[CSR] is 0. This function sets the
 * flag to 1 (SW_GCSR) or 2 (HW_GCSR) depending on whether the guest
 * CSR is emulated in software or backed by hardware. The flag is
 * consulted by the get/set_gcsr helpers: if it is HW_GCSR, the CSR
 * must be accessed with gcsrrd/gcsrwr; otherwise it is emulated with
 * a software csr.
 */
static void kvm_init_gcsr_flag(void)
{
	set_gcsr_hw_flag(LOONGARCH_CSR_CRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_EUEN);
	set_gcsr_hw_flag(LOONGARCH_CSR_MISC);
	set_gcsr_hw_flag(LOONGARCH_CSR_ECFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT);
	set_gcsr_hw_flag(LOONGARCH_CSR_ERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADI);
	set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_ASID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDL);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDH);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1);
	set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE);
	set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_CPUID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS0);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS1);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS2);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS4);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS5);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS6);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS7);
	set_gcsr_hw_flag(LOONGARCH_CSR_TMID);
	set_gcsr_hw_flag(LOONGARCH_CSR_TCFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_TVAL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR);
	set_gcsr_hw_flag(LOONGARCH_CSR_CNTC);
	set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3);

	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE);
	set_gcsr_sw_flag(LOONGARCH_CSR_CTAG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE);

	set_gcsr_sw_flag(LOONGARCH_CSR_FWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_FWPS);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPS);

	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3);
}
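
/*
 * Illustrative sketch of how the flag table above is meant to be
 * consumed (a minimal sketch, not code from this file; the helper
 * names kvm_read_hw_gcsr() and kvm_read_sw_gcsr() are assumptions
 * used purely for illustration):
 *
 *	static unsigned long kvm_gcsr_get(struct kvm_vcpu *vcpu, int id)
 *	{
 *		if (get_gcsr_flag(id) & HW_GCSR)
 *			return kvm_read_hw_gcsr(id);	// gcsrrd-backed path
 *		// otherwise fall back to the software-emulated CSR image
 *		return kvm_read_sw_gcsr(vcpu->arch.csr, id);
 *	}
 */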

static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long vpid;
	struct kvm_context *context;

	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	vpid = context->vpid_cache + 1;
	if (!(vpid & vpid_mask)) {
		/* finish round of vpid loop */
		if (unlikely(!vpid))
			vpid = vpid_mask + 1;

		++vpid; /* vpid 0 reserved for root */

		/* start new vpid cycle */
		kvm_flush_tlb_all();
	}

	context->vpid_cache = vpid;
	vcpu->arch.vpid = vpid;
}

void kvm_check_vpid(struct kvm_vcpu *vcpu)
{
	int cpu;
	bool migrated;
	unsigned long ver, old, vpid;
	struct kvm_context *context;

	cpu = smp_processor_id();
	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the vCPU's guest TLB state on this CPU may be stale.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	migrated = (vcpu->cpu != cpu);

	/*
	 * Check if our vpid is of an older version
	 *
	 * We also discard the stored vpid if we've executed on
	 * another CPU, as the guest mappings may have changed without
	 * hypervisor knowledge.
	 */
	ver = vcpu->arch.vpid & ~vpid_mask;
	old = context->vpid_cache & ~vpid_mask;
	if (migrated || (ver != old)) {
		kvm_update_vpid(vcpu, cpu);
		trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
		vcpu->cpu = cpu;
		kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
	}

	/* Restore GSTAT(0x50).vpid */
	vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT;
	change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid);
}
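
/*
 * Worked example of the version/ID split above (illustrative;
 * assumes an 8-bit GID field, i.e. vpid_mask == 0xff): a vpid_cache
 * value of 0x1ff encodes version 0x100 in the upper bits and guest
 * ID 0xff in the lower bits. The next allocation computes 0x200,
 * whose ID bits are all zero, so a new cycle begins: the TLB is
 * flushed and the vpid becomes 0x201, skipping ID 0, which remains
 * reserved for root.
 */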

void kvm_init_vmcs(struct kvm *kvm)
{
	kvm->arch.vmcs = vmcs;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_enable_virtualization_cpu(void)
{
	unsigned long env, gcfg = 0;

	env = read_csr_gcfg();

	/* First init gcfg, gstat, gintc, gtlbc. All guests use the same config */
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * GCI=2:       Trap on init or unimplemented cache instructions.
	 * TORU=0:      Trap on Root Unimplemented.
	 * CACTRL=1:    Root control cache.
	 * TOP=0:       Trap on Privilege.
	 * TOE=0:       Trap on Exception.
	 * TIT=0:       Trap on Timer.
	 */
	if (env & CSR_GCFG_GCIP_ALL)
		gcfg |= CSR_GCFG_GCI_SECURE;
	if (env & CSR_GCFG_MATC_ROOT)
		gcfg |= CSR_GCFG_MATC_ROOT;

	write_csr_gcfg(gcfg);

	kvm_flush_tlb_all();

	/* Enable using TGID */
	set_csr_gtlbc(CSR_GTLBC_USETGID);
	kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
		  read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());

	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/* Flush any remaining guest TLB entries */
	kvm_flush_tlb_all();
}
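
/*
 * Illustrative note (an assumption, not taken from this file): on a
 * CPU whose probed GCFG capabilities include both CSR_GCFG_GCIP_ALL
 * and CSR_GCFG_MATC_ROOT, the value written back above amounts to
 *
 *	gcfg = CSR_GCFG_GCI_SECURE | CSR_GCFG_MATC_ROOT;
 *
 * with the remaining fields left at their cleared defaults, as
 * listed in the GCI/TORU/CACTRL/TOP/TOE/TIT comment above.
 */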

static int kvm_loongarch_env_init(void)
{
	int cpu, order, ret;
	void *addr;
	struct kvm_context *context;

	vmcs = alloc_percpu(struct kvm_context);
	if (!vmcs) {
		pr_err("kvm: failed to allocate percpu kvm_context\n");
		return -ENOMEM;
	}

	kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL);
	if (!kvm_loongarch_ops) {
		free_percpu(vmcs);
		vmcs = NULL;
		return -ENOMEM;
	}

	/*
	 * The PGD register is shared between the root kernel and the KVM
	 * hypervisor, so the world switch entry should live in the DMW
	 * area rather than the TLB-mapped area, to avoid re-entering the
	 * page fault handler.
	 *
	 * In the future, if hardware page-table walking is supported, the
	 * world switch code will not need to be copied to the DMW area.
	 */
	order = get_order(kvm_exception_size + kvm_enter_guest_size);
	addr = (void *)__get_free_pages(GFP_KERNEL, order);
	if (!addr) {
		free_percpu(vmcs);
		vmcs = NULL;
		kfree(kvm_loongarch_ops);
		kvm_loongarch_ops = NULL;
		return -ENOMEM;
	}

	memcpy(addr, kvm_exc_entry, kvm_exception_size);
	memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
	kvm_loongarch_ops->exc_entry = addr;
	kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
	kvm_loongarch_ops->page_order = order;

	vpid_mask = read_csr_gstat();
	vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
	if (vpid_mask)
		vpid_mask = GENMASK(vpid_mask - 1, 0);

	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vmcs, cpu);
		context->vpid_cache = vpid_mask + 1;
		context->last_vcpu = NULL;
	}

	kvm_init_gcsr_flag();

	/* Register LoongArch IPI interrupt controller interface. */
	ret = kvm_loongarch_register_ipi_device();
	if (ret)
		return ret;

	/* Register LoongArch EIOINTC interrupt controller interface. */
	ret = kvm_loongarch_register_eiointc_device();

	return ret;
}

static void kvm_loongarch_env_exit(void)
{
	unsigned long addr;

	if (vmcs)
		free_percpu(vmcs);

	if (kvm_loongarch_ops) {
		if (kvm_loongarch_ops->exc_entry) {
			addr = (unsigned long)kvm_loongarch_ops->exc_entry;
			free_pages(addr, kvm_loongarch_ops->page_order);
		}
		kfree(kvm_loongarch_ops);
	}
}
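
/*
 * Worked example for the vpid_mask setup above (illustrative;
 * assumes GSTAT.GIDBIT reads back as 8): vpid_mask becomes
 * GENMASK(7, 0) == 0xff, and each pCPU's vpid_cache starts at
 * vpid_mask + 1 == 0x100, i.e. version 1 with ID 0. A freshly
 * created vCPU carries vpid 0 (version 0), so its first
 * kvm_check_vpid() always mismatches and allocates a fresh vpid.
 */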

static int kvm_loongarch_init(void)
{
	int r;

	if (!cpu_has_lvz) {
		kvm_info("Hardware virtualization not available\n");
		return -ENODEV;
	}
	r = kvm_loongarch_env_init();
	if (r)
		return r;

	return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void kvm_loongarch_exit(void)
{
	kvm_exit();
	kvm_loongarch_env_exit();
}

module_init(kvm_loongarch_init);
module_exit(kvm_loongarch_exit);

#ifdef MODULE
static const struct cpu_feature kvm_feature[] = {
	{ .feature = cpu_feature(LOONGARCH_LVZ) },
	{},
};
MODULE_DEVICE_TABLE(cpu, kvm_feature);
#endif