// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
#include "trace.h"

unsigned long vpid_mask;
struct kvm_world_switch *kvm_loongarch_ops;
static int gcsr_flag[CSR_MAX_NUMS];
static struct kvm_context __percpu *vmcs;

int get_gcsr_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		return gcsr_flag[csr];

	return INVALID_GCSR;
}

static inline void set_gcsr_sw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= SW_GCSR;
}

static inline void set_gcsr_hw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= HW_GCSR;
}

/*
 * The default value of gcsr_flag[CSR] is 0, and the helpers above mark
 * each GCSR as hardware backed (HW_GCSR) or software emulated (SW_GCSR).
 * The flag is consulted by the get/set_gcsr accessors: a GCSR flagged
 * HW_GCSR must be accessed with gcsrrd/gcsrwr, otherwise it is emulated
 * through the software CSR image.
 */
static void kvm_init_gcsr_flag(void)
{
	set_gcsr_hw_flag(LOONGARCH_CSR_CRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_EUEN);
	set_gcsr_hw_flag(LOONGARCH_CSR_MISC);
	set_gcsr_hw_flag(LOONGARCH_CSR_ECFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT);
	set_gcsr_hw_flag(LOONGARCH_CSR_ERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADI);
	set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_ASID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDL);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDH);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1);
	set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE);
	set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_CPUID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS0);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS1);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS2);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS4);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS5);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS6);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS7);
	set_gcsr_hw_flag(LOONGARCH_CSR_TMID);
	set_gcsr_hw_flag(LOONGARCH_CSR_TCFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_TVAL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR);
	set_gcsr_hw_flag(LOONGARCH_CSR_CNTC);
	set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3);

	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE);
	set_gcsr_sw_flag(LOONGARCH_CSR_CTAG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE);

	set_gcsr_sw_flag(LOONGARCH_CSR_FWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_FWPS);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPS);

	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3);
}
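
/*
 * Illustrative sketch, not part of the file itself: a read accessor built
 * on these flags would be expected to dispatch roughly as follows. The
 * helper names kvm_read_hw_gcsr()/kvm_read_sw_gcsr() are assumptions made
 * for this sketch, not definitions provided by this file:
 *
 *	if (get_gcsr_flag(id) & HW_GCSR)
 *		val = kvm_read_hw_gcsr(id);		// hardware gcsrrd access
 *	else if (get_gcsr_flag(id) & SW_GCSR)
 *		val = kvm_read_sw_gcsr(csr, id);	// software CSR image
 *	else
 *		reject the access as an unknown GCSR
 */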
static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long vpid;
	struct kvm_context *context;

	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	vpid = context->vpid_cache + 1;
	if (!(vpid & vpid_mask)) {
		/* finish round of vpid loop */
		if (unlikely(!vpid))
			vpid = vpid_mask + 1;

		++vpid; /* vpid 0 reserved for root */

		/* start new vpid cycle */
		kvm_flush_tlb_all();
	}

	context->vpid_cache = vpid;
	vcpu->arch.vpid = vpid;
}

void kvm_check_vpid(struct kvm_vcpu *vcpu)
{
	int cpu;
	bool migrated;
	unsigned long ver, old, vpid;
	struct kvm_context *context;

	cpu = smp_processor_id();
	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the vCPU's guest TLB state on this CPU may be stale.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	migrated = (vcpu->cpu != cpu);

	/*
	 * Check if our vpid is of an older version
	 *
	 * We also discard the stored vpid if we've executed on
	 * another CPU, as the guest mappings may have changed without
	 * hypervisor knowledge.
	 */
	ver = vcpu->arch.vpid & ~vpid_mask;
	old = context->vpid_cache & ~vpid_mask;
	if (migrated || (ver != old)) {
		kvm_update_vpid(vcpu, cpu);
		trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
		vcpu->cpu = cpu;
		kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
	}

	/* Restore GSTAT(0x50).vpid */
	vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT;
	change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid);
}
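
/*
 * Worked example of the versioning scheme above (illustrative numbers,
 * assuming an 8-bit GID field so that vpid_mask == 0xff): bits [7:0] of
 * a vpid are the guest ID programmed into GSTAT, and the bits above the
 * mask act as a version counter. If vpid_cache is 0x2ff, the next
 * allocation computes 0x300, sees the ID bits wrap to zero, flushes the
 * TLB once, and hands out 0x301 (version 3, ID 1), keeping ID 0 reserved
 * for the root. A vCPU whose stored version differs from vpid_cache, or
 * that has migrated to another CPU, takes this path via kvm_check_vpid().
 */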
void kvm_init_vmcs(struct kvm *kvm)
{
	kvm->arch.vmcs = vmcs;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_enable_virtualization_cpu(void)
{
	unsigned long env, gcfg = 0;

	env = read_csr_gcfg();

	/* First init gcfg, gstat, gintc, gtlbc. All guests use the same config */
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * GCI=2:       Trap on init or unimplemented cache instructions.
	 * TORU=0:      Trap on Root Unimplemented.
	 * CACTRL=1:    Root controls the cache.
	 * TOP=0:       Trap on Privilege.
	 * TOE=0:       Trap on Exception.
	 * TIT=0:       Trap on Timer.
	 */
	if (env & CSR_GCFG_GCIP_ALL)
		gcfg |= CSR_GCFG_GCI_SECURE;
	if (env & CSR_GCFG_MATC_ROOT)
		gcfg |= CSR_GCFG_MATC_ROOT;

	write_csr_gcfg(gcfg);

	kvm_flush_tlb_all();

	/* Enable using TGID */
	set_csr_gtlbc(CSR_GTLBC_USETGID);
	kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
		  read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());

	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/* Flush any remaining guest TLB entries */
	kvm_flush_tlb_all();
}

static int kvm_loongarch_env_init(void)
{
	int cpu, order, ret;
	void *addr;
	struct kvm_context *context;

	vmcs = alloc_percpu(struct kvm_context);
	if (!vmcs) {
		pr_err("kvm: failed to allocate percpu kvm_context\n");
		return -ENOMEM;
	}

	kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL);
	if (!kvm_loongarch_ops) {
		free_percpu(vmcs);
		vmcs = NULL;
		return -ENOMEM;
	}

	/*
	 * The PGD register is shared between the root kernel and the KVM
	 * hypervisor, so the world switch entry must live in the DMW area
	 * rather than a TLB-mapped area; otherwise a page fault on the
	 * entry code itself would re-enter the fault handler.
	 *
	 * If hardware page table walking is supported in the future, the
	 * world switch code will no longer need to be copied into the
	 * DMW area.
	 */
	order = get_order(kvm_exception_size + kvm_enter_guest_size);
	addr = (void *)__get_free_pages(GFP_KERNEL, order);
	if (!addr) {
		free_percpu(vmcs);
		vmcs = NULL;
		kfree(kvm_loongarch_ops);
		kvm_loongarch_ops = NULL;
		return -ENOMEM;
	}

	memcpy(addr, kvm_exc_entry, kvm_exception_size);
	memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
	kvm_loongarch_ops->exc_entry = addr;
	kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
	kvm_loongarch_ops->page_order = order;

	vpid_mask = read_csr_gstat();
	vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
	if (vpid_mask)
		vpid_mask = GENMASK(vpid_mask - 1, 0);

	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vmcs, cpu);
		context->vpid_cache = vpid_mask + 1;
		context->last_vcpu = NULL;
	}

	kvm_init_gcsr_flag();

	/* Register LoongArch IPI interrupt controller interface. */
	ret = kvm_loongarch_register_ipi_device();
	if (ret)
		return ret;

	/* Register LoongArch EIOINTC interrupt controller interface. */
	ret = kvm_loongarch_register_eiointc_device();
	if (ret)
		return ret;

	/* Register LoongArch PCH-PIC interrupt controller interface. */
	ret = kvm_loongarch_register_pch_pic_device();

	return ret;
}
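
/*
 * Worked example for the vpid_mask setup above (illustrative): if GSTAT
 * reports a 16-bit GID field, GIDBIT reads back as 16 and vpid_mask
 * becomes GENMASK(15, 0) == 0xffff. Every per-CPU vpid_cache then starts
 * at vpid_mask + 1 == 0x10000, so the first kvm_update_vpid() on that CPU
 * hands out 0x10001 (version 1, ID 1) without needing a TLB flush.
 */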
static void kvm_loongarch_env_exit(void)
{
	unsigned long addr;

	if (vmcs)
		free_percpu(vmcs);

	if (kvm_loongarch_ops) {
		if (kvm_loongarch_ops->exc_entry) {
			addr = (unsigned long)kvm_loongarch_ops->exc_entry;
			free_pages(addr, kvm_loongarch_ops->page_order);
		}
		kfree(kvm_loongarch_ops);
	}
}

static int kvm_loongarch_init(void)
{
	int r;

	if (!cpu_has_lvz) {
		kvm_info("Hardware virtualization not available\n");
		return -ENODEV;
	}
	r = kvm_loongarch_env_init();
	if (r)
		return r;

	return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void kvm_loongarch_exit(void)
{
	kvm_exit();
	kvm_loongarch_env_exit();
}

module_init(kvm_loongarch_init);
module_exit(kvm_loongarch_exit);

#ifdef MODULE
static const struct cpu_feature kvm_feature[] = {
	{ .feature = cpu_feature(LOONGARCH_LVZ) },
	{},
};
MODULE_DEVICE_TABLE(cpu, kvm_feature);
#endif