// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>
#include <asm/sbi.h>

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_enable_virtualization_cpu(void)
{
	int rc;

	rc = kvm_riscv_nacl_enable();
	if (rc)
		return rc;

	/* Delegate the default set of guest exceptions and interrupts */
	csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
	csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);

	/* VS should access only the time counter directly; everything else should trap */
	csr_write(CSR_HCOUNTEREN, 0x02);

	/* Start with no virtual interrupts pending for the guest */
	csr_write(CSR_HVIP, 0);

	kvm_riscv_aia_enable();

	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_riscv_aia_disable();

	/*
	 * After clearing the hideleg CSR, the host kernel will receive
	 * spurious interrupts if the hvip CSR has pending interrupts and
	 * the corresponding enable bits in the vsie CSR are asserted. To
	 * avoid this, the hvip and vsie CSRs must be cleared before
	 * clearing the hideleg CSR.
	 */
	csr_write(CSR_VSIE, 0);
	csr_write(CSR_HVIP, 0);

	csr_write(CSR_HEDELEG, 0);
	csr_write(CSR_HIDELEG, 0);

	kvm_riscv_nacl_disable();
}

static void kvm_riscv_teardown(void)
{
	kvm_riscv_aia_exit();
	kvm_riscv_nacl_exit();
	kvm_unregister_perf_callbacks();
}

static int __init riscv_kvm_init(void)
{
	int rc;
	char slist[64];
	const char *str;

	if (!riscv_isa_extension_available(NULL, h)) {
		kvm_info("hypervisor extension not available\n");
		return -ENODEV;
	}

	if (sbi_spec_is_0_1()) {
		kvm_info("require SBI v0.2 or higher\n");
		return -ENODEV;
	}

	if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
		kvm_info("require SBI RFENCE extension\n");
		return -ENODEV;
	}

	rc = kvm_riscv_nacl_init();
	if (rc && rc != -ENODEV)
		return rc;

	kvm_riscv_gstage_mode_detect();

	kvm_riscv_gstage_vmid_detect();

	rc = kvm_riscv_aia_init();
	if (rc && rc != -ENODEV) {
		kvm_riscv_nacl_exit();
		return rc;
	}

	kvm_info("hypervisor extension available\n");

	if (kvm_riscv_nacl_available()) {
		/* Reuse rc as a count of the NACL features found */
		rc = 0;
		slist[0] = '\0';
		if (kvm_riscv_nacl_sync_csr_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_csr");
			rc++;
		}
		if (kvm_riscv_nacl_sync_hfence_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_hfence");
			rc++;
		}
		if (kvm_riscv_nacl_sync_sret_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_sret");
			rc++;
		}
		if (kvm_riscv_nacl_autoswap_csr_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "autoswap_csr");
			rc++;
		}
		kvm_info("using SBI nested acceleration with %s\n",
			 (rc) ? slist : "no features");
	}
	switch (kvm_riscv_gstage_mode) {
	case HGATP_MODE_SV32X4:
		str = "Sv32x4";
		break;
	case HGATP_MODE_SV39X4:
		str = "Sv39x4";
		break;
	case HGATP_MODE_SV48X4:
		str = "Sv48x4";
		break;
	case HGATP_MODE_SV57X4:
		str = "Sv57x4";
		break;
	default:
		return -ENODEV;
	}
	kvm_info("using %s G-stage page table format\n", str);

	kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());

	if (kvm_riscv_aia_available())
		kvm_info("AIA available with %d guest external interrupts\n",
			 kvm_riscv_aia_nr_hgei);

	kvm_register_perf_callbacks(NULL);

	rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (rc) {
		kvm_riscv_teardown();
		return rc;
	}

	return 0;
}
module_init(riscv_kvm_init);

static void __exit riscv_kvm_exit(void)
{
	kvm_exit();

	kvm_riscv_teardown();
}
module_exit(riscv_kvm_exit);
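
/*
 * Usage sketch (illustrative only, NOT part of this kernel source file):
 * once this module has initialized successfully, userspace reaches the
 * hypervisor through the standard /dev/kvm interface. The standalone
 * program below uses only long-stable generic KVM ioctls; build it
 * separately as a normal userspace binary.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd, vm_fd;

	/* Fails if KVM is absent (e.g. riscv_kvm_init() returned -ENODEV) */
	kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* The generic KVM API version has been 12 for many years */
	printf("KVM API version: %d\n", ioctl(kvm_fd, KVM_GET_API_VERSION, 0));

	/* Creating a VM exercises the arch paths this module sets up */
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0) {
		perror("KVM_CREATE_VM");
		close(kvm_fd);
		return 1;
	}
	printf("created VM (fd %d)\n", vm_fd);

	close(vm_fd);
	close(kvm_fd);
	return 0;
}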