// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/kvm_nacl.h>
#include <asm/sbi.h>

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_enable_virtualization_cpu(void)
{
	int rc;

	rc = kvm_riscv_nacl_enable();
	if (rc)
		return rc;

	csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
	csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);

	/* VS should access only the time counter directly. Everything else should trap */
	csr_write(CSR_HCOUNTEREN, 0x02);

	csr_write(CSR_HVIP, 0);

	kvm_riscv_aia_enable();

	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_riscv_aia_disable();

	/*
	 * After clearing the hideleg CSR, the host kernel will receive
	 * spurious interrupts if hvip CSR has pending interrupts and the
	 * corresponding enable bits in vsie CSR are asserted. To avoid it,
	 * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
	 */
	csr_write(CSR_VSIE, 0);
	csr_write(CSR_HVIP, 0);
	csr_write(CSR_HEDELEG, 0);
	csr_write(CSR_HIDELEG, 0);

	kvm_riscv_nacl_disable();
}

static void kvm_riscv_teardown(void)
{
	kvm_riscv_aia_exit();
	kvm_riscv_nacl_exit();
	kvm_unregister_perf_callbacks();
}

static int __init riscv_kvm_init(void)
{
	int rc;
	char slist[64];
	const char *str;

	if (!riscv_isa_extension_available(NULL, h)) {
		kvm_info("hypervisor extension not available\n");
		return -ENODEV;
	}

	if (sbi_spec_is_0_1()) {
		kvm_info("require SBI v0.2 or higher\n");
		return -ENODEV;
	}

	if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
		kvm_info("require SBI RFENCE extension\n");
		return -ENODEV;
	}

	rc = kvm_riscv_nacl_init();
	if (rc && rc != -ENODEV)
		return rc;

	kvm_riscv_gstage_mode_detect();

	kvm_riscv_gstage_vmid_detect();

	rc = kvm_riscv_aia_init();
	if (rc && rc != -ENODEV) {
		kvm_riscv_nacl_exit();
		return rc;
	}

	kvm_info("hypervisor extension available\n");

	if (kvm_riscv_nacl_available()) {
		rc = 0;
		slist[0] = '\0';
		if (kvm_riscv_nacl_sync_csr_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_csr");
			rc++;
		}
		if (kvm_riscv_nacl_sync_hfence_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_hfence");
			rc++;
		}
		if (kvm_riscv_nacl_sync_sret_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_sret");
			rc++;
		}
		if (kvm_riscv_nacl_autoswap_csr_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "autoswap_csr");
			rc++;
		}
		kvm_info("using SBI nested acceleration with %s\n",
			 (rc) ? slist : "no features");
	}

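	/*
	 * Report the G-stage translation mode detected by
	 * kvm_riscv_gstage_mode_detect() above; refuse to load if the
	 * detected mode is not one of the modes KVM supports.
	 */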
slist : "no features"); 135 } 136 137 switch (kvm_riscv_gstage_mode()) { 138 case HGATP_MODE_SV32X4: 139 str = "Sv32x4"; 140 break; 141 case HGATP_MODE_SV39X4: 142 str = "Sv39x4"; 143 break; 144 case HGATP_MODE_SV48X4: 145 str = "Sv48x4"; 146 break; 147 case HGATP_MODE_SV57X4: 148 str = "Sv57x4"; 149 break; 150 default: 151 return -ENODEV; 152 } 153 kvm_info("using %s G-stage page table format\n", str); 154 155 kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits()); 156 157 if (kvm_riscv_aia_available()) 158 kvm_info("AIA available with %d guest external interrupts\n", 159 kvm_riscv_aia_nr_hgei); 160 161 kvm_register_perf_callbacks(NULL); 162 163 rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); 164 if (rc) { 165 kvm_riscv_teardown(); 166 return rc; 167 } 168 169 return 0; 170 } 171 module_init(riscv_kvm_init); 172 173 static void __exit riscv_kvm_exit(void) 174 { 175 kvm_riscv_teardown(); 176 177 kvm_exit(); 178 } 179 module_exit(riscv_kvm_exit); 180